Index: head/etc/network.subr
===================================================================
--- head/etc/network.subr (revision 287196)
+++ head/etc/network.subr (revision 287197)
@@ -1,1719 +1,1805 @@
#
# Copyright (c) 2003 The FreeBSD Project. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#
IFCONFIG_CMD="/sbin/ifconfig"
: ${netif_ipexpand_max:=2048}
#
# Subroutines commonly used from network startup scripts.
# Requires that rc.conf be loaded first.
#
# ifn_start ifn
# Bring up and configure an interface. If some configuration is
# applied, print the interface configuration.
#
ifn_start()
{
local ifn cfg
ifn="$1"
cfg=1
[ -z "$ifn" ] && err 1 "ifn_start called without an interface"
ifscript_up ${ifn} && cfg=0
ifconfig_up ${ifn} && cfg=0
if ! noafif $ifn; then
afexists inet && ipv4_up ${ifn} && cfg=0
afexists inet6 && ipv6_up ${ifn} && cfg=0
fi
childif_create ${ifn} && cfg=0
return $cfg
}
# ifn_stop ifn
# Shutdown and de-configure an interface. If action is taken,
# print the interface name.
#
ifn_stop()
{
local ifn cfg
ifn="$1"
cfg=1
[ -z "$ifn" ] && err 1 "ifn_stop called without an interface"
if ! noafif $ifn; then
afexists inet6 && ipv6_down ${ifn} && cfg=0
afexists inet && ipv4_down ${ifn} && cfg=0
fi
ifconfig_down ${ifn} && cfg=0
ifscript_down ${ifn} && cfg=0
childif_destroy ${ifn} && cfg=0
return $cfg
}
# ifn_vnetup ifn
# Move ifn to the specified vnet jail.
#
ifn_vnetup()
{
ifn_vnet0 $1 vnet
}
# ifn_vnetdown ifn
# Reclaim ifn from the specified vnet jail.
#
ifn_vnetdown()
{
ifn_vnet0 $1 -vnet
}
# ifn_vnet0 ifn action
# Helper function for ifn_vnetup and ifn_vnetdown.
#
ifn_vnet0()
{
local _ifn _cfg _action _vnet
_ifn="$1"
_action="$2"
_cfg=1
if _vnet=$(vnetif $_ifn); then
${IFCONFIG_CMD} $_ifn $_action $_vnet && _cfg=0
fi
return $_cfg
}
# ifconfig_up if
# Evaluate ifconfig(8) arguments for interface $if and
# run ifconfig(8) with those arguments. It returns 0 if
# arguments were found and executed or 1 if the interface
# had no arguments. Pseudo arguments DHCP and WPA are handled
# here.
#
ifconfig_up()
{
local _cfg _ipv6_opts ifconfig_args
_cfg=1
# Make sure lo0 always comes up.
if [ "$1" = "lo0" ]; then
_cfg=0
fi
# inet6 specific
if ! noafif $1 && afexists inet6; then
if checkyesno ipv6_activate_all_interfaces; then
_ipv6_opts="-ifdisabled"
elif [ "$1" != "lo0" ]; then
_ipv6_opts="ifdisabled"
fi
# backward compatibility: $ipv6_enable
case $ipv6_enable in
[Yy][Ee][Ss]|[Tt][Rr][Uu][Ee]|[Oo][Nn]|1)
case $1 in
bridge[0-9]*)
# No accept_rtadv by default on if_bridge(4)
# to avoid a conflict with the member
# interfaces.
;;
*)
if ! checkyesno ipv6_gateway_enable; then
_ipv6_opts="${_ipv6_opts} accept_rtadv"
fi
;;
esac
;;
esac
case $ipv6_cpe_wanif in
$1)
_ipv6_opts="${_ipv6_opts} -no_radr accept_rtadv"
;;
esac
if [ -n "${_ipv6_opts}" ]; then
${IFCONFIG_CMD} $1 inet6 ${_ipv6_opts}
fi
fi
# ifconfig_IF
ifconfig_args=`ifconfig_getargs $1`
if [ -n "${ifconfig_args}" ]; then
eval ${IFCONFIG_CMD} $1 ${ifconfig_args}
_cfg=0
fi
# inet6 specific
if ! noafif $1 && afexists inet6; then
# ifconfig_IF_ipv6
ifconfig_args=`ifconfig_getargs $1 ipv6`
if [ -n "${ifconfig_args}" ]; then
# backward compatibility: inet6 keyword
case "${ifconfig_args}" in
:*|[0-9a-fA-F]*:*)
warn "\$ifconfig_$1_ipv6 needs leading" \
"\"inet6\" keyword for an IPv6 address."
ifconfig_args="inet6 ${ifconfig_args}"
;;
esac
${IFCONFIG_CMD} $1 inet6 -ifdisabled
eval ${IFCONFIG_CMD} $1 ${ifconfig_args}
_cfg=0
fi
# $ipv6_prefix_IF will be handled in
# ipv6_prefix_hostid_addr_common().
ifconfig_args=`get_if_var $1 ipv6_prefix_IF`
if [ -n "${ifconfig_args}" ]; then
${IFCONFIG_CMD} $1 inet6 -ifdisabled
_cfg=0
fi
# backward compatibility: $ipv6_ifconfig_IF
ifconfig_args=`get_if_var $1 ipv6_ifconfig_IF`
if [ -n "${ifconfig_args}" ]; then
warn "\$ipv6_ifconfig_$1 is obsolete." \
" Use ifconfig_$1_ipv6 instead."
${IFCONFIG_CMD} $1 inet6 -ifdisabled
eval ${IFCONFIG_CMD} $1 inet6 ${ifconfig_args}
_cfg=0
fi
fi
ifalias $1 link alias
ifalias $1 ether alias
if [ ${_cfg} -eq 0 ]; then
${IFCONFIG_CMD} $1 up
fi
if wpaif $1; then
/etc/rc.d/wpa_supplicant start $1
_cfg=0 # XXX: not sure this should count
elif hostapif $1; then
/etc/rc.d/hostapd start $1
_cfg=0
fi
if dhcpif $1; then
if [ $_cfg -ne 0 ] ; then
${IFCONFIG_CMD} $1 up
fi
if syncdhcpif $1; then
/etc/rc.d/dhclient start $1
fi
_cfg=0
fi
return $_cfg
}
# ifconfig_down if
# Returns 0 if wpa_supplicant, hostapd, or dhclient was stopped or the
# interface existed and was brought down, and 1 otherwise.
#
ifconfig_down()
{
local _cfg
_cfg=1
if wpaif $1; then
/etc/rc.d/wpa_supplicant stop $1
_cfg=0
elif hostapif $1; then
/etc/rc.d/hostapd stop $1
_cfg=0
fi
if dhcpif $1; then
/etc/rc.d/dhclient stop $1
_cfg=0
fi
if ifexists $1; then
${IFCONFIG_CMD} $1 down
_cfg=0
fi
return $_cfg
}
# get_if_var if var [default]
# Return the value of the pseudo-hash corresponding to $if where
# $var is a string containing the sub-string "IF" which will be
# replaced with $if after the characters defined in _punct are
# replaced with '_'. If the variable is unset, replace it with
# $default if given.
get_if_var()
{
local _if _punct _punct_c _var _default prefix suffix
if [ $# -ne 2 -a $# -ne 3 ]; then
err 3 'USAGE: get_if_var name var [default]'
fi
_if=$1
_punct=".-/+"
ltr ${_if} "${_punct}" '_' _if
_var=$2
_default=$3
prefix=${_var%%IF*}
suffix=${_var##*IF}
eval echo \${${prefix}${_if}${suffix}-${_default}}
}
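# Illustrative sketch (hypothetical rc.conf entries, not part of this
# revision): assuming rc.conf contains
#
#   ifconfig_em0="inet 192.0.2.10/24"
#   ifconfig_em0_100="inet 198.51.100.1/24"
#
# then "get_if_var em0 ifconfig_IF" echoes the first value, and
# "get_if_var em0.100 ifconfig_IF" echoes the second, because the "."
# in the interface name is translated to "_" before the lookup.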
# _ifconfig_getargs if [af]
# Prints the arguments for the supplied interface to stdout.
# Returns 1 if empty. In general, ifconfig_getargs should be used
# outside this file.
_ifconfig_getargs()
{
local _ifn _af
_ifn=$1
_af=${2+_$2}
if [ -z "$_ifn" ]; then
return 1
fi
get_if_var $_ifn ifconfig_IF$_af "$ifconfig_DEFAULT"
}
# ifconfig_getargs if [af]
# Takes the result from _ifconfig_getargs and removes pseudo
# args such as DHCP and WPA.
ifconfig_getargs()
{
local _tmpargs _arg _args _vnet
_tmpargs=`_ifconfig_getargs $1 $2`
if [ $? -eq 1 ]; then
return 1
fi
_args=
_vnet=0
for _arg in $_tmpargs; do
case $_arg:$_vnet in
[Dd][Hh][Cc][Pp]:0) ;;
[Nn][Oo][Aa][Uu][Tt][Oo]:0) ;;
[Nn][Oo][Ss][Yy][Nn][Cc][Dd][Hh][Cc][Pp]:0) ;;
[Ss][Yy][Nn][Cc][Dd][Hh][Cc][Pp]:0) ;;
[Ww][Pp][Aa]:0) ;;
[Hh][Oo][Ss][Tt][Aa][Pp]:0) ;;
vnet:0) _vnet=1 ;;
*:1) _vnet=0 ;;
*:0)
_args="$_args $_arg"
;;
esac
done
echo $_args
}
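# Illustrative sketch (hypothetical rc.conf entry): given
#
#   ifconfig_wlan0="WPA DHCP country DE"
#
# "ifconfig_getargs wlan0" echoes only "country DE"; the WPA and DHCP
# keywords are consumed by wpaif()/dhcpif() instead of being passed to
# ifconfig(8), and a trailing "vnet <jailname>" pair would likewise be
# stripped here and handled by vnetif().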
# autoif
# Returns 0 if the interface should be automatically configured at
# boot time and 1 otherwise.
autoif()
{
local _tmpargs _arg
_tmpargs=`_ifconfig_getargs $1`
for _arg in $_tmpargs; do
case $_arg in
[Nn][Oo][Aa][Uu][Tt][Oo])
return 1
;;
esac
done
return 0
}
# dhcpif if
# Returns 0 if the interface is a DHCP interface and 1 otherwise.
dhcpif()
{
local _tmpargs _arg
_tmpargs=`_ifconfig_getargs $1`
case $1 in
lo[0-9]*|\
stf[0-9]*|\
lp[0-9]*|\
sl[0-9]*)
return 1
;;
esac
if noafif $1; then
return 1
fi
for _arg in $_tmpargs; do
case $_arg in
[Dd][Hh][Cc][Pp])
return 0
;;
[Nn][Oo][Ss][Yy][Nn][Cc][Dd][Hh][Cc][Pp])
return 0
;;
[Ss][Yy][Nn][Cc][Dd][Hh][Cc][Pp])
return 0
;;
esac
done
return 1
}
# syncdhcpif
# Returns 0 if the interface should be configured synchronously and
# 1 otherwise.
syncdhcpif()
{
local _tmpargs _arg
_tmpargs=`_ifconfig_getargs $1`
if noafif $1; then
return 1
fi
for _arg in $_tmpargs; do
case $_arg in
[Nn][Oo][Ss][Yy][Nn][Cc][Dd][Hh][Cc][Pp])
return 1
;;
[Ss][Yy][Nn][Cc][Dd][Hh][Cc][Pp])
return 0
;;
esac
done
checkyesno synchronous_dhclient
}
# wpaif if
# Returns 0 if the interface is a WPA interface and 1 otherwise.
wpaif()
{
local _tmpargs _arg
_tmpargs=`_ifconfig_getargs $1`
for _arg in $_tmpargs; do
case $_arg in
[Ww][Pp][Aa])
return 0
;;
esac
done
return 1
}
# hostapif if
# Returns 0 if the interface is a HOSTAP interface and 1 otherwise.
hostapif()
{
local _tmpargs _arg
_tmpargs=`_ifconfig_getargs $1`
for _arg in $_tmpargs; do
case $_arg in
[Hh][Oo][Ss][Tt][Aa][Pp])
return 0
;;
esac
done
return 1
}
# vnetif if
# Returns 0 and echoes the jail name if the "vnet" keyword is specified
# on the interface, and 1 otherwise.
vnetif()
{
local _tmpargs _arg _vnet
_tmpargs=`_ifconfig_getargs $1`
_vnet=0
for _arg in $_tmpargs; do
case $_arg:$_vnet in
vnet:0) _vnet=1 ;;
*:1) echo $_arg; return 0 ;;
esac
done
return 1
}
# afexists af
# Returns 0 if the address family is enabled in the kernel
# 1 otherwise.
afexists()
{
local _af
_af=$1
case ${_af} in
inet|inet6)
check_kern_features ${_af}
;;
atm)
if [ -x /sbin/atmconfig ]; then
/sbin/atmconfig diag list > /dev/null 2>&1
else
return 1
fi
;;
link|ether)
return 0
;;
*)
err 1 "afexists(): Unsupported address family: $_af"
;;
esac
}
# noafif if
# Returns 0 if the interface has no af configuration and 1 otherwise.
noafif()
{
local _if
_if=$1
case $_if in
pflog[0-9]*|\
pfsync[0-9]*|\
usbus[0-9]*|\
an[0-9]*|\
ath[0-9]*|\
ipw[0-9]*|\
ipfw[0-9]*|\
iwi[0-9]*|\
iwn[0-9]*|\
ral[0-9]*|\
wi[0-9]*|\
wl[0-9]*|\
wpi[0-9]*)
return 0
;;
esac
return 1
}
# ipv6if if
# Returns 0 if the interface should be configured for IPv6 and
# 1 otherwise.
ipv6if()
{
local _if _tmpargs i
_if=$1
if ! afexists inet6; then
return 1
fi
# lo0 is always IPv6-enabled
case $_if in
lo0)
return 0
;;
esac
case "${ipv6_network_interfaces}" in
$_if|"$_if "*|*" $_if"|*" $_if "*|[Aa][Uu][Tt][Oo])
# True if $ifconfig_IF_ipv6 is defined.
_tmpargs=`_ifconfig_getargs $_if ipv6`
if [ -n "${_tmpargs}" ]; then
return 0
fi
# True if $ipv6_prefix_IF is defined.
_tmpargs=`get_if_var $_if ipv6_prefix_IF`
if [ -n "${_tmpargs}" ]; then
return 0
fi
# backward compatibility: True if $ipv6_ifconfig_IF is defined.
_tmpargs=`get_if_var $_if ipv6_ifconfig_IF`
if [ -n "${_tmpargs}" ]; then
return 0
fi
;;
esac
return 1
}
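# Illustrative sketch (hypothetical rc.conf entries): with
#
#   ipv6_network_interfaces="auto"
#   ifconfig_em0_ipv6="inet6 2001:db8::1/64"
#
# "ipv6if em0" returns 0.  An interface that has none of
# $ifconfig_IF_ipv6, $ipv6_prefix_IF, or the legacy $ipv6_ifconfig_IF
# set returns 1.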
# ipv6_autoconfif if
# Returns 0 if the interface should be configured for IPv6 with
# Stateless Address Configuration; 1 otherwise.
ipv6_autoconfif()
{
local _if _tmpargs _arg
_if=$1
case $_if in
lo[0-9]*|\
stf[0-9]*|\
lp[0-9]*|\
sl[0-9]*)
return 1
;;
esac
if noafif $_if; then
return 1
fi
if ! ipv6if $_if; then
return 1
fi
if checkyesno ipv6_gateway_enable; then
return 1
fi
_tmpargs=`get_if_var $_if ipv6_prefix_IF`
if [ -n "${_tmpargs}" ]; then
return 1
fi
# backward compatibility: $ipv6_enable
case $ipv6_enable in
[Yy][Ee][Ss]|[Tt][Rr][Uu][Ee]|[Oo][Nn]|1)
if checkyesno ipv6_gateway_enable; then
return 1
fi
case $1 in
bridge[0-9]*)
# No accept_rtadv by default on if_bridge(4)
# to avoid a conflict with the member
# interfaces.
return 1
;;
*)
return 0
;;
esac
;;
esac
_tmpargs=`_ifconfig_getargs $_if ipv6`
for _arg in $_tmpargs; do
case $_arg in
accept_rtadv)
return 0
;;
esac
done
# backward compatibility: $ipv6_ifconfig_IF
_tmpargs=`get_if_var $_if ipv6_ifconfig_IF`
for _arg in $_tmpargs; do
case $_arg in
accept_rtadv)
return 0
;;
esac
done
return 1
}
# ifexists if
# Returns 0 if the interface exists and 1 otherwise.
ifexists()
{
[ -z "$1" ] && return 1
${IFCONFIG_CMD} -n $1 > /dev/null 2>&1
}
# ipv4_up if
# add IPv4 addresses to the interface $if
ipv4_up()
{
local _if _ret
_if=$1
_ret=1
# Add 127.0.0.1/8 to lo0 unless otherwise specified.
if [ "${_if}" = "lo0" ]; then
ifconfig_args=`get_if_var ${_if} ifconfig_IF`
if [ -z "${ifconfig_args}" ]; then
${IFCONFIG_CMD} ${_if} inet 127.0.0.1/8 alias
fi
fi
ifalias ${_if} inet alias && _ret=0
return $_ret
}
# ipv6_up if
# add IPv6 addresses to the interface $if
ipv6_up()
{
local _if _ret
_if=$1
_ret=1
if ! ipv6if $_if; then
return 0
fi
ifalias ${_if} inet6 alias && _ret=0
ipv6_prefix_hostid_addr_common ${_if} alias && _ret=0
ipv6_accept_rtadv_up ${_if} && _ret=0
return $_ret
}
# ipv4_down if
# remove IPv4 addresses from the interface $if
ipv4_down()
{
local _if _ifs _ret inetList oldifs _inet
_if=$1
_ifs="^"
_ret=1
ifalias ${_if} inet -alias && _ret=0
inetList="`${IFCONFIG_CMD} ${_if} | grep 'inet ' | tr "\n\t" "$_ifs"`"
oldifs="$IFS"
IFS="$_ifs"
for _inet in $inetList ; do
# get rid of extraneous line
case $_inet in
inet\ *) ;;
*) continue ;;
esac
_inet=`expr "$_inet" : '.*\(inet \([0-9]\{1,3\}\.\)\{3\}[0-9]\{1,3\}\).*'`
IFS="$oldifs"
${IFCONFIG_CMD} ${_if} ${_inet} delete
IFS="$_ifs"
_ret=0
done
IFS="$oldifs"
return $_ret
}
# ipv6_down if
# remove IPv6 addresses from the interface $if
ipv6_down()
{
local _if _ifs _ret inetList oldifs _inet6
_if=$1
_ifs="^"
_ret=1
if ! ipv6if $_if; then
return 0
fi
ipv6_accept_rtadv_down ${_if} && _ret=0
ipv6_prefix_hostid_addr_common ${_if} -alias && _ret=0
ifalias ${_if} inet6 -alias && _ret=0
inetList="`${IFCONFIG_CMD} ${_if} | grep 'inet6 ' | tr "\n\t" "$_ifs"`"
oldifs="$IFS"
IFS="$_ifs"
for _inet6 in $inetList ; do
# get rid of extraneous line
case $_inet6 in
inet6\ *) ;;
*) continue ;;
esac
_inet6=`expr "$_inet6" : '.*\(inet6 \([0-9a-f:]*\)\).*'`
IFS="$oldifs"
${IFCONFIG_CMD} ${_if} ${_inet6} -alias
IFS="$_ifs"
_ret=0
done
IFS="$oldifs"
return $_ret
}
# ifalias if af action
# Configure or remove aliases for network interface $if.
# It returns 0 if at least one alias was configured or
# removed, or 1 if there were none.
#
ifalias()
{
local _ret
_ret=1
afexists $2 || return $_ret
case "$2" in
inet|inet6|link|ether)
ifalias_af_common $1 $2 $3 && _ret=0
;;
esac
return $_ret
}
# ifalias_expand_addr af action addr
# Expand address range ("N-M") specification in addr.
# "addr" must not include an address-family keyword.
# The results will include an address-family keyword.
#
ifalias_expand_addr()
{
local _af _action
_af=$1
_action=$2
shift 2
afexists $_af || return
ifalias_expand_addr_$_af $_action $*
}
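# Illustrative sketch (hypothetical alias entry): a range such as
#
#   ifconfig_em0_alias0="inet 192.0.2.10-13/28"
#
# is expanded by ifalias_expand_addr() into four addresses.  The first
# keeps the given prefix length and the remaining ones are added as /32
# host aliases:
#
#   inet 192.0.2.10/28 inet 192.0.2.11/32 inet 192.0.2.12/32 inet 192.0.2.13/32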
# ifalias_expand_addr_inet action addr
# Helper function for ifalias_expand_addr(). Handles IPv4.
#
ifalias_expand_addr_inet()
{
local _action _arg _cidr _cidr_addr _exargs
local _ipaddr _plen _range _iphead _iptail _iplow _iphigh _ipcount
local _retstr _c
_action=$1
_arg=$2
shift 2
_exargs=$*
_retstr=
case $_action:$_arg:$_exargs in
*:*--*) return ;; # invalid
tmp:*[0-9]-[0-9]*:*) # to be expanded
_action="alias"
;;
*:*[0-9]-[0-9]*:*) # to be expanded
;;
tmp:*:*netmask*) # already expanded w/ netmask option
echo ${_arg%/[0-9]*} $_exargs && return
;;
tmp:*:*) # already expanded w/o netmask option
echo $_arg $_exargs && return
;;
*:*:*netmask*) # already expanded w/ netmask option
echo inet ${_arg%/[0-9]*} $_exargs && return
;;
*:*:*) # already expanded w/o netmask option
echo inet $_arg $_exargs && return
;;
esac
for _cidr in $_arg; do
_ipaddr=${_cidr%%/*}
_plen=${_cidr##*/}
# When subnet prefix length is not specified, use /32.
case $_plen in
$_ipaddr) _plen=32 ;; # "/" character not found
esac
OIFS=$IFS
IFS=. set -- $_ipaddr
_range=
_iphead=
_iptail=
for _c in $@; do
case $_range:$_c in
:[0-9]*-[0-9]*)
_range=$_c
;;
:*)
_iphead="${_iphead}${_iphead:+.}${_c}"
;;
*:*)
_iptail="${_iptail}${_iptail:+.}${_c}"
;;
esac
done
IFS=$OIFS
_iplow=${_range%-*}
_iphigh=${_range#*-}
# clear netmask when removing aliases
if [ "$_action" = "-alias" ]; then
_plen=""
fi
_ipcount=$_iplow
while [ "$_ipcount" -le "$_iphigh" ]; do
_retstr="${_retstr} ${_iphead}${_iphead:+.}${_ipcount}${_iptail:+.}${_iptail}${_plen:+/}${_plen}"
if [ $_ipcount -gt $(($_iplow + $netif_ipexpand_max)) ]; then
warn "Range specification is too large (${_iphead}${_iphead:+.}${_iplow}${_iptail:+.}${_iptail}-${_iphead}${_iphead:+.}${_iphigh}${_iptail:+.}${_iptail}). ${_iphead}${_iphead:+.}${_iplow}${_iptail:+.}${_iptail}-${_iphead}${_iphead:+.}${_ipcount}${_iptail:+.}${_iptail} was processed. Increase \$netif_ipexpand_max in rc.conf."
break
else
_ipcount=$(($_ipcount + 1))
fi
# Forcibly set /32 for remaining aliases.
_plen=32
done
done
for _c in $_retstr; do
ifalias_expand_addr_inet $_action $_c $_exargs
done
}
# ifalias_expand_addr_inet6 action addr
# Helper function for ifalias_expand_addr(). Handles IPv6.
#
ifalias_expand_addr_inet6()
{
local _action _arg _cidr _cidr_addr _exargs
local _ipaddr _plen _ipleft _ipright _iplow _iphigh _ipcount
local _ipv4part
local _retstr _c
_action=$1
_arg=$2
shift 2
_exargs=$*
_retstr=
case $_action:$_arg:$_exargs in
*:*--*:*) return ;; # invalid
tmp:*[0-9a-zA-Z]-[0-9a-zA-Z]*:*)# to be expanded
_action="alias"
;;
*:*[0-9a-zA-Z]-[0-9a-zA-Z]*:*) # to be expanded
;;
tmp:*:*prefixlen*) # already expanded w/ prefixlen option
echo ${_arg%/[0-9]*} $_exargs && return
;;
tmp:*:*) # already expanded w/o prefixlen option
echo $_arg $_exargs && return
;;
*:*:*prefixlen*) # already expanded w/ prefixlen option
echo inet6 ${_arg%/[0-9]*} $_exargs && return
;;
*:*:*) # already expanded w/o prefixlen option
echo inet6 $_arg $_exargs && return
;;
esac
for _cidr in $_arg; do
_ipaddr="${_cidr%%/*}"
_plen="${_cidr##*/}"
case $_action:$_ipaddr:$_cidr in
-alias:*:*) unset _plen ;;
*:$_cidr:$_ipaddr) unset _plen ;;
esac
if [ "${_ipaddr%:*.*.*.*}" = "$_ipaddr" ]; then
# Handle !v4mapped && !v4compat addresses.
# The default prefix length is 64.
case $_ipaddr:$_cidr in
$_cidr:$_ipaddr) _plen="64" ;;
esac
_ipleft=${_ipaddr%-*}
_ipright=${_ipaddr#*-}
_iplow=${_ipleft##*:}
_iphigh=${_ipright%%:*}
_ipleft=${_ipleft%:*}
_ipright=${_ipright#*:}
if [ "$_iphigh" = "$_ipright" ]; then
unset _ipright
else
_ipright=:$_ipright
fi
if [ -n "$_iplow" -a -n "$_iphigh" ]; then
_iplow=$((0x$_iplow))
_iphigh=$((0x$_iphigh))
_ipcount=$_iplow
while [ $_ipcount -le $_iphigh ]; do
_r=`printf "%s:%04x%s%s" \
$_ipleft $_ipcount $_ipright \
${_plen:+/}$_plen`
_retstr="$_retstr $_r"
if [ $_ipcount -gt $(($_iplow + $netif_ipexpand_max)) ]
then
warn "Range specification is too large $(printf '(%s:%x%s-%s:%x%s)' "$_ipleft" "$_iplow" "$_ipright" "$_ipleft" "$_iphigh" "$_ipright"). $(printf '%s:%x%s-%s:%x%s' "$_ipleft" "$_iplow" "$_ipright" "$_ipleft" "$_ipcount" "$_ipright") was processed. Increase \$netif_ipexpand_max in rc.conf."
break
else
_ipcount=$(($_ipcount + 1))
fi
done
else
_retstr="${_ipaddr}${_plen:+/}${_plen}"
fi
for _c in $_retstr; do
ifalias_expand_addr_inet6 $_action $_c $_exargs
done
else
# v4mapped/v4compat should handle as an IPv4 alias
_ipv4part=${_ipaddr##*:}
# Adjust prefix length if any. If not, set the
# default prefix length as 32.
case $_ipaddr:$_cidr in
$_cidr:$_ipaddr) _plen=32 ;;
*) _plen=$(($_plen - 96)) ;;
esac
_retstr=`ifalias_expand_addr_inet \
tmp ${_ipv4part}${_plen:+/}${_plen}`
for _c in $_retstr; do
ifalias_expand_addr_inet $_action $_c $_exargs
done
fi
done
}
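# Illustrative sketch (hypothetical alias entry): a range in the last
# 16-bit group, e.g.
#
#   ifconfig_em0_alias1="inet6 2001:db8::a-c/64"
#
# expands to 2001:db8::000a/64, 2001:db8::000b/64 and 2001:db8::000c/64;
# ifalias_expand_addr_inet6() iterates the low and high parts of the
# range as hex numbers and re-prints them with the "%04x" format above.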
# ifalias_af_common_handler if af action args
# Helper function for ifalias_af_common().
#
ifalias_af_common_handler()
{
local _ret _if _af _action _args _c _tmpargs
_ret=1
_if=$1
_af=$2
_action=$3
shift 3
_args=$*
case $_args in
${_af}\ *) ;;
*) return ;;
esac
# link(ether) does not support address removal.
case $_af:$_action in
link:-alias|ether:-alias) return ;;
esac
_tmpargs=
for _c in $_args; do
case $_c in
${_af})
case $_tmpargs in
${_af}\ *[0-9a-fA-F]-*)
ifalias_af_common_handler $_if $_af $_action \
`ifalias_expand_addr $_af $_action ${_tmpargs#${_af}\ }`
;;
${_af}\ *)
${IFCONFIG_CMD} $_if $_tmpargs $_action && _ret=0
;;
esac
_tmpargs=$_af
;;
*)
_tmpargs="$_tmpargs $_c"
;;
esac
done
# Process the last component if any.
if [ -n "$_tmpargs" ]; then
case $_tmpargs in
${_af}\ *[0-9a-fA-F]-*)
ifalias_af_common_handler $_if $_af $_action \
`ifalias_expand_addr $_af $_action ${_tmpargs#${_af}\ }`
;;
${_af}\ *)
${IFCONFIG_CMD} $_if $_tmpargs $_action && _ret=0
;;
esac
fi
return $_ret
}
# ifalias_af_common if af action
# Helper function for ifalias().
#
ifalias_af_common()
{
local _ret _if _af _action alias ifconfig_args _aliasn _c _tmpargs _iaf
local _vif _punct=".-/+"
_ret=1
_aliasn=
_if=$1
_af=$2
_action=$3
# Normalize $_if before using it in a pattern to list_vars()
ltr "$_if" "$_punct" "_" _vif
# ifconfig_IF_aliasN which starts with $_af
for alias in `list_vars ifconfig_${_vif}_alias[0-9]\* |
sort_lite -nk1.$((9+${#_vif}+7))`
do
eval ifconfig_args=\"\$$alias\"
_iaf=
case $ifconfig_args in
inet\ *) _iaf=inet ;;
inet6\ *) _iaf=inet6 ;;
link\ *) _iaf=link ;;
ether\ *) _iaf=ether ;;
esac
case ${_af}:${_action}:${_iaf}:"${ifconfig_args}" in
${_af}:*:${_af}:*)
_aliasn="$_aliasn $ifconfig_args"
;;
${_af}:*:"":"")
break
;;
inet:alias:"":*)
_aliasn="$_aliasn inet $ifconfig_args"
warn "\$${alias} needs leading" \
"\"inet\" keyword for an IPv4 address."
esac
done
# backward compatibility: ipv6_ifconfig_IF_aliasN.
case $_af in
inet6)
for alias in `list_vars ipv6_ifconfig_${_vif}_alias[0-9]\* |
sort_lite -nk1.$((14+${#_vif}+7))`
do
eval ifconfig_args=\"\$$alias\"
case ${_action}:"${ifconfig_args}" in
*:"")
break
;;
alias:*)
_aliasn="${_aliasn} inet6 ${ifconfig_args}"
warn "\$${alias} is obsolete. " \
"Use ifconfig_${_vif}_aliasN instead."
;;
esac
done
esac
# backward compatibility: ipv4_addrs_IF.
for _tmpargs in `get_if_var $_if ipv4_addrs_IF`; do
_aliasn="$_aliasn inet $_tmpargs"
done
# Handle ifconfig_IF_aliases, ifconfig_IF_aliasN, and the others.
_tmpargs=
for _c in `get_if_var $_if ifconfig_IF_aliases` $_aliasn; do
case $_c in
inet|inet6|link|ether)
case $_tmpargs in
${_af}\ *)
eval ifalias_af_common_handler $_if $_af $_action $_tmpargs && _ret=0
;;
esac
_tmpargs=$_c
;;
*)
_tmpargs="$_tmpargs $_c"
esac
done
# Process the last component
case $_tmpargs in
${_af}\ *)
ifalias_af_common_handler $_if $_af $_action $_tmpargs && _ret=0
;;
esac
return $_ret
}
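# Illustrative sketch (hypothetical rc.conf entries): all of the
# following forms are collected by ifalias_af_common() for the requested
# address family:
#
#   ifconfig_em0_alias0="inet 192.0.2.20/32"
#   ifconfig_em0_alias1="inet6 2001:db8::20/64"
#   ifconfig_em0_aliases="inet 192.0.2.21/32 inet 192.0.2.22/32"
#
# The numbered ifconfig_IF_aliasN entries are processed in numeric order.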
# ipv6_prefix_hostid_addr_common if action
# Add or remove IPv6 prefix + hostid addr on the interface $if
#
ipv6_prefix_hostid_addr_common()
{
local _if _action prefix j
_if=$1
_action=$2
prefix=`get_if_var ${_if} ipv6_prefix_IF`
if [ -n "${prefix}" ]; then
for j in ${prefix}; do
# The default prefixlen is 64.
plen=${j#*/}
case $j:$plen in
$plen:$j) plen=64 ;;
*) j=${j%/*} ;;
esac
# Normalize the last part by removing ":"
j=${j%::*}
j=${j%:}
${IFCONFIG_CMD} ${_if} inet6 $j:: \
prefixlen $plen eui64 ${_action}
# if I am a router, add subnet router
# anycast address (RFC 2373).
if checkyesno ipv6_gateway_enable; then
${IFCONFIG_CMD} ${_if} inet6 $j:: \
prefixlen $plen ${_action} anycast
fi
done
fi
}
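# Illustrative sketch (hypothetical rc.conf entry): with
#
#   ipv6_prefix_em0="2001:db8:1 2001:db8:2"
#
# ipv6_prefix_hostid_addr_common() adds 2001:db8:1::/64 and
# 2001:db8:2::/64 with an EUI-64 host part, plus the subnet router
# anycast addresses when ipv6_gateway_enable is set to YES.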
# ipv6_accept_rtadv_up if
# Enable accepting Router Advertisement and send Router
# Solicitation message
ipv6_accept_rtadv_up()
{
if ipv6_autoconfif $1; then
${IFCONFIG_CMD} $1 inet6 accept_rtadv up
if ! checkyesno rtsold_enable; then
rtsol ${rtsol_flags} $1
fi
fi
}
# ipv6_accept_rtadv_down if
# Disable accepting Router Advertisement
ipv6_accept_rtadv_down()
{
if ipv6_autoconfif $1; then
${IFCONFIG_CMD} $1 inet6 -accept_rtadv
fi
}
# ifscript_up if
# Evaluate a startup script for the $if interface.
# It returns 0 if a script was found and processed or
# 1 if no script was found.
#
ifscript_up()
{
if [ -r /etc/start_if.$1 ]; then
. /etc/start_if.$1
return 0
else
return 1
fi
}
# ifscript_down if
# Evaluate a shutdown script for the $if interface.
# It returns 0 if a script was found and processed or
# 1 if no script was found.
#
ifscript_down()
{
if [ -r /etc/stop_if.$1 ]; then
. /etc/stop_if.$1
return 0
else
return 1
fi
}
+# wlan_up
+# Create IEEE802.11 interfaces.
+#
+wlan_up()
+{
+ local _list _iflist parent child_wlans child create_args debug_flags
+ _list=
+ _iflist=$*
+
+ # Parse wlans_$parent="$child ..."
+ for parent in `set | sed -nE 's/wlans_([a-z]+[0-9]+)=.*/\1/p'`; do
+ child_wlans=`get_if_var $parent wlans_IF`
+ for child in ${child_wlans}; do
+ create_args="wlandev $parent `get_if_var $child create_args_IF`"
+ debug_flags="`get_if_var $child wlandebug_IF`"
+ case $_iflist in
+ ""|$child|$child\ *|*\ $child\ *|*\ $child) ;;
+ *) continue ;;
+ esac
+ # Skip if ${child} already exists.
+ if ${IFCONFIG_CMD} $child > /dev/null 2>&1; then
+ continue
+ fi
+ if expr $child : 'wlan[0-9][0-9]*$' >/dev/null 2>&1; then
+ ${IFCONFIG_CMD} $child create ${create_args} && cfg=0
+ if [ $? -eq 0 ]; then
+ _list="$_list $child"
+ fi
+ if [ -n "${debug_flags}" ]; then
+ wlandebug -i $child ${debug_flags}
+ fi
+ else
+ i=`${IFCONFIG_CMD} wlan create ${create_args}`
+ # XXXGL: wlandebug should accept any name
+ if [ -n "${debug_flags}" ]; then
+ wlandebug -i $i ${debug_flags}
+ fi
+ ${IFCONFIG_CMD} $i name $child && cfg=0
+ if [ $? -eq 0 ]; then
+ _list="$_list $child"
+ fi
+ fi
+ done
+ done
+ if [ -n "${_list# }" ]; then
+ echo "Created wlan(4) interfaces: ${_list# }."
+ fi
+ debug "Created wlan(4)s: ${_list# }"
+}
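+# Illustrative sketch (hypothetical rc.conf entries): with
+#
+#   wlans_ath0="wlan0"
+#   create_args_wlan0="wlanmode sta country DE"
+#   ifconfig_wlan0="WPA DHCP"
+#
+# wlan_up creates wlan0 on top of ath0; no cloned_interfaces entry is
+# needed for wlan(4) children.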
+
+# wlan_down
+# Destroy IEEE802.11 interfaces.
+#
+wlan_down()
+{
+ local _list _iflist parent child_wlans child
+ _list=
+ _iflist=$*
+
+ # Parse wlans_$parent="$child ..."
+ for parent in `set | sed -nE 's/wlans_([a-z]+[0-9]+)=.*/\1/p'`; do
+ child_wlans=`get_if_var $parent wlans_IF`
+ for child in ${child_wlans}; do
+ case $_iflist in
+ ""|$child|$child\ *|*\ $child\ *|*\ $child) ;;
+ *) continue ;;
+ esac
+ # Skip if ${child} doesn't exist.
+ if ! ${IFCONFIG_CMD} $child > /dev/null 2>&1; then
+ continue
+ fi
+ ${IFCONFIG_CMD} -n ${child} destroy
+ if [ $? -eq 0 ]; then
+ _list="$_list $child"
+ fi
+ done
+ done
+ if [ -n "${_list# }" ]; then
+ echo "Destroyed wlan(4) interfaces: ${_list# }."
+ fi
+ debug "Destroyed wlan(4)s: ${_list# }"
+}
+
# clone_up
# Create cloneable interfaces.
#
clone_up()
{
local _list ifn ifopt _iflist _n tmpargs
_list=
_iflist=$*
# create_args_IF
for ifn in ${cloned_interfaces}; do
# Parse ifn:ifopt.
OIFS=$IFS; IFS=:; set -- $ifn; ifn=$1; ifopt=$2; IFS=$OIFS
case $_iflist in
""|$ifn|$ifn\ *|*\ $ifn\ *|*\ $ifn) ;;
*) continue ;;
esac
case $ifn in
epair[0-9]*)
# epair(4) uses epair[0-9] for creation and
# epair[0-9][ab] for configuration.
#
# Skip if ${ifn}a or ${ifn}b already exist.
if ${IFCONFIG_CMD} ${ifn}a > /dev/null 2>&1; then
continue
elif ${IFCONFIG_CMD} ${ifn}b > /dev/null 2>&1; then
continue
fi
${IFCONFIG_CMD} ${ifn} create \
`get_if_var ${ifn} create_args_IF`
if [ $? -eq 0 ]; then
_list="$_list ${ifn}a ${ifn}b"
fi
;;
*)
# Skip if ${ifn} already exists.
if ${IFCONFIG_CMD} $ifn > /dev/null 2>&1; then
continue
fi
${IFCONFIG_CMD} ${ifn} create \
`get_if_var ${ifn} create_args_IF`
if [ $? -eq 0 ]; then
_list="$_list $ifn"
fi
esac
done
if [ -n "$gif_interfaces" ]; then
warn "\$gif_interfaces is obsolete. Use \$cloned_interfaces instead."
fi
for ifn in ${gif_interfaces}; do
# Parse ifn:ifopt.
OIFS=$IFS; IFS=:; set -- $ifn; ifn=$1; ifopt=$2; IFS=$OIFS
case $_iflist in
""|$ifn|$ifn\ *|*\ $ifn\ *|*\ $ifn) ;;
*) continue ;;
esac
# Skip if ifn already exists.
if ${IFCONFIG_CMD} $ifn > /dev/null 2>&1; then
continue
fi
case $ifn in
gif[0-9]*)
${IFCONFIG_CMD} $ifn create
;;
*)
_n=$(${IFCONFIG_CMD} gif create)
${IFCONFIG_CMD} $_n name $ifn
;;
esac
if [ $? -eq 0 ]; then
_list="$_list $ifn"
fi
tmpargs=$(get_if_var $ifn gifconfig_IF)
eval ifconfig_${ifn}=\"tunnel \$tmpargs\"
done
if [ -n "${_list# }" ]; then
echo "Created clone interfaces: ${_list# }."
fi
debug "Cloned: ${_list# }"
}
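# Illustrative sketch (hypothetical rc.conf entries):
#
#   cloned_interfaces="bridge0 gif0 epair0"
#   create_args_gif0="mtu 1400"
#
# clone_up() creates bridge0, gif0 and the epair0a/epair0b pair at
# startup; the optional create_args_IF value is appended to the
# "ifconfig IF create" command line.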
# clone_down
# Destroy cloned interfaces. Destroyed interfaces are echoed to
# standard output.
#
clone_down()
{
local _list ifn _difn ifopt _iflist _sticky
_list=
_iflist=$*
: ${cloned_interfaces_sticky:=NO}
if checkyesno cloned_interfaces_sticky; then
_sticky=1
else
_sticky=0
fi
for ifn in ${cloned_interfaces} ${gif_interfaces}; do
# Parse ifn:ifopt.
OIFS=$IFS; IFS=:; set -- $ifn; ifn=$1; ifopt=$2; IFS=$OIFS
case $ifopt:$_sticky in
sticky:*) continue ;; # :sticky => not destroy
nosticky:*) ;; # :nosticky => destroy
*:1) continue ;; # global sticky knob == 1
esac
case $_iflist in
""|$ifn|$ifn\ *|*\ $ifn\ *|*\ $ifn) ;;
*) continue ;;
esac
case $ifn in
epair[0-9]*)
# Note: epair(4) uses epair[0-9] for removal and
# epair[0-9][ab] for configuration.
#
# Skip if both of ${ifn}a and ${ifn}b do not exist.
if ${IFCONFIG_CMD} ${ifn}a > /dev/null 2>&1; then
_difn=${ifn}a
elif ${IFCONFIG_CMD} ${ifn}b > /dev/null 2>&1; then
_difn=${ifn}b
else
continue
fi
${IFCONFIG_CMD} -n $_difn destroy
if [ $? -eq 0 ]; then
_list="$_list ${ifn}a ${ifn}b"
fi
;;
*)
# Skip if ifn does not exist.
if ! ${IFCONFIG_CMD} $ifn > /dev/null 2>&1; then
continue
fi
${IFCONFIG_CMD} -n ${ifn} destroy
if [ $? -eq 0 ]; then
_list="$_list $ifn"
fi
;;
esac
done
if [ -n "${_list# }" ]; then
echo "Destroyed clone interfaces: ${_list# }."
fi
debug "Destroyed clones: ${_list# }"
}
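# Illustrative sketch (hypothetical rc.conf entries):
#
#   cloned_interfaces="bridge0:sticky tap0"
#   cloned_interfaces_sticky="NO"
#
# "service netif stop" (which calls clone_down) destroys tap0 but keeps
# bridge0; with cloned_interfaces_sticky="YES" every clone is kept
# unless it is marked ":nosticky".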
# childif_create
# Create and configure child interfaces. Return 0 if child
# interfaces are created.
+#
+# XXXGL: the wlan code in this function is superseded by wlan_up(),
+# and will go away soon.
#
childif_create()
{
local cfg child child_vlans child_wlans create_args debug_flags ifn i
cfg=1
ifn=$1
# Create wireless interfaces
child_wlans=`get_if_var $ifn wlans_IF`
for child in ${child_wlans}; do
create_args="wlandev $ifn `get_if_var $child create_args_IF`"
debug_flags="`get_if_var $child wlandebug_IF`"
if expr $child : 'wlan[0-9][0-9]*$' >/dev/null 2>&1; then
${IFCONFIG_CMD} $child create ${create_args} && cfg=0
if [ -n "${debug_flags}" ]; then
wlandebug -i $child ${debug_flags}
fi
else
i=`${IFCONFIG_CMD} wlan create ${create_args}`
if [ -n "${debug_flags}" ]; then
wlandebug -i $i ${debug_flags}
fi
${IFCONFIG_CMD} $i name $child && cfg=0
fi
if autoif $child; then
ifn_start $child
fi
done
# Create vlan interfaces
child_vlans=`get_if_var $ifn vlans_IF`
if [ -n "${child_vlans}" ]; then
load_kld if_vlan
fi
for child in ${child_vlans}; do
if expr $child : '[1-9][0-9]*$' >/dev/null 2>&1; then
child="${ifn}.${child}"
create_args=`get_if_var $child create_args_IF`
${IFCONFIG_CMD} $child create ${create_args} && cfg=0
else
create_args="vlandev $ifn `get_if_var $child create_args_IF`"
if expr $child : 'vlan[0-9][0-9]*$' >/dev/null 2>&1; then
${IFCONFIG_CMD} $child create ${create_args} && cfg=0
else
i=`${IFCONFIG_CMD} vlan create ${create_args}`
${IFCONFIG_CMD} $i name $child && cfg=0
fi
fi
if autoif $child; then
ifn_start $child
fi
done
return ${cfg}
}
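# Illustrative sketch (hypothetical rc.conf entries):
#
#   vlans_em0="100 mgmt"
#   create_args_mgmt="vlan 200"
#
# childif_create() creates em0.100 directly from the numeric entry; for
# the named entry it creates a vlan(4) interface, tags it with VLAN 200
# via its create_args, and renames it to "mgmt".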
# childif_destroy
# Destroy child interfaces.
#
childif_destroy()
{
local cfg child child_vlans child_wlans ifn
cfg=1
child_wlans=`get_if_var $ifn wlans_IF`
for child in ${child_wlans}; do
if ! ifexists $child; then
continue
fi
${IFCONFIG_CMD} -n $child destroy && cfg=0
done
child_vlans=`get_if_var $ifn vlans_IF`
for child in ${child_vlans}; do
if expr $child : '[1-9][0-9]*$' >/dev/null 2>&1; then
child="${ifn}.${child}"
fi
if ! ifexists $child; then
continue
fi
${IFCONFIG_CMD} -n $child destroy && cfg=0
done
return ${cfg}
}
# ng_mkpeer
# Create netgraph nodes.
#
ng_mkpeer()
{
ngctl -f - 2> /dev/null <<EOF
mkpeer $*
msg dummy nodeinfo
EOF
}
# ng_create_one
# Create netgraph nodes.
#
ng_create_one()
{
local t
ng_mkpeer $* | while read line; do
t=`expr "${line}" : '.* name="\([a-z]*[0-9]*\)" .*'`
if [ -n "${t}" ]; then
echo ${t}
return
fi
done
}
# ifnet_rename [ifname]
# Rename interfaces if ifconfig_IF_name is defined.
#
ifnet_rename()
{
local _if _ifname
# ifconfig_IF_name
for _if in ${*:-$(${IFCONFIG_CMD} -l)}; do
_ifname=`get_if_var $_if ifconfig_IF_name`
if [ ! -z "$_ifname" ]; then
${IFCONFIG_CMD} $_if name $_ifname
fi
done
return 0
}
# list_net_interfaces type
# List all network interfaces. The type of interface returned
# can be controlled by the type argument. The type
# argument can be any of the following:
# nodhcp - all interfaces, excluding DHCP configured interfaces
# dhcp - list only DHCP configured interfaces
# noautoconf - all interfaces, excluding IPv6 Stateless
# Address Autoconf configured interfaces
# autoconf - list only IPv6 Stateless Address Autoconf
# configured interfaces
# If no argument is specified all network interfaces are output.
# Note that the list will include cloned interfaces if applicable.
# Cloned interfaces must already exist to have a chance to appear
# in the list if ${network_interfaces} is set to `auto'.
#
list_net_interfaces()
{
local type _tmplist _list _autolist _lo _if
type=$1
# Get a list of ALL the interfaces and make lo0 first if it's there.
#
_tmplist=
case ${network_interfaces} in
[Aa][Uu][Tt][Oo])
_autolist="`${IFCONFIG_CMD} -l`"
_lo=
for _if in ${_autolist} ; do
if autoif $_if; then
if [ "$_if" = "lo0" ]; then
_lo="lo0 "
else
_tmplist="${_tmplist} ${_if}"
fi
fi
done
_tmplist="${_lo}${_tmplist# }"
;;
*)
for _if in ${network_interfaces} ${cloned_interfaces}; do
# epair(4) uses epair[0-9] for creation and
# epair[0-9][ab] for configuration.
case $_if in
epair[0-9]*)
_tmplist="$_tmplist ${_if}a ${_if}b"
;;
*)
_tmplist="$_tmplist $_if"
;;
esac
done
#
# lo0 is effectively mandatory, so help prevent foot-shooting
#
case "$_tmplist" in
lo0|'lo0 '*|*' lo0'|*' lo0 '*)
# This is fine, do nothing
_tmplist="${_tmplist# }"
;;
*)
_tmplist="lo0 ${_tmplist# }"
;;
esac
;;
esac
_list=
case "$type" in
nodhcp)
for _if in ${_tmplist} ; do
if ! dhcpif $_if && \
[ -n "`_ifconfig_getargs $_if`" ]; then
_list="${_list# } ${_if}"
fi
done
;;
dhcp)
for _if in ${_tmplist} ; do
if dhcpif $_if; then
_list="${_list# } ${_if}"
fi
done
;;
noautoconf)
for _if in ${_tmplist} ; do
if ! ipv6_autoconfif $_if && \
[ -n "`_ifconfig_getargs $_if ipv6`" ]; then
_list="${_list# } ${_if}"
fi
done
;;
autoconf)
for _if in ${_tmplist} ; do
if ipv6_autoconfif $_if; then
_list="${_list# } ${_if}"
fi
done
;;
*)
_list=${_tmplist}
;;
esac
echo $_list
return 0
}
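# Illustrative sketch (assumed interface names): typical calls are
#
#   list_net_interfaces dhcp      # e.g. "em0 wlan0"
#   list_net_interfaces nodhcp    # statically configured interfaces
#
# The caller gets a space-separated list on stdout; when no type
# argument is given the list contains every interface, with lo0 first.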
# get_default_if -address_family
# Get the interface of the default route for the given address family.
# The -address_family argument must be suitable for passing to route(8).
#
get_default_if()
{
local routeget oldifs defif line
defif=
oldifs="$IFS"
IFS="
"
for line in `route -n get $1 default 2>/dev/null`; do
case $line in
*interface:*)
defif=${line##*: }
;;
esac
done
IFS=${oldifs}
echo $defif
}
# hexdigit arg
# Echo decimal number $arg (single digit) in hexadecimal format.
hexdigit()
{
printf '%x\n' "$1"
}
# hexprint arg
# Echo decimal number $arg (multiple digits) in hexadecimal format.
hexprint()
{
printf '%x\n' "$1"
}
is_wired_interface()
{
local media
case `${IFCONFIG_CMD} $1 2>/dev/null` in
*media:?Ethernet*) media=Ethernet ;;
esac
test "$media" = "Ethernet"
}
# network6_getladdr if [flag]
# Echo link-local address from $if if any.
# If flag is defined, tentative ones will be excluded.
network6_getladdr()
{
local _if _flag proto addr rest
_if=$1
_flag=$2
${IFCONFIG_CMD} $_if 2>/dev/null | while read proto addr rest; do
case "${proto}/${addr}/${_flag}/${rest}" in
inet6/fe80::*//*)
echo ${addr}
;;
inet6/fe80:://*tentative*) # w/o flag
sleep `${SYSCTL_N} net.inet6.ip6.dad_count`
network6_getladdr $_if $_flag
;;
inet6/fe80::/*/*tentative*) # w/ flag
echo ${addr}
;;
*)
continue
;;
esac
return
done
}
Index: head/etc/rc.d/netif
===================================================================
--- head/etc/rc.d/netif (revision 287196)
+++ head/etc/rc.d/netif (revision 287197)
@@ -1,259 +1,271 @@
#!/bin/sh
#
# Copyright (c) 2003 The FreeBSD Project. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE PROJECT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# $FreeBSD$
#
# PROVIDE: netif
# REQUIRE: atm1 FILESYSTEMS iovctl serial sppp sysctl
# REQUIRE: ipfilter ipfs
# KEYWORD: nojailvnet
. /etc/rc.subr
. /etc/network.subr
name="netif"
rcvar="${name}_enable"
start_cmd="netif_start"
stop_cmd="netif_stop"
+wlanup_cmd="wlan_up"
+wlandown_cmd="wlan_down"
cloneup_cmd="clone_up"
clonedown_cmd="clone_down"
clear_cmd="doclear"
vnetup_cmd="vnet_up"
vnetdown_cmd="vnet_down"
extra_commands="cloneup clonedown clear vnetup vnetdown"
cmdifn=
set_rcvar_obsolete ipv6_enable ipv6_activate_all_interfaces
set_rcvar_obsolete ipv6_prefer
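# Illustrative sketch (usage examples, not part of this revision): the
# commands defined above can be invoked directly and optionally limited
# to specific interfaces, e.g.
#
#   service netif cloneup
#   service netif clear em0
#   service netif restart wlan0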
netif_start()
{
local _if
# Set the list of interfaces to work on.
#
cmdifn=$*
if [ -z "$cmdifn" ]; then
#
# We're operating as a general network start routine.
#
# disable SIGINT (Ctrl-c) when running at startup
trap : 2
fi
+ # Create IEEE802.11 interfaces
+ wlan_up $cmdifn
+
# Create cloned interfaces
clone_up $cmdifn
# Rename interfaces.
ifnet_rename $cmdifn
# Configure the interface(s).
netif_common ifn_start $cmdifn
if [ -f /etc/rc.d/ipfilter ] ; then
# Resync ipfilter
/etc/rc.d/ipfilter quietresync
fi
if [ -f /etc/rc.d/bridge -a -n "$cmdifn" ] ; then
/etc/rc.d/bridge start $cmdifn
fi
if [ -f /etc/rc.d/routing -a -n "$cmdifn" ] ; then
for _if in $cmdifn; do
/etc/rc.d/routing start any $_if
done
fi
}
netif_stop()
{
_clone_down=1
+ _wlan_down=1
netif_stop0 $*
}
doclear()
{
_clone_down=
+ _wlan_down=
netif_stop0 $*
}
netif_stop0()
{
local _if
# Set the list of interfaces to work on.
#
cmdifn=$*
# Deconfigure the interface(s)
netif_common ifn_stop $cmdifn
+
+ # Destroy wlan interfaces
+ if [ -n "$_wlan_down" ]; then
+ wlan_down $cmdifn
+ fi
# Destroy cloned interfaces
if [ -n "$_clone_down" ]; then
clone_down $cmdifn
fi
if [ -f /etc/rc.d/routing -a -n "$cmdifn" ] ; then
for _if in $cmdifn; do
/etc/rc.d/routing stop any $_if
done
fi
}
vnet_up()
{
cmdifn=$*
netif_common ifn_vnetup $cmdifn
}
vnet_down()
{
cmdifn=$*
netif_common ifn_vnetdown $cmdifn
}
# netif_common routine
# Common configuration subroutine for network interfaces. This
# routine takes all the preparatory steps needed for configuring
# an interface and then calls $routine.
netif_common()
{
local _cooked_list _tmp_list _fail _func _ok _str _cmdifn
_func=
if [ -z "$1" ]; then
err 1 "netif_common(): No function name specified."
else
_func="$1"
shift
fi
# Set the scope of the command (all interfaces or just one).
#
_cooked_list=
_tmp_list=
_cmdifn=$*
if [ -n "$_cmdifn" ]; then
# Don't check that the interface(s) exist. We need to run
# the down code even when the interface doesn't exist to
# kill off wpa_supplicant.
# XXXBED: is this really true or does wpa_supplicant die?
# if so, we should get rid of the devd entry
_cooked_list="$_cmdifn"
else
_cooked_list="`list_net_interfaces`"
fi
# Expand epair[0-9] to epair[0-9][ab].
for ifn in $_cooked_list; do
case ${ifn#epair} in
[0-9]*[ab]) ;; # Skip epair[0-9]*[ab].
[0-9]*)
for _str in $_cooked_list; do
case $_str in
$ifn) _tmp_list="$_tmp_list ${ifn}a ${ifn}b" ;;
*) _tmp_list="$_tmp_list ${ifn}" ;;
esac
done
_cooked_list=${_tmp_list# }
;;
esac
done
_dadwait=
_fail=
_ok=
for ifn in ${_cooked_list# }; do
# Skip if ifn does not exist.
case $_func in
ifn_stop)
if ! ${IFCONFIG_CMD} $ifn > /dev/null 2>&1; then
warn "$ifn does not exist. Skipped."
_fail="${_fail} ${ifn}"
continue
fi
;;
esac
if ${_func} ${ifn} $2; then
_ok="${_ok} ${ifn}"
if ipv6if ${ifn}; then
_dadwait=1
fi
else
_fail="${_fail} ${ifn}"
fi
done
# inet6 address configuration needs sleep for DAD.
case ${_func}:${_dadwait} in
ifn_start:1|ifn_vnetup:1|ifn_vnetdown:1)
sleep `${SYSCTL_N} net.inet6.ip6.dad_count`
sleep 1
;;
esac
_str=
if [ -n "${_ok}" ]; then
case ${_func} in
ifn_start)
_str='Starting'
;;
ifn_stop)
_str='Stopping'
;;
ifn_vnetup)
_str='Moving'
;;
ifn_vnetdown)
_str='Reclaiming'
;;
esac
echo "${_str} Network:${_ok}."
case ${_func} in
ifn_vnetup)
# Clear _ok not to do "ifconfig $ifn"
# because $ifn is no longer in the current vnet.
_ok=
;;
esac
if check_startmsgs; then
for ifn in ${_ok}; do
/sbin/ifconfig ${ifn}
done
fi
fi
debug "The following interfaces were not configured: $_fail"
}
# Load the old "network" config file also for compatibility.
# This is needed for mfsBSD at least.
load_rc_config network
load_rc_config $name
run_rc_command $*
Index: head/sys/dev/ath/ath_rate/sample/sample.c
===================================================================
--- head/sys/dev/ath/ath_rate/sample/sample.c (revision 287196)
+++ head/sys/dev/ath/ath_rate/sample/sample.c (revision 287197)
@@ -1,1396 +1,1393 @@
/*-
* Copyright (c) 2005 John Bicket
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* John Bicket's SampleRate control algorithm.
*/
#include "opt_ath.h"
#include "opt_inet.h"
#include "opt_wlan.h"
#include "opt_ah.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_arp.h>
#include <net/ethernet.h> /* XXX for ether_sprintf */
#include <net80211/ieee80211_var.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_rate/sample/sample.h>
#include <dev/ath/ath_hal/ah_desc.h>
#include <dev/ath/ath_rate/sample/tx_schedules.h>
/*
* This file is an implementation of the SampleRate algorithm
* in "Bit-rate Selection in Wireless Networks"
* (http://www.pdos.lcs.mit.edu/papers/jbicket-ms.ps)
*
* SampleRate chooses the bit-rate it predicts will provide the most
* throughput based on estimates of the expected per-packet
* transmission time for each bit-rate. SampleRate periodically sends
* packets at bit-rates other than the current one to estimate when
* another bit-rate will provide better performance. SampleRate
* switches to another bit-rate when its estimated per-packet
* transmission time becomes smaller than the current bit-rate's.
* SampleRate reduces the number of bit-rates it must sample by
* eliminating those that could not perform better than the one
* currently being used. SampleRate also stops probing at a bit-rate
* if it experiences several successive losses.
*
* The difference between the algorithm in the thesis and the one in this
* file is that the one in this file uses a ewma instead of a window.
*
* Also, this implementation tracks the average transmission time for
* a few different packet sizes independently for each link.
*/
static void ath_rate_ctl_reset(struct ath_softc *, struct ieee80211_node *);
static __inline int
size_to_bin(int size)
{
#if NUM_PACKET_SIZE_BINS > 1
if (size <= packet_size_bins[0])
return 0;
#endif
#if NUM_PACKET_SIZE_BINS > 2
if (size <= packet_size_bins[1])
return 1;
#endif
#if NUM_PACKET_SIZE_BINS > 3
if (size <= packet_size_bins[2])
return 2;
#endif
#if NUM_PACKET_SIZE_BINS > 4
#error "add support for more packet sizes"
#endif
return NUM_PACKET_SIZE_BINS-1;
}
void
ath_rate_node_init(struct ath_softc *sc, struct ath_node *an)
{
/* NB: assumed to be zero'd by caller */
}
void
ath_rate_node_cleanup(struct ath_softc *sc, struct ath_node *an)
{
}
static int
dot11rate(const HAL_RATE_TABLE *rt, int rix)
{
if (rix < 0)
return -1;
return rt->info[rix].phy == IEEE80211_T_HT ?
rt->info[rix].dot11Rate : (rt->info[rix].dot11Rate & IEEE80211_RATE_VAL) / 2;
}
static const char *
dot11rate_label(const HAL_RATE_TABLE *rt, int rix)
{
if (rix < 0)
return "";
return rt->info[rix].phy == IEEE80211_T_HT ? "MCS" : "Mb ";
}
/*
* Return the rix with the lowest average_tx_time,
* or -1 if all the average_tx_times are 0.
*/
static __inline int
pick_best_rate(struct ath_node *an, const HAL_RATE_TABLE *rt,
int size_bin, int require_acked_before)
{
struct sample_node *sn = ATH_NODE_SAMPLE(an);
int best_rate_rix, best_rate_tt, best_rate_pct;
uint64_t mask;
int rix, tt, pct;
best_rate_rix = 0;
best_rate_tt = 0;
best_rate_pct = 0;
for (mask = sn->ratemask, rix = 0; mask != 0; mask >>= 1, rix++) {
if ((mask & 1) == 0) /* not a supported rate */
continue;
/* Don't pick a non-HT rate for a HT node */
if ((an->an_node.ni_flags & IEEE80211_NODE_HT) &&
(rt->info[rix].phy != IEEE80211_T_HT)) {
continue;
}
tt = sn->stats[size_bin][rix].average_tx_time;
if (tt <= 0 ||
(require_acked_before &&
!sn->stats[size_bin][rix].packets_acked))
continue;
/* Calculate percentage if possible */
if (sn->stats[size_bin][rix].total_packets > 0) {
pct = sn->stats[size_bin][rix].ewma_pct;
} else {
/* XXX for now, assume 95% ok */
pct = 95;
}
/* don't use a bit-rate that has been failing */
if (sn->stats[size_bin][rix].successive_failures > 3)
continue;
/*
* For HT, Don't use a bit rate that is much more
* lossy than the best.
*
* XXX this isn't optimal; it's just designed to
* eliminate rates that are going to be obviously
* worse.
*/
if (an->an_node.ni_flags & IEEE80211_NODE_HT) {
if (best_rate_pct > (pct + 50))
continue;
}
/*
* For non-MCS rates, use the current average txtime for
* comparison.
*/
if (! (an->an_node.ni_flags & IEEE80211_NODE_HT)) {
if (best_rate_tt == 0 || tt <= best_rate_tt) {
best_rate_tt = tt;
best_rate_rix = rix;
best_rate_pct = pct;
}
}
/*
* Since 2 stream rates have slightly higher TX times,
* allow a little bit of leeway. This should later
* be abstracted out and properly handled.
*/
if (an->an_node.ni_flags & IEEE80211_NODE_HT) {
if (best_rate_tt == 0 || (tt * 8 <= best_rate_tt * 10)) {
best_rate_tt = tt;
best_rate_rix = rix;
best_rate_pct = pct;
}
}
}
return (best_rate_tt ? best_rate_rix : -1);
}
/*
* Pick a good "random" bit-rate to sample other than the current one.
*/
static __inline int
pick_sample_rate(struct sample_softc *ssc , struct ath_node *an,
const HAL_RATE_TABLE *rt, int size_bin)
{
#define DOT11RATE(ix) (rt->info[ix].dot11Rate & IEEE80211_RATE_VAL)
#define MCS(ix) (rt->info[ix].dot11Rate | IEEE80211_RATE_MCS)
struct sample_node *sn = ATH_NODE_SAMPLE(an);
int current_rix, rix;
unsigned current_tt;
uint64_t mask;
current_rix = sn->current_rix[size_bin];
if (current_rix < 0) {
/* no successes yet, send at the lowest bit-rate */
/* XXX should return MCS0 if HT */
return 0;
}
current_tt = sn->stats[size_bin][current_rix].average_tx_time;
rix = sn->last_sample_rix[size_bin]+1; /* next sample rate */
mask = sn->ratemask &~ ((uint64_t) 1<<current_rix);/* don't sample current rate */
while (mask != 0) {
if ((mask & ((uint64_t) 1<<rix)) == 0) { /* not a supported rate */
nextrate:
if (++rix >= rt->rateCount)
rix = 0;
continue;
}
/*
* The following code stops trying to sample
* non-MCS rates when speaking to an MCS node.
* However, at least for CCK rates in 2.4GHz mode,
* the non-MCS rates MAY actually provide better
* PER at the very far edge of reception.
*
* However! Until ath_rate_form_aggr() grows
* some logic to not form aggregates if the
* selected rate is non-MCS, this won't work.
*
* So don't disable this code until you've taught
* ath_rate_form_aggr() to drop out if any of
* the selected rates are non-MCS.
*/
#if 1
/* if the node is HT and the rate isn't HT, don't bother sample */
if ((an->an_node.ni_flags & IEEE80211_NODE_HT) &&
(rt->info[rix].phy != IEEE80211_T_HT)) {
mask &= ~((uint64_t) 1<<rix);
goto nextrate;
}
#endif
/* this bit-rate is always worse than the current one */
if (sn->stats[size_bin][rix].perfect_tx_time > current_tt) {
mask &= ~((uint64_t) 1<<rix);
goto nextrate;
}
/* rarely sample bit-rates that fail a lot */
if (sn->stats[size_bin][rix].successive_failures > ssc->max_successive_failures &&
ticks - sn->stats[size_bin][rix].last_tx < ssc->stale_failure_timeout) {
mask &= ~((uint64_t) 1<<rix);
goto nextrate;
}
/*
* For HT, only sample a few rates on either side of the
* current rix; there's quite likely a lot of them.
*/
if (an->an_node.ni_flags & IEEE80211_NODE_HT) {
if (rix < (current_rix - 3) ||
rix > (current_rix + 3)) {
mask &= ~((uint64_t) 1<<rix);
goto nextrate;
}
}
/* Don't sample more than 2 rates higher for rates > 11M for non-HT rates */
if (! (an->an_node.ni_flags & IEEE80211_NODE_HT)) {
if (DOT11RATE(rix) > 2*11 && rix > current_rix + 2) {
mask &= ~((uint64_t) 1<<rix);
goto nextrate;
}
}
sn->last_sample_rix[size_bin] = rix;
return rix;
}
return current_rix;
#undef DOT11RATE
#undef MCS
}
static int
ath_rate_get_static_rix(struct ath_softc *sc, const struct ieee80211_node *ni)
{
#define RATE(_ix) (ni->ni_rates.rs_rates[(_ix)] & IEEE80211_RATE_VAL)
#define DOT11RATE(_ix) (rt->info[(_ix)].dot11Rate & IEEE80211_RATE_VAL)
#define MCS(_ix) (ni->ni_htrates.rs_rates[_ix] | IEEE80211_RATE_MCS)
const struct ieee80211_txparam *tp = ni->ni_txparms;
int srate;
/* Check MCS rates */
for (srate = ni->ni_htrates.rs_nrates - 1; srate >= 0; srate--) {
if (MCS(srate) == tp->ucastrate)
return sc->sc_rixmap[tp->ucastrate];
}
/* Check legacy rates */
for (srate = ni->ni_rates.rs_nrates - 1; srate >= 0; srate--) {
if (RATE(srate) == tp->ucastrate)
return sc->sc_rixmap[tp->ucastrate];
}
return -1;
#undef RATE
#undef DOT11RATE
#undef MCS
}
static void
ath_rate_update_static_rix(struct ath_softc *sc, struct ieee80211_node *ni)
{
struct ath_node *an = ATH_NODE(ni);
const struct ieee80211_txparam *tp = ni->ni_txparms;
struct sample_node *sn = ATH_NODE_SAMPLE(an);
if (tp != NULL && tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
/*
* A fixed rate is to be used; ucastrate is the IEEE code
* for this rate (sans basic bit). Check this against the
* negotiated rate set for the node. Note the fixed rate
* may not be available for various reasons so we only
* setup the static rate index if the lookup is successful.
*/
sn->static_rix = ath_rate_get_static_rix(sc, ni);
} else {
sn->static_rix = -1;
}
}
/*
* Pick a non-HT rate to begin using.
*/
static int
ath_rate_pick_seed_rate_legacy(struct ath_softc *sc, struct ath_node *an,
int frameLen)
{
#define DOT11RATE(ix) (rt->info[ix].dot11Rate & IEEE80211_RATE_VAL)
#define MCS(ix) (rt->info[ix].dot11Rate | IEEE80211_RATE_MCS)
#define RATE(ix) (DOT11RATE(ix) / 2)
int rix = -1;
const HAL_RATE_TABLE *rt = sc->sc_currates;
struct sample_node *sn = ATH_NODE_SAMPLE(an);
const int size_bin = size_to_bin(frameLen);
/* no packet has been sent successfully yet */
for (rix = rt->rateCount-1; rix > 0; rix--) {
if ((sn->ratemask & ((uint64_t) 1<<rix)) == 0)
continue;
/* Skip HT rates */
if (rt->info[rix].phy == IEEE80211_T_HT)
continue;
/*
* Pick the highest rate <= 36 Mbps
* that hasn't failed.
*/
if (DOT11RATE(rix) <= 72 &&
sn->stats[size_bin][rix].successive_failures == 0) {
break;
}
}
return rix;
#undef RATE
#undef MCS
#undef DOT11RATE
}
/*
* Pick a HT rate to begin using.
*
* Don't use any non-HT rates; only consider HT rates.
*/
static int
ath_rate_pick_seed_rate_ht(struct ath_softc *sc, struct ath_node *an,
int frameLen)
{
#define DOT11RATE(ix) (rt->info[ix].dot11Rate & IEEE80211_RATE_VAL)
#define MCS(ix) (rt->info[ix].dot11Rate | IEEE80211_RATE_MCS)
#define RATE(ix) (DOT11RATE(ix) / 2)
int rix = -1, ht_rix = -1;
const HAL_RATE_TABLE *rt = sc->sc_currates;
struct sample_node *sn = ATH_NODE_SAMPLE(an);
const int size_bin = size_to_bin(frameLen);
/* no packet has been sent successfully yet */
for (rix = rt->rateCount-1; rix > 0; rix--) {
/* Skip rates we can't use */
if ((sn->ratemask & ((uint64_t) 1<<rix)) == 0)
continue;
/* Keep a copy of the last seen HT rate index */
if (rt->info[rix].phy == IEEE80211_T_HT)
ht_rix = rix;
/* Skip non-HT rates */
if (rt->info[rix].phy != IEEE80211_T_HT)
continue;
/*
* Pick a medium-speed rate regardless of stream count
* which has not seen any failures. Higher rates may fail;
* we'll try them later.
*/
if (((MCS(rix) & 0x7) <= 4) &&
sn->stats[size_bin][rix].successive_failures == 0) {
break;
}
}
/*
* If all the MCS rates have successive failures, rix should be
* > 0; otherwise use the lowest MCS rix (hopefully MCS 0.)
*/
return MAX(rix, ht_rix);
#undef RATE
#undef MCS
#undef DOT11RATE
}
void
ath_rate_findrate(struct ath_softc *sc, struct ath_node *an,
int shortPreamble, size_t frameLen,
u_int8_t *rix0, int *try0, u_int8_t *txrate)
{
#define DOT11RATE(ix) (rt->info[ix].dot11Rate & IEEE80211_RATE_VAL)
#define MCS(ix) (rt->info[ix].dot11Rate | IEEE80211_RATE_MCS)
#define RATE(ix) (DOT11RATE(ix) / 2)
struct sample_node *sn = ATH_NODE_SAMPLE(an);
struct sample_softc *ssc = ATH_SOFTC_SAMPLE(sc);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
const HAL_RATE_TABLE *rt = sc->sc_currates;
const int size_bin = size_to_bin(frameLen);
int rix, mrr, best_rix, change_rates;
unsigned average_tx_time;
ath_rate_update_static_rix(sc, &an->an_node);
if (sn->currates != sc->sc_currates) {
device_printf(sc->sc_dev, "%s: currates != sc_currates!\n",
__func__);
rix = 0;
*try0 = ATH_TXMAXTRY;
goto done;
}
if (sn->static_rix != -1) {
rix = sn->static_rix;
*try0 = ATH_TXMAXTRY;
goto done;
}
mrr = sc->sc_mrretry;
/* XXX check HT protmode too */
if (mrr && (ic->ic_flags & IEEE80211_F_USEPROT && !sc->sc_mrrprot))
mrr = 0;
best_rix = pick_best_rate(an, rt, size_bin, !mrr);
if (best_rix >= 0) {
average_tx_time = sn->stats[size_bin][best_rix].average_tx_time;
} else {
average_tx_time = 0;
}
/*
* Limit the time measuring the performance of other tx
* rates to sample_rate% of the total transmission time.
*/
if (sn->sample_tt[size_bin] < average_tx_time * (sn->packets_since_sample[size_bin]*ssc->sample_rate/100)) {
rix = pick_sample_rate(ssc, an, rt, size_bin);
IEEE80211_NOTE(an->an_node.ni_vap, IEEE80211_MSG_RATECTL,
&an->an_node, "att %d sample_tt %d size %u sample rate %d %s current rate %d %s",
average_tx_time,
sn->sample_tt[size_bin],
bin_to_size(size_bin),
dot11rate(rt, rix),
dot11rate_label(rt, rix),
dot11rate(rt, sn->current_rix[size_bin]),
dot11rate_label(rt, sn->current_rix[size_bin]));
if (rix != sn->current_rix[size_bin]) {
sn->current_sample_rix[size_bin] = rix;
} else {
sn->current_sample_rix[size_bin] = -1;
}
sn->packets_since_sample[size_bin] = 0;
} else {
change_rates = 0;
if (!sn->packets_sent[size_bin] || best_rix == -1) {
/* no packet has been sent successfully yet */
change_rates = 1;
if (an->an_node.ni_flags & IEEE80211_NODE_HT)
best_rix =
ath_rate_pick_seed_rate_ht(sc, an, frameLen);
else
best_rix =
ath_rate_pick_seed_rate_legacy(sc, an, frameLen);
} else if (sn->packets_sent[size_bin] < 20) {
/* let the bit-rate switch quickly during the first few packets */
IEEE80211_NOTE(an->an_node.ni_vap,
IEEE80211_MSG_RATECTL, &an->an_node,
"%s: switching quickly..", __func__);
change_rates = 1;
} else if (ticks - ssc->min_switch > sn->ticks_since_switch[size_bin]) {
/* min_switch seconds have gone by */
IEEE80211_NOTE(an->an_node.ni_vap,
IEEE80211_MSG_RATECTL, &an->an_node,
"%s: min_switch %d > ticks_since_switch %d..",
__func__, ticks - ssc->min_switch, sn->ticks_since_switch[size_bin]);
change_rates = 1;
} else if ((! (an->an_node.ni_flags & IEEE80211_NODE_HT)) &&
(2*average_tx_time < sn->stats[size_bin][sn->current_rix[size_bin]].average_tx_time)) {
/* the current bit-rate is twice as slow as the best one */
IEEE80211_NOTE(an->an_node.ni_vap,
IEEE80211_MSG_RATECTL, &an->an_node,
"%s: 2x att (= %d) < cur_rix att %d",
__func__,
2 * average_tx_time, sn->stats[size_bin][sn->current_rix[size_bin]].average_tx_time);
change_rates = 1;
} else if ((an->an_node.ni_flags & IEEE80211_NODE_HT)) {
int cur_rix = sn->current_rix[size_bin];
int cur_att = sn->stats[size_bin][cur_rix].average_tx_time;
/*
* If the node is HT, upgrade it if the MCS rate is
* higher and the average tx time is within 20% of
* the current rate. It can fail a little.
*
* This is likely not optimal!
*/
#if 0
printf("cur rix/att %x/%d, best rix/att %x/%d\n",
MCS(cur_rix), cur_att, MCS(best_rix), average_tx_time);
#endif
if ((MCS(best_rix) > MCS(cur_rix)) &&
(average_tx_time * 8) <= (cur_att * 10)) {
IEEE80211_NOTE(an->an_node.ni_vap,
IEEE80211_MSG_RATECTL, &an->an_node,
"%s: HT: best_rix 0x%d > cur_rix 0x%x, average_tx_time %d, cur_att %d",
__func__,
MCS(best_rix), MCS(cur_rix), average_tx_time, cur_att);
change_rates = 1;
}
}
sn->packets_since_sample[size_bin]++;
if (change_rates) {
if (best_rix != sn->current_rix[size_bin]) {
IEEE80211_NOTE(an->an_node.ni_vap,
IEEE80211_MSG_RATECTL,
&an->an_node,
"%s: size %d switch rate %d (%d/%d) -> %d (%d/%d) after %d packets mrr %d",
__func__,
bin_to_size(size_bin),
RATE(sn->current_rix[size_bin]),
sn->stats[size_bin][sn->current_rix[size_bin]].average_tx_time,
sn->stats[size_bin][sn->current_rix[size_bin]].perfect_tx_time,
RATE(best_rix),
sn->stats[size_bin][best_rix].average_tx_time,
sn->stats[size_bin][best_rix].perfect_tx_time,
sn->packets_since_switch[size_bin],
mrr);
}
sn->packets_since_switch[size_bin] = 0;
sn->current_rix[size_bin] = best_rix;
sn->ticks_since_switch[size_bin] = ticks;
/*
* Set the visible txrate for this node.
*/
an->an_node.ni_txrate = (rt->info[best_rix].phy == IEEE80211_T_HT) ? MCS(best_rix) : DOT11RATE(best_rix);
}
rix = sn->current_rix[size_bin];
sn->packets_since_switch[size_bin]++;
}
*try0 = mrr ? sn->sched[rix].t0 : ATH_TXMAXTRY;
done:
/*
* This bug totally sucks and should be fixed.
*
* For now though, let's not panic, so we can start to figure
* out how to better reproduce it.
*/
if (rix < 0 || rix >= rt->rateCount) {
printf("%s: ERROR: rix %d out of bounds (rateCount=%d)\n",
__func__,
rix,
rt->rateCount);
rix = 0; /* XXX just default for now */
}
KASSERT(rix >= 0 && rix < rt->rateCount, ("rix is %d", rix));
*rix0 = rix;
*txrate = rt->info[rix].rateCode
| (shortPreamble ? rt->info[rix].shortPreamble : 0);
sn->packets_sent[size_bin]++;
#undef DOT11RATE
#undef MCS
#undef RATE
}
/*
* Get the TX rates. Don't fiddle with short preamble flags for them;
* the caller can do that.
*/
void
ath_rate_getxtxrates(struct ath_softc *sc, struct ath_node *an,
uint8_t rix0, struct ath_rc_series *rc)
{
struct sample_node *sn = ATH_NODE_SAMPLE(an);
const struct txschedule *sched = &sn->sched[rix0];
KASSERT(rix0 == sched->r0, ("rix0 (%x) != sched->r0 (%x)!\n",
rix0, sched->r0));
rc[0].flags = rc[1].flags = rc[2].flags = rc[3].flags = 0;
rc[0].rix = sched->r0;
rc[1].rix = sched->r1;
rc[2].rix = sched->r2;
rc[3].rix = sched->r3;
rc[0].tries = sched->t0;
rc[1].tries = sched->t1;
rc[2].tries = sched->t2;
rc[3].tries = sched->t3;
}
void
ath_rate_setupxtxdesc(struct ath_softc *sc, struct ath_node *an,
struct ath_desc *ds, int shortPreamble, u_int8_t rix)
{
struct sample_node *sn = ATH_NODE_SAMPLE(an);
const struct txschedule *sched = &sn->sched[rix];
const HAL_RATE_TABLE *rt = sc->sc_currates;
uint8_t rix1, s1code, rix2, s2code, rix3, s3code;
/* XXX precalculate short preamble tables */
rix1 = sched->r1;
s1code = rt->info[rix1].rateCode
| (shortPreamble ? rt->info[rix1].shortPreamble : 0);
rix2 = sched->r2;
s2code = rt->info[rix2].rateCode
| (shortPreamble ? rt->info[rix2].shortPreamble : 0);
rix3 = sched->r3;
s3code = rt->info[rix3].rateCode
| (shortPreamble ? rt->info[rix3].shortPreamble : 0);
ath_hal_setupxtxdesc(sc->sc_ah, ds,
s1code, sched->t1, /* series 1 */
s2code, sched->t2, /* series 2 */
s3code, sched->t3); /* series 3 */
}
static void
update_stats(struct ath_softc *sc, struct ath_node *an,
int frame_size,
int rix0, int tries0,
int rix1, int tries1,
int rix2, int tries2,
int rix3, int tries3,
int short_tries, int tries, int status,
int nframes, int nbad)
{
struct sample_node *sn = ATH_NODE_SAMPLE(an);
struct sample_softc *ssc = ATH_SOFTC_SAMPLE(sc);
#ifdef IEEE80211_DEBUG
const HAL_RATE_TABLE *rt = sc->sc_currates;
#endif
const int size_bin = size_to_bin(frame_size);
const int size = bin_to_size(size_bin);
int tt, tries_so_far;
int is_ht40 = (an->an_node.ni_chw == 40);
int pct;
if (!IS_RATE_DEFINED(sn, rix0))
return;
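/*
 * Estimate the airtime spent on this frame: each MRR series that was
 * actually used contributes the time for the tries it consumed,
 * clamped so the total never exceeds the overall try count.
 */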
tt = calc_usecs_unicast_packet(sc, size, rix0, short_tries,
MIN(tries0, tries) - 1, is_ht40);
tries_so_far = tries0;
if (tries1 && tries_so_far < tries) {
if (!IS_RATE_DEFINED(sn, rix1))
return;
tt += calc_usecs_unicast_packet(sc, size, rix1, short_tries,
MIN(tries1 + tries_so_far, tries) - tries_so_far - 1, is_ht40);
tries_so_far += tries1;
}
if (tries2 && tries_so_far < tries) {
if (!IS_RATE_DEFINED(sn, rix2))
return;
tt += calc_usecs_unicast_packet(sc, size, rix2, short_tries,
MIN(tries2 + tries_so_far, tries) - tries_so_far - 1, is_ht40);
tries_so_far += tries2;
}
if (tries3 && tries_so_far < tries) {
if (!IS_RATE_DEFINED(sn, rix3))
return;
tt += calc_usecs_unicast_packet(sc, size, rix3, short_tries,
MIN(tries3 + tries_so_far, tries) - tries_so_far - 1, is_ht40);
}
if (sn->stats[size_bin][rix0].total_packets < ssc->smoothing_minpackets) {
/* just average the first few packets */
int avg_tx = sn->stats[size_bin][rix0].average_tx_time;
int packets = sn->stats[size_bin][rix0].total_packets;
sn->stats[size_bin][rix0].average_tx_time = (tt+(avg_tx*packets))/(packets+nframes);
} else {
/* use a ewma */
sn->stats[size_bin][rix0].average_tx_time =
((sn->stats[size_bin][rix0].average_tx_time * ssc->smoothing_rate) +
(tt * (100 - ssc->smoothing_rate))) / 100;
}
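/*
 * EWMA example (hypothetical values): with smoothing_rate = 75, a
 * stored average of 400us and a new estimate tt = 800us, the new
 * average becomes (400 * 75 + 800 * 25) / 100 = 500us.
 */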
/*
* XXX Don't mark the higher bit rates as also having failed, as this
* unfortunately stops those rates from being sampled when trying to
* TX. This happens with 11n aggregation.
*/
if (nframes == nbad) {
#if 0
int y;
#endif
sn->stats[size_bin][rix0].successive_failures += nbad;
#if 0
for (y = size_bin+1; y < NUM_PACKET_SIZE_BINS; y++) {
/*
* Also say larger packets failed since we
* assume if a small packet fails at a
* bit-rate then a larger one will also.
*/
sn->stats[y][rix0].successive_failures += nbad;
sn->stats[y][rix0].last_tx = ticks;
sn->stats[y][rix0].tries += tries;
sn->stats[y][rix0].total_packets += nframes;
}
#endif
} else {
sn->stats[size_bin][rix0].packets_acked += (nframes - nbad);
sn->stats[size_bin][rix0].successive_failures = 0;
}
sn->stats[size_bin][rix0].tries += tries;
sn->stats[size_bin][rix0].last_tx = ticks;
sn->stats[size_bin][rix0].total_packets += nframes;
/* update EWMA for this rix */
/* Calculate percentage based on current rate */
if (nframes == 0)
nframes = nbad = 1;
pct = ((nframes - nbad) * 1000) / nframes;
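/* pct and ewma_pct are kept in tenths of a percent (0..1000) */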
if (sn->stats[size_bin][rix0].total_packets <
ssc->smoothing_minpackets) {
/* just average the first few packets */
int a_pct = (sn->stats[size_bin][rix0].packets_acked * 1000) /
(sn->stats[size_bin][rix0].total_packets);
sn->stats[size_bin][rix0].ewma_pct = a_pct;
} else {
/* use a ewma */
sn->stats[size_bin][rix0].ewma_pct =
((sn->stats[size_bin][rix0].ewma_pct * ssc->smoothing_rate) +
(pct * (100 - ssc->smoothing_rate))) / 100;
}
if (rix0 == sn->current_sample_rix[size_bin]) {
IEEE80211_NOTE(an->an_node.ni_vap, IEEE80211_MSG_RATECTL,
&an->an_node,
"%s: size %d %s sample rate %d %s tries (%d/%d) tt %d avg_tt (%d/%d) nfrm %d nbad %d",
__func__,
size,
status ? "FAIL" : "OK",
dot11rate(rt, rix0),
dot11rate_label(rt, rix0),
short_tries, tries, tt,
sn->stats[size_bin][rix0].average_tx_time,
sn->stats[size_bin][rix0].perfect_tx_time,
nframes, nbad);
sn->sample_tt[size_bin] = tt;
sn->current_sample_rix[size_bin] = -1;
}
}
static void
badrate(struct ath_softc *sc, int series, int hwrate, int tries, int status)
{
device_printf(sc->sc_dev,
"bad series%d hwrate 0x%x, tries %u ts_status 0x%x\n",
series, hwrate, tries, status);
}
void
ath_rate_tx_complete(struct ath_softc *sc, struct ath_node *an,
const struct ath_rc_series *rc, const struct ath_tx_status *ts,
int frame_size, int nframes, int nbad)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct sample_node *sn = ATH_NODE_SAMPLE(an);
int final_rix, short_tries, long_tries;
const HAL_RATE_TABLE *rt = sc->sc_currates;
int status = ts->ts_status;
int mrr;
final_rix = rt->rateCodeToIndex[ts->ts_rate];
short_tries = ts->ts_shortretry;
long_tries = ts->ts_longretry + 1;
if (nframes == 0) {
device_printf(sc->sc_dev, "%s: nframes=0?\n", __func__);
return;
}
if (frame_size == 0) /* NB: should not happen */
frame_size = 1500;
if (sn->ratemask == 0) {
IEEE80211_NOTE(an->an_node.ni_vap, IEEE80211_MSG_RATECTL,
&an->an_node,
"%s: size %d %s rate/try %d/%d no rates yet",
__func__,
bin_to_size(size_to_bin(frame_size)),
status ? "FAIL" : "OK",
short_tries, long_tries);
return;
}
mrr = sc->sc_mrretry;
/* XXX check HT protmode too */
if (mrr && (ic->ic_flags & IEEE80211_F_USEPROT && !sc->sc_mrrprot))
mrr = 0;
if (!mrr || ts->ts_finaltsi == 0) {
if (!IS_RATE_DEFINED(sn, final_rix)) {
device_printf(sc->sc_dev,
"%s: ts_rate=%d ts_finaltsi=%d, final_rix=%d\n",
__func__, ts->ts_rate, ts->ts_finaltsi, final_rix);
badrate(sc, 0, ts->ts_rate, long_tries, status);
return;
}
/*
* Only one rate was used; optimize work.
*/
IEEE80211_NOTE(an->an_node.ni_vap, IEEE80211_MSG_RATECTL,
&an->an_node, "%s: size %d (%d bytes) %s rate/short/long %d %s/%d/%d nframes/nbad [%d/%d]",
__func__,
bin_to_size(size_to_bin(frame_size)),
frame_size,
status ? "FAIL" : "OK",
dot11rate(rt, final_rix), dot11rate_label(rt, final_rix),
short_tries, long_tries, nframes, nbad);
update_stats(sc, an, frame_size,
final_rix, long_tries,
0, 0,
0, 0,
0, 0,
short_tries, long_tries, status,
nframes, nbad);
} else {
int finalTSIdx = ts->ts_finaltsi;
int i;
/*
* Process intermediate rates that failed.
*/
IEEE80211_NOTE(an->an_node.ni_vap, IEEE80211_MSG_RATECTL,
&an->an_node,
"%s: size %d (%d bytes) finaltsidx %d short %d long %d %s rate/try [%d %s/%d %d %s/%d %d %s/%d %d %s/%d] nframes/nbad [%d/%d]",
__func__,
bin_to_size(size_to_bin(frame_size)),
frame_size,
finalTSIdx,
short_tries,
long_tries,
status ? "FAIL" : "OK",
dot11rate(rt, rc[0].rix),
dot11rate_label(rt, rc[0].rix), rc[0].tries,
dot11rate(rt, rc[1].rix),
dot11rate_label(rt, rc[1].rix), rc[1].tries,
dot11rate(rt, rc[2].rix),
dot11rate_label(rt, rc[2].rix), rc[2].tries,
dot11rate(rt, rc[3].rix),
dot11rate_label(rt, rc[3].rix), rc[3].tries,
nframes, nbad);
for (i = 0; i < 4; i++) {
if (rc[i].tries && !IS_RATE_DEFINED(sn, rc[i].rix))
badrate(sc, 0, rc[i].ratecode, rc[i].tries,
status);
}
/*
* NB: series > 0 are not penalized for failure
* based on the try counts under the assumption
* that losses are often bursty and since we
* sample higher rates 1 try at a time doing so
* may unfairly penalize them.
*/
if (rc[0].tries) {
update_stats(sc, an, frame_size,
rc[0].rix, rc[0].tries,
rc[1].rix, rc[1].tries,
rc[2].rix, rc[2].tries,
rc[3].rix, rc[3].tries,
short_tries, long_tries,
long_tries > rc[0].tries,
nframes, nbad);
long_tries -= rc[0].tries;
}
if (rc[1].tries && finalTSIdx > 0) {
update_stats(sc, an, frame_size,
rc[1].rix, rc[1].tries,
rc[2].rix, rc[2].tries,
rc[3].rix, rc[3].tries,
0, 0,
short_tries, long_tries,
status,
nframes, nbad);
long_tries -= rc[1].tries;
}
if (rc[2].tries && finalTSIdx > 1) {
update_stats(sc, an, frame_size,
rc[2].rix, rc[2].tries,
rc[3].rix, rc[3].tries,
0, 0,
0, 0,
short_tries, long_tries,
status,
nframes, nbad);
long_tries -= rc[2].tries;
}
if (rc[3].tries && finalTSIdx > 2) {
update_stats(sc, an, frame_size,
rc[3].rix, rc[3].tries,
0, 0,
0, 0,
0, 0,
short_tries, long_tries,
status,
nframes, nbad);
}
}
}
void
ath_rate_newassoc(struct ath_softc *sc, struct ath_node *an, int isnew)
{
if (isnew)
ath_rate_ctl_reset(sc, &an->an_node);
}
static const struct txschedule *mrr_schedules[IEEE80211_MODE_MAX+2] = {
NULL, /* IEEE80211_MODE_AUTO */
series_11a, /* IEEE80211_MODE_11A */
series_11g, /* IEEE80211_MODE_11B */
series_11g, /* IEEE80211_MODE_11G */
NULL, /* IEEE80211_MODE_FH */
series_11a, /* IEEE80211_MODE_TURBO_A */
series_11g, /* IEEE80211_MODE_TURBO_G */
series_11a, /* IEEE80211_MODE_STURBO_A */
series_11na, /* IEEE80211_MODE_11NA */
series_11ng, /* IEEE80211_MODE_11NG */
series_half, /* IEEE80211_MODE_HALF */
series_quarter, /* IEEE80211_MODE_QUARTER */
};
/*
* Initialize the tables for a node.
*/
static void
ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni)
{
#define RATE(_ix) (ni->ni_rates.rs_rates[(_ix)] & IEEE80211_RATE_VAL)
#define DOT11RATE(_ix) (rt->info[(_ix)].dot11Rate & IEEE80211_RATE_VAL)
#define MCS(_ix) (ni->ni_htrates.rs_rates[_ix] | IEEE80211_RATE_MCS)
struct ath_node *an = ATH_NODE(ni);
struct sample_node *sn = ATH_NODE_SAMPLE(an);
const HAL_RATE_TABLE *rt = sc->sc_currates;
int x, y, rix;
KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
KASSERT(sc->sc_curmode < IEEE80211_MODE_MAX+2,
("curmode %u", sc->sc_curmode));
sn->sched = mrr_schedules[sc->sc_curmode];
KASSERT(sn->sched != NULL,
("no mrr schedule for mode %u", sc->sc_curmode));
sn->static_rix = -1;
ath_rate_update_static_rix(sc, ni);
sn->currates = sc->sc_currates;
/*
* Construct a bitmask of usable rates. This has all
* negotiated rates minus those marked by the hal as
* to be ignored for doing rate control.
*/
sn->ratemask = 0;
/* MCS rates */
if (ni->ni_flags & IEEE80211_NODE_HT) {
for (x = 0; x < ni->ni_htrates.rs_nrates; x++) {
rix = sc->sc_rixmap[MCS(x)];
if (rix == 0xff)
continue;
/* skip rates marked broken by hal */
if (!rt->info[rix].valid)
continue;
KASSERT(rix < SAMPLE_MAXRATES,
("mcs %u has rix %d", MCS(x), rix));
sn->ratemask |= (uint64_t) 1<<rix;
}
}
/* Legacy rates */
for (x = 0; x < ni->ni_rates.rs_nrates; x++) {
rix = sc->sc_rixmap[RATE(x)];
if (rix == 0xff)
continue;
/* skip rates marked broken by hal */
if (!rt->info[rix].valid)
continue;
KASSERT(rix < SAMPLE_MAXRATES,
("rate %u has rix %d", RATE(x), rix));
sn->ratemask |= (uint64_t) 1<<rix;
}
#ifdef IEEE80211_DEBUG
if (ieee80211_msg(ni->ni_vap, IEEE80211_MSG_RATECTL)) {
uint64_t mask;
ieee80211_note(ni->ni_vap, "[%6D] %s: size 1600 rate/tt",
ni->ni_macaddr, ":", __func__);
for (mask = sn->ratemask, rix = 0; mask != 0; mask >>= 1, rix++) {
if ((mask & 1) == 0)
continue;
printf(" %d %s/%d", dot11rate(rt, rix), dot11rate_label(rt, rix),
calc_usecs_unicast_packet(sc, 1600, rix, 0,0,
(ni->ni_chw == 40)));
}
printf("\n");
}
#endif
for (y = 0; y < NUM_PACKET_SIZE_BINS; y++) {
int size = bin_to_size(y);
uint64_t mask;
sn->packets_sent[y] = 0;
sn->current_sample_rix[y] = -1;
sn->last_sample_rix[y] = 0;
/* XXX start with first valid rate */
sn->current_rix[y] = ffs(sn->ratemask)-1;
/*
* Initialize the statistics buckets; these are
* indexed by the rate code index.
*/
for (rix = 0, mask = sn->ratemask; mask != 0; rix++, mask >>= 1) {
if ((mask & 1) == 0) /* not a valid rate */
continue;
sn->stats[y][rix].successive_failures = 0;
sn->stats[y][rix].tries = 0;
sn->stats[y][rix].total_packets = 0;
sn->stats[y][rix].packets_acked = 0;
sn->stats[y][rix].last_tx = 0;
sn->stats[y][rix].ewma_pct = 0;
sn->stats[y][rix].perfect_tx_time =
calc_usecs_unicast_packet(sc, size, rix, 0, 0,
(ni->ni_chw == 40));
sn->stats[y][rix].average_tx_time =
sn->stats[y][rix].perfect_tx_time;
}
}
#if 0
/* XXX 0, num_rates-1 are wrong */
IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_RATECTL, ni,
"%s: %d rates %d%sMbps (%dus)- %d%sMbps (%dus)", __func__,
sn->num_rates,
DOT11RATE(0)/2, DOT11RATE(0) % 1 ? ".5" : "",
sn->stats[1][0].perfect_tx_time,
DOT11RATE(sn->num_rates-1)/2, DOT11RATE(sn->num_rates-1) % 1 ? ".5" : "",
sn->stats[1][sn->num_rates-1].perfect_tx_time
);
#endif
/* set the visible bit-rate */
if (sn->static_rix != -1)
ni->ni_txrate = DOT11RATE(sn->static_rix);
else
ni->ni_txrate = RATE(0);
#undef RATE
#undef DOT11RATE
}
/*
* Fetch the statistics for the given node.
*
* The ieee80211 node must be referenced and unlocked, however the ath_node
* must be locked.
*
* The main difference here is that we convert the rate indexes
* to 802.11 rates, or the userland output won't make much sense
* as it has no access to the rix table.
*/
int
ath_rate_fetch_node_stats(struct ath_softc *sc, struct ath_node *an,
struct ath_rateioctl *rs)
{
struct sample_node *sn = ATH_NODE_SAMPLE(an);
const HAL_RATE_TABLE *rt = sc->sc_currates;
struct ath_rateioctl_tlv av;
struct ath_rateioctl_rt *tv;
int y;
int o = 0;
ATH_NODE_LOCK_ASSERT(an);
/*
* Ensure there's enough space for the statistics.
*/
if (rs->len <
sizeof(struct ath_rateioctl_tlv) +
sizeof(struct ath_rateioctl_rt) +
sizeof(struct ath_rateioctl_tlv) +
sizeof(struct sample_node)) {
device_printf(sc->sc_dev, "%s: len=%d, too short\n",
__func__,
rs->len);
return (EINVAL);
}
/*
* Take a temporary copy of the sample node state so we can
* modify it before we copy it.
*/
tv = malloc(sizeof(struct ath_rateioctl_rt), M_TEMP,
M_NOWAIT | M_ZERO);
if (tv == NULL) {
return (ENOMEM);
}
/*
* Populate the rate table mapping TLV.
*/
tv->nentries = rt->rateCount;
for (y = 0; y < rt->rateCount; y++) {
tv->ratecode[y] = rt->info[y].dot11Rate & IEEE80211_RATE_VAL;
if (rt->info[y].phy == IEEE80211_T_HT)
tv->ratecode[y] |= IEEE80211_RATE_MCS;
}
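/*
 * The reply written to rs->buf is two TLVs back to back: a RATETABLE
 * TLV header plus the rate mapping table, then a SAMPLENODE TLV
 * header followed by the raw sample_node statistics.
 */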
o = 0;
/*
* First TLV - rate code mapping
*/
av.tlv_id = ATH_RATE_TLV_RATETABLE;
av.tlv_len = sizeof(struct ath_rateioctl_rt);
copyout(&av, rs->buf + o, sizeof(struct ath_rateioctl_tlv));
o += sizeof(struct ath_rateioctl_tlv);
copyout(tv, rs->buf + o, sizeof(struct ath_rateioctl_rt));
o += sizeof(struct ath_rateioctl_rt);
/*
* Second TLV - sample node statistics
*/
av.tlv_id = ATH_RATE_TLV_SAMPLENODE;
av.tlv_len = sizeof(struct sample_node);
copyout(&av, rs->buf + o, sizeof(struct ath_rateioctl_tlv));
o += sizeof(struct ath_rateioctl_tlv);
/*
* Copy the statistics over to the provided buffer.
*/
copyout(sn, rs->buf + o, sizeof(struct sample_node));
o += sizeof(struct sample_node);
free(tv, M_TEMP);
return (0);
}
static void
sample_stats(void *arg, struct ieee80211_node *ni)
{
struct ath_softc *sc = arg;
const HAL_RATE_TABLE *rt = sc->sc_currates;
struct sample_node *sn = ATH_NODE_SAMPLE(ATH_NODE(ni));
uint64_t mask;
int rix, y;
printf("\n[%s] refcnt %d static_rix (%d %s) ratemask 0x%jx\n",
ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni),
dot11rate(rt, sn->static_rix),
dot11rate_label(rt, sn->static_rix),
(uintmax_t)sn->ratemask);
for (y = 0; y < NUM_PACKET_SIZE_BINS; y++) {
printf("[%4u] cur rix %d (%d %s) since switch: packets %d ticks %u\n",
bin_to_size(y), sn->current_rix[y],
dot11rate(rt, sn->current_rix[y]),
dot11rate_label(rt, sn->current_rix[y]),
sn->packets_since_switch[y], sn->ticks_since_switch[y]);
printf("[%4u] last sample (%d %s) cur sample (%d %s) packets sent %d\n",
bin_to_size(y),
dot11rate(rt, sn->last_sample_rix[y]),
dot11rate_label(rt, sn->last_sample_rix[y]),
dot11rate(rt, sn->current_sample_rix[y]),
dot11rate_label(rt, sn->current_sample_rix[y]),
sn->packets_sent[y]);
printf("[%4u] packets since sample %d sample tt %u\n",
bin_to_size(y), sn->packets_since_sample[y],
sn->sample_tt[y]);
}
for (mask = sn->ratemask, rix = 0; mask != 0; mask >>= 1, rix++) {
if ((mask & 1) == 0)
continue;
for (y = 0; y < NUM_PACKET_SIZE_BINS; y++) {
if (sn->stats[y][rix].total_packets == 0)
continue;
printf("[%2u %s:%4u] %8ju:%-8ju (%3d%%) (EWMA %3d.%1d%%) T %8ju F %4d avg %5u last %u\n",
dot11rate(rt, rix), dot11rate_label(rt, rix),
bin_to_size(y),
(uintmax_t) sn->stats[y][rix].total_packets,
(uintmax_t) sn->stats[y][rix].packets_acked,
(int) ((sn->stats[y][rix].packets_acked * 100ULL) /
sn->stats[y][rix].total_packets),
sn->stats[y][rix].ewma_pct / 10,
sn->stats[y][rix].ewma_pct % 10,
(uintmax_t) sn->stats[y][rix].tries,
sn->stats[y][rix].successive_failures,
sn->stats[y][rix].average_tx_time,
ticks - sn->stats[y][rix].last_tx);
}
}
}
static int
ath_rate_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
int error, v;
v = 0;
error = sysctl_handle_int(oidp, &v, 0, req);
if (error || !req->newptr)
return error;
ieee80211_iterate_nodes(&ic->ic_sta, sample_stats, sc);
return 0;
}
static int
ath_rate_sysctl_smoothing_rate(SYSCTL_HANDLER_ARGS)
{
struct sample_softc *ssc = arg1;
int rate, error;
rate = ssc->smoothing_rate;
error = sysctl_handle_int(oidp, &rate, 0, req);
if (error || !req->newptr)
return error;
if (!(0 <= rate && rate < 100))
return EINVAL;
ssc->smoothing_rate = rate;
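/* e.g. rate 75 keeps a straight average over 100/(100-75) = 4 packets */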
ssc->smoothing_minpackets = 100 / (100 - rate);
return 0;
}
static int
ath_rate_sysctl_sample_rate(SYSCTL_HANDLER_ARGS)
{
struct sample_softc *ssc = arg1;
int rate, error;
rate = ssc->sample_rate;
error = sysctl_handle_int(oidp, &rate, 0, req);
if (error || !req->newptr)
return error;
if (!(2 <= rate && rate <= 100))
return EINVAL;
ssc->sample_rate = rate;
return 0;
}
static void
ath_rate_sysctlattach(struct ath_softc *sc, struct sample_softc *ssc)
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"smoothing_rate", CTLTYPE_INT | CTLFLAG_RW, ssc, 0,
ath_rate_sysctl_smoothing_rate, "I",
"sample: smoothing rate for avg tx time (%%)");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"sample_rate", CTLTYPE_INT | CTLFLAG_RW, ssc, 0,
ath_rate_sysctl_sample_rate, "I",
"sample: percent air time devoted to sampling new rates (%%)");
/* XXX max_successive_failures, stale_failure_timeout, min_switch */
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"sample_stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_rate_sysctl_stats, "I", "sample: print statistics");
}
struct ath_ratectrl *
ath_rate_attach(struct ath_softc *sc)
{
struct sample_softc *ssc;
ssc = malloc(sizeof(struct sample_softc), M_DEVBUF, M_NOWAIT|M_ZERO);
if (ssc == NULL)
return NULL;
ssc->arc.arc_space = sizeof(struct sample_node);
ssc->smoothing_rate = 75; /* ewma percentage ([0..99]) */
ssc->smoothing_minpackets = 100 / (100 - ssc->smoothing_rate);
ssc->sample_rate = 10; /* %time to try diff tx rates */
ssc->max_successive_failures = 3; /* threshold for rate sampling*/
ssc->stale_failure_timeout = 10 * hz; /* 10 seconds */
ssc->min_switch = hz; /* 1 second */
ath_rate_sysctlattach(sc, ssc);
return &ssc->arc;
}
void
ath_rate_detach(struct ath_ratectrl *arc)
{
struct sample_softc *ssc = (struct sample_softc *) arc;
free(ssc, M_DEVBUF);
}
Index: head/sys/dev/ath/ath_rate/sample/sample.h
===================================================================
--- head/sys/dev/ath/ath_rate/sample/sample.h (revision 287196)
+++ head/sys/dev/ath/ath_rate/sample/sample.h (revision 287197)
@@ -1,242 +1,241 @@
/*-
* Copyright (c) 2005 John Bicket
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
* $FreeBSD$
*/
/*
* Definitions for the Atheros Wireless LAN controller driver.
*/
#ifndef _DEV_ATH_RATE_SAMPLE_H
#define _DEV_ATH_RATE_SAMPLE_H
/* per-device state */
struct sample_softc {
struct ath_ratectrl arc; /* base class */
int smoothing_rate; /* ewma percentage [0..99] */
int smoothing_minpackets;
int sample_rate; /* %time to try different tx rates */
int max_successive_failures;
int stale_failure_timeout; /* how long to honor max_successive_failures */
int min_switch; /* min time between rate changes */
int min_good_pct; /* min good percentage for a rate to be considered */
};
#define ATH_SOFTC_SAMPLE(sc) ((struct sample_softc *)sc->sc_rc)
struct rate_stats {
unsigned average_tx_time;
int successive_failures;
uint64_t tries;
uint64_t total_packets; /* pkts total since assoc */
uint64_t packets_acked; /* pkts acked since assoc */
int ewma_pct; /* EWMA percentage */
unsigned perfect_tx_time; /* transmit time for 0 retries */
int last_tx;
};
struct txschedule {
uint8_t t0, r0; /* series 0: tries, rate code */
uint8_t t1, r1; /* series 1: tries, rate code */
uint8_t t2, r2; /* series 2: tries, rate code */
uint8_t t3, r3; /* series 3: tries, rate code */
};
/*
* for now, we track performance for two different packet
* size buckets
*/
#define NUM_PACKET_SIZE_BINS 2
static const int packet_size_bins[NUM_PACKET_SIZE_BINS] = { 250, 1600 };
static inline int
bin_to_size(int index)
{
return packet_size_bins[index];
}
/* per-node state */
struct sample_node {
int static_rix; /* rate index of fixed tx rate */
#define SAMPLE_MAXRATES 64 /* NB: corresponds to hal info[32] */
uint64_t ratemask; /* bit mask of valid rate indices */
const struct txschedule *sched; /* tx schedule table */
const HAL_RATE_TABLE *currates;
struct rate_stats stats[NUM_PACKET_SIZE_BINS][SAMPLE_MAXRATES];
int last_sample_rix[NUM_PACKET_SIZE_BINS];
int current_sample_rix[NUM_PACKET_SIZE_BINS];
int packets_sent[NUM_PACKET_SIZE_BINS];
int current_rix[NUM_PACKET_SIZE_BINS];
int packets_since_switch[NUM_PACKET_SIZE_BINS];
unsigned ticks_since_switch[NUM_PACKET_SIZE_BINS];
int packets_since_sample[NUM_PACKET_SIZE_BINS];
unsigned sample_tt[NUM_PACKET_SIZE_BINS];
};
#ifdef _KERNEL
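/*
 * The per-node sample state is laid out immediately after the
 * ath_node allocation (see arc_space in ath_rate_attach()).
 */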
#define ATH_NODE_SAMPLE(an) ((struct sample_node *)&(an)[1])
#define IS_RATE_DEFINED(sn, rix) (((uint64_t) (sn)->ratemask & (1ULL<<((uint64_t) rix))) != 0)
#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
#ifndef MAX
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#endif
#define WIFI_CW_MIN 31
#define WIFI_CW_MAX 1023
/*
* Calculate the transmit duration of a frame.
*/
static unsigned calc_usecs_unicast_packet(struct ath_softc *sc,
int length,
int rix, int short_retries,
int long_retries, int is_ht40)
{
const HAL_RATE_TABLE *rt = sc->sc_currates;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
int rts, cts;
unsigned t_slot = 20;
unsigned t_difs = 50;
unsigned t_sifs = 10;
int tt = 0;
int x = 0;
int cw = WIFI_CW_MIN;
int cix;
KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
if (rix >= rt->rateCount) {
printf("bogus rix %d, max %u, mode %u\n",
rix, rt->rateCount, sc->sc_curmode);
return 0;
}
cix = rt->info[rix].controlRate;
/*
* XXX getting mac/phy level timings should be fixed for turbo
* rates, and there is probably a way to get this from the
* hal...
*/
switch (rt->info[rix].phy) {
case IEEE80211_T_OFDM:
t_slot = 9;
t_sifs = 16;
t_difs = 28;
/* fall through */
case IEEE80211_T_TURBO:
t_slot = 9;
t_sifs = 8;
t_difs = 28;
break;
case IEEE80211_T_HT:
t_slot = 9;
t_sifs = 8;
t_difs = 28;
break;
case IEEE80211_T_DS:
/* fall through to default */
default:
/* pg 205 ieee.802.11.pdf */
t_slot = 20;
t_difs = 50;
t_sifs = 10;
}
rts = cts = 0;
if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
rt->info[rix].phy == IEEE80211_T_OFDM) {
if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
rts = 1;
else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
cts = 1;
cix = rt->info[sc->sc_protrix].controlRate;
}
if (0 /*length > ic->ic_rtsthreshold */) {
rts = 1;
}
if (rts || cts) {
int ctsrate;
int ctsduration = 0;
/* NB: this is intentionally not a runtime check */
KASSERT(cix < rt->rateCount,
("bogus cix %d, max %u, mode %u\n", cix, rt->rateCount,
sc->sc_curmode));
ctsrate = rt->info[cix].rateCode | rt->info[cix].shortPreamble;
if (rts) /* SIFS + CTS */
ctsduration += rt->info[cix].spAckDuration;
/* XXX assumes short preamble */
ctsduration += ath_hal_pkt_txtime(sc->sc_ah, rt, length, rix,
is_ht40, 0);
if (cts) /* SIFS + ACK */
ctsduration += rt->info[cix].spAckDuration;
tt += (short_retries + 1) * ctsduration;
}
tt += t_difs;
/* XXX assumes short preamble */
tt += (long_retries+1)*ath_hal_pkt_txtime(sc->sc_ah, rt, length, rix,
is_ht40, 0);
tt += (long_retries+1)*(t_sifs + rt->info[rix].spAckDuration);
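/*
 * Add the expected random backoff before each (re)try: the contention
 * window roughly doubles per attempt, capped at WIFI_CW_MAX, and the
 * average wait is cw/2 slot times.
 */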
for (x = 0; x <= short_retries + long_retries; x++) {
cw = MIN(WIFI_CW_MAX, (cw + 1) * 2);
tt += (t_slot * cw/2);
}
return tt;
}
#endif /* _KERNEL */
#endif /* _DEV_ATH_RATE_SAMPLE_H */
Index: head/sys/dev/ath/if_ath.c
===================================================================
--- head/sys/dev/ath/if_ath.c (revision 287196)
+++ head/sys/dev/ath/if_ath.c (revision 287197)
@@ -1,7267 +1,7122 @@
/*-
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for the Atheros Wireless LAN controller.
*
* This software is derived from work of Atsushi Onoe; his contribution
* is greatly appreciated.
*/
#include "opt_inet.h"
#include "opt_ath.h"
/*
* This is needed for register operations which are performed
* by the driver - eg, calls to ath_hal_gettsf32().
*
* It's also required for any AH_DEBUG checks in here, eg the
* module dependencies.
*/
#include "opt_ah.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h> /* for mp_ncpus */
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net/bpf.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>
#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_rx_edma.h>
#include <dev/ath/if_ath_tx_edma.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_ath_btcoex.h>
#include <dev/ath/if_ath_spectral.h>
#include <dev/ath/if_ath_lna_div.h>
#include <dev/ath/if_athdfs.h>
#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif
/*
* Only enable this if you're working on PS-POLL support.
*/
#define ATH_SW_PSQ
/*
* ATH_BCBUF determines the number of vap's that can transmit
* beacons and also (currently) the number of vap's that can
* have unique mac addresses/bssid. When staggering beacons
* 4 is probably a good max as otherwise the beacons become
* very closely spaced and there is limited time for cab q traffic
* to go out. You can burst beacons instead but that is not good
* for stations in power save and at some point you really want
* another radio (and channel).
*
* The limit on the number of mac addresses is tied to our use of
* the U/L bit and tracking addresses in a byte; it would be
* worthwhile to allow more for applications like proxy sta.
*/
CTASSERT(ATH_BCBUF <= 8);
static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void ath_vap_delete(struct ieee80211vap *);
-static void ath_init(void *);
-static void ath_stop_locked(struct ifnet *);
-static void ath_stop(struct ifnet *);
+static int ath_init(struct ath_softc *);
+static void ath_stop(struct ath_softc *);
static int ath_reset_vap(struct ieee80211vap *, u_long);
-static int ath_transmit(struct ifnet *ifp, struct mbuf *m);
-static void ath_qflush(struct ifnet *ifp);
+static int ath_transmit(struct ieee80211com *, struct mbuf *);
static int ath_media_change(struct ifnet *);
static void ath_watchdog(void *);
-static int ath_ioctl(struct ifnet *, u_long, caddr_t);
+static int ath_ioctl(struct ieee80211com *, u_long, void *);
+static void ath_parent(struct ieee80211com *);
static void ath_fatal_proc(void *, int);
static void ath_bmiss_vap(struct ieee80211vap *);
static void ath_bmiss_proc(void *, int);
static void ath_key_update_begin(struct ieee80211vap *);
static void ath_key_update_end(struct ieee80211vap *);
static void ath_update_mcast_hw(struct ath_softc *);
static void ath_update_mcast(struct ieee80211com *);
static void ath_update_promisc(struct ieee80211com *);
static void ath_updateslot(struct ieee80211com *);
static void ath_bstuck_proc(void *, int);
static void ath_reset_proc(void *, int);
static int ath_desc_alloc(struct ath_softc *);
static void ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
const uint8_t [IEEE80211_ADDR_LEN]);
static void ath_node_cleanup(struct ieee80211_node *);
static void ath_node_free(struct ieee80211_node *);
static void ath_node_getsignal(const struct ieee80211_node *,
int8_t *, int8_t *);
static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
static int ath_tx_setup(struct ath_softc *, int, int);
static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void ath_tx_cleanup(struct ath_softc *);
static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq,
int dosched);
static void ath_tx_proc_q0(void *, int);
static void ath_tx_proc_q0123(void *, int);
static void ath_tx_proc(void *, int);
static void ath_txq_sched_tasklet(void *, int);
static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void ath_scan_start(struct ieee80211com *);
static void ath_scan_end(struct ieee80211com *);
static void ath_set_channel(struct ieee80211com *);
#ifdef ATH_ENABLE_11N
static void ath_update_chw(struct ieee80211com *);
#endif /* ATH_ENABLE_11N */
static void ath_calibrate(void *);
static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void ath_setup_stationkey(struct ieee80211_node *);
static void ath_newassoc(struct ieee80211_node *, int);
static int ath_setregdomain(struct ieee80211com *,
struct ieee80211_regdomain *, int,
struct ieee80211_channel []);
static void ath_getradiocaps(struct ieee80211com *, int, int *,
struct ieee80211_channel []);
static int ath_getchannels(struct ath_softc *);
static int ath_rate_setup(struct ath_softc *, u_int mode);
static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
static void ath_announce(struct ath_softc *);
static void ath_dfs_tasklet(void *, int);
static void ath_node_powersave(struct ieee80211_node *, int);
static int ath_node_set_tim(struct ieee80211_node *, int);
static void ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *);
#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/ath/if_ath_tdma.h>
#endif
SYSCTL_DECL(_hw_ath);
/* XXX validate sysctl values */
static int ath_longcalinterval = 30; /* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
0, "long chip calibration interval (secs)");
static int ath_shortcalinterval = 100; /* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
0, "short chip calibration interval (msecs)");
static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
0, "reset chip calibration results (secs)");
static int ath_anicalinterval = 100; /* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
0, "ANI calibration (msecs)");
int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &ath_rxbuf,
0, "rx buffers allocated");
int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RWTUN, &ath_txbuf,
0, "tx buffers allocated");
int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RWTUN, &ath_txbuf_mgmt,
0, "tx (mgmt) buffers allocated");
int ath_bstuck_threshold = 4; /* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
0, "max missed beacon xmits before chip reset");
MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
void
ath_legacy_attach_comp_func(struct ath_softc *sc)
{
/*
* Special case certain configurations. Note the
* CAB queue is handled by these specially so don't
* include them when checking the txq setup mask.
*/
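/*
 * 0x01 below means only h/w queue 0 carries data traffic; 0x0f means
 * queues 0-3 (one per WME AC) are in use.  Anything else falls back
 * to the generic completion handler.
 */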
switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
case 0x01:
TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
break;
case 0x0f:
TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
break;
default:
TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
break;
}
}
/*
* Set the target power mode.
*
* If this is called during a point in time where
* the hardware is being programmed elsewhere, it will
* simply store it away and update it when all current
* uses of the hardware are completed.
*/
void
_ath_power_setpower(struct ath_softc *sc, int power_state, const char *file, int line)
{
ATH_LOCK_ASSERT(sc);
sc->sc_target_powerstate = power_state;
DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
__func__,
file,
line,
power_state,
sc->sc_powersave_refcnt);
if (sc->sc_powersave_refcnt == 0 &&
power_state != sc->sc_cur_powerstate) {
sc->sc_cur_powerstate = power_state;
ath_hal_setpower(sc->sc_ah, power_state);
/*
* If the NIC is force-awake, then set the
* self-gen frame state appropriately.
*
* If the nic is in network sleep or full-sleep,
* we let the above call leave the self-gen
* state as "sleep".
*/
if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
ath_hal_setselfgenpower(sc->sc_ah,
sc->sc_target_selfgen_state);
}
}
}
/*
* Set the current self-generated frames state.
*
* This is separate from the target power mode. The chip may be
* awake but the desired state is "sleep", so frames sent to the
* destination have PWRMGT=1 in the 802.11 header. The NIC also
* needs to know to set PWRMGT=1 in self-generated frames.
*/
void
_ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line)
{
ATH_LOCK_ASSERT(sc);
DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
__func__,
file,
line,
power_state,
sc->sc_target_selfgen_state);
sc->sc_target_selfgen_state = power_state;
/*
* If the NIC is force-awake, then set the power state.
* Network-sleep and full-sleep will already transition it to
* mark self-gen frames as sleeping - and we can't
* guarantee the NIC is awake to program the self-gen frame
* setting anyway.
*/
if (sc->sc_cur_powerstate == HAL_PM_AWAKE) {
ath_hal_setselfgenpower(sc->sc_ah, power_state);
}
}
/*
* Set the hardware power mode and take a reference.
*
* This doesn't update the target power mode in the driver;
* it just updates the hardware power state.
*
* XXX it should only ever force the hardware awake; it should
* never be called to set it asleep.
*/
void
_ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line)
{
ATH_LOCK_ASSERT(sc);
DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
__func__,
file,
line,
power_state,
sc->sc_powersave_refcnt);
sc->sc_powersave_refcnt++;
if (power_state != sc->sc_cur_powerstate) {
ath_hal_setpower(sc->sc_ah, power_state);
sc->sc_cur_powerstate = power_state;
/*
* Adjust the self-gen powerstate if appropriate.
*/
if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
ath_hal_setselfgenpower(sc->sc_ah,
sc->sc_target_selfgen_state);
}
}
}
/*
* Restore the power save mode to what it once was.
*
* This will decrement the reference counter and once it hits
* zero, it'll restore the powersave state.
*/
void
_ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line)
{
ATH_LOCK_ASSERT(sc);
DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) refcnt=%d, target state=%d\n",
__func__,
file,
line,
sc->sc_powersave_refcnt,
sc->sc_target_powerstate);
if (sc->sc_powersave_refcnt == 0)
device_printf(sc->sc_dev, "%s: refcnt=0?\n", __func__);
else
sc->sc_powersave_refcnt--;
if (sc->sc_powersave_refcnt == 0 &&
sc->sc_target_powerstate != sc->sc_cur_powerstate) {
sc->sc_cur_powerstate = sc->sc_target_powerstate;
ath_hal_setpower(sc->sc_ah, sc->sc_target_powerstate);
}
/*
* Adjust the self-gen powerstate if appropriate.
*/
if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
ath_hal_setselfgenpower(sc->sc_ah,
sc->sc_target_selfgen_state);
}
}
/*
* Configure the initial HAL configuration values based on bus
* specific parameters.
*
* Some PCI IDs and other information may need tweaking.
*
* XXX TODO: ath9k and the Atheros HAL only program comm2g_switch_enable
* if BT antenna diversity isn't enabled.
*
* So, let's also figure out how to enable BT diversity for AR9485.
*/
static void
ath_setup_hal_config(struct ath_softc *sc, HAL_OPS_CONFIG *ah_config)
{
/* XXX TODO: only for PCI devices? */
if (sc->sc_pci_devinfo & (ATH_PCI_CUS198 | ATH_PCI_CUS230)) {
ah_config->ath_hal_ext_lna_ctl_gpio = 0x200; /* bit 9 */
ah_config->ath_hal_ext_atten_margin_cfg = AH_TRUE;
ah_config->ath_hal_min_gainidx = AH_TRUE;
ah_config->ath_hal_ant_ctrl_comm2g_switch_enable = 0x000bbb88;
/* XXX low_rssi_thresh */
/* XXX fast_div_bias */
device_printf(sc->sc_dev, "configuring for %s\n",
(sc->sc_pci_devinfo & ATH_PCI_CUS198) ?
"CUS198" : "CUS230");
}
if (sc->sc_pci_devinfo & ATH_PCI_CUS217)
device_printf(sc->sc_dev, "CUS217 card detected\n");
if (sc->sc_pci_devinfo & ATH_PCI_CUS252)
device_printf(sc->sc_dev, "CUS252 card detected\n");
if (sc->sc_pci_devinfo & ATH_PCI_AR9565_1ANT)
device_printf(sc->sc_dev, "WB335 1-ANT card detected\n");
if (sc->sc_pci_devinfo & ATH_PCI_AR9565_2ANT)
device_printf(sc->sc_dev, "WB335 2-ANT card detected\n");
if (sc->sc_pci_devinfo & ATH_PCI_KILLER)
device_printf(sc->sc_dev, "Killer Wireless card detected\n");
#if 0
/*
* Some WB335 cards do not support antenna diversity. Since
* we use a hardcoded value for AR9565 instead of using the
* EEPROM/OTP data, remove the combining feature from
* the HW capabilities bitmap.
*/
if (sc->sc_pci_devinfo & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
if (!(sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV))
pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
}
if (sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV) {
pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
device_printf(sc->sc_dev, "Set BT/WLAN RX diversity capability\n");
}
#endif
if (sc->sc_pci_devinfo & ATH_PCI_D3_L1_WAR) {
ah_config->ath_hal_pcie_waen = 0x0040473b;
device_printf(sc->sc_dev, "Enable WAR for ASPM D3/L1\n");
}
#if 0
if (sc->sc_pci_devinfo & ATH9K_PCI_NO_PLL_PWRSAVE) {
ah->config.no_pll_pwrsave = true;
device_printf(sc->sc_dev, "Disable PLL PowerSave\n");
}
#endif
}
/*
* Attempt to fetch the MAC address from the kernel environment.
*
* Returns 0, macaddr in macaddr if successful; -1 otherwise.
*/
static int
ath_fetch_mac_kenv(struct ath_softc *sc, uint8_t *macaddr)
{
char devid_str[32];
int local_mac = 0;
char *local_macstr;
/*
* Fetch from the kenv rather than using hints.
*
* Hints would be nice but the transition to dynamic
* hints/kenv doesn't happen early enough for this
* to work reliably (eg on anything embedded.)
*/
snprintf(devid_str, 32, "hint.%s.%d.macaddr",
device_get_name(sc->sc_dev),
device_get_unit(sc->sc_dev));
if ((local_macstr = kern_getenv(devid_str)) != NULL) {
uint32_t tmpmac[ETHER_ADDR_LEN];
int count;
int i;
/* Have a MAC address; should use it */
device_printf(sc->sc_dev,
"Overriding MAC address from environment: '%s'\n",
local_macstr);
/* Extract out the MAC address */
count = sscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
&tmpmac[0], &tmpmac[1],
&tmpmac[2], &tmpmac[3],
&tmpmac[4], &tmpmac[5]);
if (count == 6) {
/* Valid! */
local_mac = 1;
for (i = 0; i < ETHER_ADDR_LEN; i++)
macaddr[i] = tmpmac[i];
}
/* Done! */
freeenv(local_macstr);
local_macstr = NULL;
}
if (local_mac)
return (0);
return (-1);
}
#define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define HAL_MODE_HT40 \
(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
- struct ifnet *ifp;
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ath_hal *ah = NULL;
HAL_STATUS status;
int error = 0, i;
u_int wmodes;
- uint8_t macaddr[IEEE80211_ADDR_LEN];
int rx_chainmask, tx_chainmask;
HAL_OPS_CONFIG ah_config;
DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
- CURVNET_SET(vnet0);
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "can not if_alloc()\n");
- error = ENOSPC;
- CURVNET_RESTORE();
- goto bad;
- }
- ic = ifp->if_l2com;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(sc->sc_dev);
- if_initname(ifp, device_get_name(sc->sc_dev),
- device_get_unit(sc->sc_dev));
- CURVNET_RESTORE();
-
/*
* Configure the initial configuration data.
*
* This is stuff that may be needed early during attach
* rather than done via configuration calls later.
*/
bzero(&ah_config, sizeof(ah_config));
ath_setup_hal_config(sc, &ah_config);
ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
sc->sc_eepromdata, &ah_config, &status);
if (ah == NULL) {
device_printf(sc->sc_dev,
"unable to attach hardware; HAL status %u\n", status);
error = ENXIO;
goto bad;
}
sc->sc_ah = ah;
sc->sc_invalid = 0; /* ready to go, enable interrupt handling */
#ifdef ATH_DEBUG
sc->sc_debug = ath_debug;
#endif
/*
* Setup the DMA/EDMA functions based on the current
* hardware support.
*
* This is required before the descriptors are allocated.
*/
if (ath_hal_hasedma(sc->sc_ah)) {
sc->sc_isedma = 1;
ath_recv_setup_edma(sc);
ath_xmit_setup_edma(sc);
} else {
ath_recv_setup_legacy(sc);
ath_xmit_setup_legacy(sc);
}
if (ath_hal_hasmybeacon(sc->sc_ah)) {
sc->sc_do_mybeacon = 1;
}
/*
* Check if the MAC has multi-rate retry support.
* We do this by trying to setup a fake extended
* descriptor. MAC's that don't have support will
* return false w/o doing anything. MAC's that do
* support it will return true w/o doing anything.
*/
sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);
/*
* Check if the device has hardware counters for PHY
* errors. If so we need to enable the MIB interrupt
* so we can act on stat triggers.
*/
if (ath_hal_hwphycounters(ah))
sc->sc_needmib = 1;
/*
* Get the hardware key cache size.
*/
sc->sc_keymax = ath_hal_keycachesize(ah);
if (sc->sc_keymax > ATH_KEYMAX) {
device_printf(sc->sc_dev,
"Warning, using only %u of %u key cache slots\n",
ATH_KEYMAX, sc->sc_keymax);
sc->sc_keymax = ATH_KEYMAX;
}
/*
* Reset the key cache since some parts do not
* reset the contents on initial power up.
*/
for (i = 0; i < sc->sc_keymax; i++)
ath_hal_keyreset(ah, i);
/*
* Collect the default channel list.
*/
error = ath_getchannels(sc);
if (error != 0)
goto bad;
/*
* Setup rate tables for all potential media types.
*/
ath_rate_setup(sc, IEEE80211_MODE_11A);
ath_rate_setup(sc, IEEE80211_MODE_11B);
ath_rate_setup(sc, IEEE80211_MODE_11G);
ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
ath_rate_setup(sc, IEEE80211_MODE_11NA);
ath_rate_setup(sc, IEEE80211_MODE_11NG);
ath_rate_setup(sc, IEEE80211_MODE_HALF);
ath_rate_setup(sc, IEEE80211_MODE_QUARTER);
/* NB: setup here so ath_rate_update is happy */
ath_setcurmode(sc, IEEE80211_MODE_11A);
/*
* Allocate TX descriptors and populate the lists.
*/
error = ath_desc_alloc(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"failed to allocate TX descriptors: %d\n", error);
goto bad;
}
error = ath_txdma_setup(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"failed to allocate TX descriptors: %d\n", error);
goto bad;
}
/*
* Allocate RX descriptors and populate the lists.
*/
error = ath_rxdma_setup(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"failed to allocate RX descriptors: %d\n", error);
goto bad;
}
callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);
ATH_TXBUF_LOCK_INIT(sc);
sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
taskqueue_thread_enqueue, &sc->sc_tq);
- taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
- "%s taskq", ifp->if_xname);
+ taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
+ device_get_nameunit(sc->sc_dev));
TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc);
TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc);
TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);
/*
* Allocate hardware transmit queues: one queue for
* beacon frames and one data queue for each QoS
* priority. Note that the hal handles resetting
* these queues at the needed time.
*
* XXX PS-Poll
*/
sc->sc_bhalq = ath_beaconq_setup(sc);
if (sc->sc_bhalq == (u_int) -1) {
device_printf(sc->sc_dev,
"unable to setup a beacon xmit queue!\n");
error = EIO;
goto bad2;
}
sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
if (sc->sc_cabq == NULL) {
device_printf(sc->sc_dev, "unable to setup CAB xmit queue!\n");
error = EIO;
goto bad2;
}
/* NB: insure BK queue is the lowest priority h/w queue */
if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
device_printf(sc->sc_dev,
"unable to setup xmit queue for %s traffic!\n",
ieee80211_wme_acnames[WME_AC_BK]);
error = EIO;
goto bad2;
}
if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
!ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
!ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
/*
* Not enough hardware tx queues to properly do WME;
* just punt and assign them all to the same h/w queue.
* We could do a better job of this if, for example,
* we allocate queues when we switch from station to
* AP mode.
*/
if (sc->sc_ac2q[WME_AC_VI] != NULL)
ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
if (sc->sc_ac2q[WME_AC_BE] != NULL)
ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
}
/*
* Attach the TX completion function.
*
* The non-EDMA chips may have some special case optimisations;
* this method gives everyone a chance to attach cleanly.
*/
sc->sc_tx.xmit_attach_comp_func(sc);
/*
* Setup rate control. Some rate control modules
* call back to change the antenna state so expose
* the necessary entry points.
* XXX maybe belongs in struct ath_ratectrl?
*/
sc->sc_setdefantenna = ath_setdefantenna;
sc->sc_rc = ath_rate_attach(sc);
if (sc->sc_rc == NULL) {
error = EIO;
goto bad2;
}
/* Attach DFS module */
if (! ath_dfs_attach(sc)) {
device_printf(sc->sc_dev,
"%s: unable to attach DFS\n", __func__);
error = EIO;
goto bad2;
}
/* Attach spectral module */
if (ath_spectral_attach(sc) < 0) {
device_printf(sc->sc_dev,
"%s: unable to attach spectral\n", __func__);
error = EIO;
goto bad2;
}
/* Attach bluetooth coexistence module */
if (ath_btcoex_attach(sc) < 0) {
device_printf(sc->sc_dev,
"%s: unable to attach bluetooth coexistence\n", __func__);
error = EIO;
goto bad2;
}
/* Attach LNA diversity module */
if (ath_lna_div_attach(sc) < 0) {
device_printf(sc->sc_dev,
"%s: unable to attach LNA diversity\n", __func__);
error = EIO;
goto bad2;
}
/* Start DFS processing tasklet */
TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);
/* Configure LED state */
sc->sc_blinking = 0;
sc->sc_ledstate = 1;
sc->sc_ledon = 0; /* low true */
sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */
callout_init(&sc->sc_ledtimer, 1);
/*
* Don't setup hardware-based blinking.
*
* Although some NICs may have this configured in the
* default reset register values, the user may wish
* to alter which pins have which function.
*
* The reference driver attaches the MAC network LED to GPIO1 and
* the MAC power LED to GPIO2. However, the DWA-552 cardbus
* NIC has these reversed.
*/
sc->sc_hardled = (1 == 0);
sc->sc_led_net_pin = -1;
sc->sc_led_pwr_pin = -1;
/*
* Auto-enable soft led processing for IBM cards and for
* 5211 minipci cards. Users can also manually enable/disable
* support with a sysctl.
*/
sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
ath_led_config(sc);
ath_hal_setledstate(ah, HAL_LED_INIT);
- ifp->if_softc = sc;
- ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
- ifp->if_transmit = ath_transmit;
- ifp->if_qflush = ath_qflush;
- ifp->if_ioctl = ath_ioctl;
- ifp->if_init = ath_init;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
/* XXX not right but it's not used anywhere important */
ic->ic_phytype = IEEE80211_T_OFDM;
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_caps =
IEEE80211_C_STA /* station mode */
| IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
| IEEE80211_C_HOSTAP /* hostap mode */
| IEEE80211_C_MONITOR /* monitor mode */
| IEEE80211_C_AHDEMO /* adhoc demo mode */
| IEEE80211_C_WDS /* 4-address traffic works */
| IEEE80211_C_MBSS /* mesh point link mode */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_WPA /* capable of WPA1+WPA2 */
#ifndef ATH_ENABLE_11N
| IEEE80211_C_BGSCAN /* capable of bg scanning */
#endif
| IEEE80211_C_TXFRAG /* handle tx frags */
#ifdef ATH_ENABLE_DFS
| IEEE80211_C_DFS /* Enable radar detection */
#endif
| IEEE80211_C_PMGT /* Station side power mgmt */
| IEEE80211_C_SWSLEEP
;
/*
* Query the hal to figure out h/w crypto support.
*/
if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
/*
* Check if h/w does the MIC and/or whether the
* separate key cache entries are required to
* handle both tx+rx MIC keys.
*/
if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
/*
* If the h/w supports storing tx+rx MIC keys
* in one cache slot automatically enable use.
*/
if (ath_hal_hastkipsplit(ah) ||
!ath_hal_settkipsplit(ah, AH_FALSE))
sc->sc_splitmic = 1;
/*
* If the h/w can do TKIP MIC together with WME then
* we use it; otherwise we force the MIC to be done
* in software by the net80211 layer.
*/
if (ath_hal_haswmetkipmic(ah))
sc->sc_wmetkipmic = 1;
}
sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
/*
* Check for multicast key search support.
*/
if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
!ath_hal_getmcastkeysearch(sc->sc_ah)) {
ath_hal_setmcastkeysearch(sc->sc_ah, 1);
}
sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
/*
* Mark key cache slots associated with global keys
* as in use. If we knew TKIP was not to be used we
* could leave the +32, +64, and +32+64 slots free.
*/
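/*
 * e.g. global key index 0 reserves slots 0 and 64, plus slots 32
 * and 96 when split MIC operation is in use.
 */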
for (i = 0; i < IEEE80211_WEP_NKID; i++) {
setbit(sc->sc_keymap, i);
setbit(sc->sc_keymap, i+64);
if (sc->sc_splitmic) {
setbit(sc->sc_keymap, i+32);
setbit(sc->sc_keymap, i+32+64);
}
}
/*
* TPC support can be done either with a global cap or
* per-packet support. The latter is not available on
* all parts. We're a bit pedantic here as all parts
* support a global cap.
*/
if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
ic->ic_caps |= IEEE80211_C_TXPMGT;
/*
* Mark WME capability only if we have sufficient
* hardware queues to do proper priority scheduling.
*/
if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
ic->ic_caps |= IEEE80211_C_WME;
/*
* Check for misc other capabilities.
*/
if (ath_hal_hasbursting(ah))
ic->ic_caps |= IEEE80211_C_BURST;
sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah);
sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah);
sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah);
if (ath_hal_hasfastframes(ah))
ic->ic_caps |= IEEE80211_C_FF;
wmodes = ath_hal_getwirelessmodes(ah);
if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
if (ath_hal_macversion(ah) > 0x78) {
ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
ic->ic_tdma_update = ath_tdma_update;
}
#endif
/*
* TODO: enforce that at least this many frames are available
* in the txbuf list before allowing data frames (raw or
* otherwise) to be transmitted.
*/
sc->sc_txq_data_minfree = 10;
/*
* Leave this as default to maintain legacy behaviour.
* Shortening the cabq/mcastq may end up causing some
* undesirable behaviour.
*/
sc->sc_txq_mcastq_maxdepth = ath_txbuf;
/*
* How deep can the node software TX queue get whilst it's asleep.
*/
sc->sc_txq_node_psq_maxdepth = 16;
/*
* Default the maximum queue depth for a given node
* to 1/4 of the TX buffers, or 64, whichever
* is larger.
*/
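/* e.g. a 512-buffer TX list yields a 128-entry per-node limit */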
sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4);
/* Enable CABQ by default */
sc->sc_cabq_enable = 1;
/*
* Allow the TX and RX chainmasks to be overridden by
* environment variables and/or device.hints.
*
* This must be done early - before the hardware is
* calibrated or before the 802.11n stream calculation
* is done.
*/
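/*
 * For example (hypothetical values), a NIC can be forced down to a
 * single TX/RX chain from loader.conf or /boot/device.hints:
 *
 *	hint.ath.0.rx_chainmask="0x1"
 *	hint.ath.0.tx_chainmask="0x1"
 */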
if (resource_int_value(device_get_name(sc->sc_dev),
device_get_unit(sc->sc_dev), "rx_chainmask",
&rx_chainmask) == 0) {
device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
rx_chainmask);
(void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
}
if (resource_int_value(device_get_name(sc->sc_dev),
device_get_unit(sc->sc_dev), "tx_chainmask",
&tx_chainmask) == 0) {
device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
tx_chainmask);
(void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
}
/*
* Query the TX/RX chainmask configuration.
*
* This is only relevant for 11n devices.
*/
ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);
/*
* Disable MRR with protected frames by default.
* Only 802.11n series NICs can handle this.
*/
sc->sc_mrrprot = 0; /* XXX should be a capability */
/*
* Query the enterprise mode information from the HAL.
*/
if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0,
&sc->sc_ent_cfg) == HAL_OK)
sc->sc_use_ent = 1;
#ifdef ATH_ENABLE_11N
/*
* Query HT capabilities
*/
if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
(wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
uint32_t rxs, txs;
device_printf(sc->sc_dev, "[HT] enabling HT modes\n");
sc->sc_mrrprot = 1; /* XXX should be a capability */
ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */
| IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */
| IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */
| IEEE80211_HTCAP_MAXAMSDU_3839
/* max A-MSDU length */
| IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */
/*
* Enable short-GI for HT20 only if the hardware
* advertises support.
* Notably, anything earlier than the AR9287 doesn't.
*/
if ((ath_hal_getcapability(ah,
HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
(wmodes & HAL_MODE_HT20)) {
device_printf(sc->sc_dev,
"[HT] enabling short-GI in 20MHz mode\n");
ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
}
if (wmodes & HAL_MODE_HT40)
ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
| IEEE80211_HTCAP_SHORTGI40;
/*
* TX/RX streams need to be taken into account when
* negotiating which MCS rates it'll receive and
* what MCS rates are available for TX.
*/
(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);
ic->ic_txstream = txs;
ic->ic_rxstream = rxs;
/*
* Setup TX and RX STBC based on what the HAL allows and
* the currently configured chainmask set.
* Ie - don't enable STBC TX if only one chain is enabled.
* STBC RX is fine on a single RX chain; it just won't
* provide any real benefit.
*/
if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0,
NULL) == HAL_OK) {
sc->sc_rx_stbc = 1;
device_printf(sc->sc_dev,
"[HT] 1 stream STBC receive enabled\n");
ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM;
}
if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0,
NULL) == HAL_OK) {
sc->sc_tx_stbc = 1;
device_printf(sc->sc_dev,
"[HT] 1 stream STBC transmit enabled\n");
ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC;
}
(void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
&sc->sc_rts_aggr_limit);
if (sc->sc_rts_aggr_limit != (64 * 1024))
device_printf(sc->sc_dev,
"[HT] RTS aggregates limited to %d KiB\n",
sc->sc_rts_aggr_limit / 1024);
device_printf(sc->sc_dev,
"[HT] %d RX streams; %d TX streams\n", rxs, txs);
}
#endif
/*
* Initial aggregation settings.
*/
sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH;
sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH;
sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
sc->sc_aggr_limit = ATH_AGGR_MAXSIZE;
sc->sc_delim_min_pad = 0;
/*
* Check if the hardware requires PCI register serialisation.
* Some of the Owl based MACs require this.
*/
if (mp_ncpus > 1 &&
ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
0, NULL) == HAL_OK) {
sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
device_printf(sc->sc_dev,
"Enabling register serialisation\n");
}
/*
* Initialise the deferred completed RX buffer list.
*/
TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
/*
* Indicate we need the 802.11 header padded to a
* 32-bit boundary for 4-address and QoS frames.
*/
ic->ic_flags |= IEEE80211_F_DATAPAD;
/*
* Query the hal about antenna support.
*/
sc->sc_defant = ath_hal_getdefantenna(ah);
/*
* Not all chips have the VEOL support we want to
* use with IBSS beacons; check here for it.
*/
sc->sc_hasveol = ath_hal_hasveol(ah);
/* get mac address from kenv first, then hardware */
- if (ath_fetch_mac_kenv(sc, macaddr) == 0) {
+ if (ath_fetch_mac_kenv(sc, ic->ic_macaddr) == 0) {
/* Tell the HAL now about the new MAC */
- ath_hal_setmac(ah, macaddr);
+ ath_hal_setmac(ah, ic->ic_macaddr);
} else {
- ath_hal_getmac(ah, macaddr);
+ ath_hal_getmac(ah, ic->ic_macaddr);
}
if (sc->sc_hasbmask)
ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);
/* NB: used to size node table key mapping array */
ic->ic_max_keyix = sc->sc_keymax;
/* call MI attach routine. */
- ieee80211_ifattach(ic, macaddr);
+ ieee80211_ifattach(ic);
ic->ic_setregdomain = ath_setregdomain;
ic->ic_getradiocaps = ath_getradiocaps;
sc->sc_opmode = HAL_M_STA;
/* override default methods */
+ ic->ic_ioctl = ath_ioctl;
+ ic->ic_parent = ath_parent;
+ ic->ic_transmit = ath_transmit;
ic->ic_newassoc = ath_newassoc;
ic->ic_updateslot = ath_updateslot;
ic->ic_wme.wme_update = ath_wme_update;
ic->ic_vap_create = ath_vap_create;
ic->ic_vap_delete = ath_vap_delete;
ic->ic_raw_xmit = ath_raw_xmit;
ic->ic_update_mcast = ath_update_mcast;
ic->ic_update_promisc = ath_update_promisc;
ic->ic_node_alloc = ath_node_alloc;
sc->sc_node_free = ic->ic_node_free;
ic->ic_node_free = ath_node_free;
sc->sc_node_cleanup = ic->ic_node_cleanup;
ic->ic_node_cleanup = ath_node_cleanup;
ic->ic_node_getsignal = ath_node_getsignal;
ic->ic_scan_start = ath_scan_start;
ic->ic_scan_end = ath_scan_end;
ic->ic_set_channel = ath_set_channel;
#ifdef ATH_ENABLE_11N
/* 802.11n specific - but just override anyway */
sc->sc_addba_request = ic->ic_addba_request;
sc->sc_addba_response = ic->ic_addba_response;
sc->sc_addba_stop = ic->ic_addba_stop;
sc->sc_bar_response = ic->ic_bar_response;
sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;
ic->ic_addba_request = ath_addba_request;
ic->ic_addba_response = ath_addba_response;
ic->ic_addba_response_timeout = ath_addba_response_timeout;
ic->ic_addba_stop = ath_addba_stop;
ic->ic_bar_response = ath_bar_response;
ic->ic_update_chw = ath_update_chw;
#endif /* ATH_ENABLE_11N */
#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
/*
* There's one vendor bitmap entry in the RX radiotap
* header; make sure that's taken into account.
*/
ieee80211_radiotap_attachv(ic,
&sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0,
ATH_TX_RADIOTAP_PRESENT,
&sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1,
ATH_RX_RADIOTAP_PRESENT);
#else
/*
* No vendor bitmap/extensions are present.
*/
ieee80211_radiotap_attach(ic,
&sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
ATH_TX_RADIOTAP_PRESENT,
&sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
ATH_RX_RADIOTAP_PRESENT);
#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
/*
* Setup the ALQ logging if required
*/
#ifdef ATH_DEBUG_ALQ
if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev));
if_ath_alq_setcfg(&sc->sc_alq,
sc->sc_ah->ah_macVersion,
sc->sc_ah->ah_macRev,
sc->sc_ah->ah_phyRev,
sc->sc_ah->ah_magic);
#endif
/*
* Setup dynamic sysctl's now that country code and
* regdomain are available from the hal.
*/
ath_sysctlattach(sc);
ath_sysctl_stats_attach(sc);
ath_sysctl_hal_attach(sc);
if (bootverbose)
ieee80211_announce(ic);
ath_announce(sc);
/*
* Put it to sleep for now.
*/
ATH_LOCK(sc);
ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
ATH_UNLOCK(sc);
return 0;
bad2:
ath_tx_cleanup(sc);
ath_desc_free(sc);
ath_txdma_teardown(sc);
ath_rxdma_teardown(sc);
bad:
if (ah)
ath_hal_detach(ah);
-
- /*
- * To work around scoping issues with CURVNET_SET/CURVNET_RESTORE..
- */
- if (ifp != NULL && ifp->if_vnet) {
- CURVNET_SET(ifp->if_vnet);
- if_free(ifp);
- CURVNET_RESTORE();
- } else if (ifp != NULL)
- if_free(ifp);
sc->sc_invalid = 1;
return error;
}
int
ath_detach(struct ath_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
- __func__, ifp->if_flags);
-
/*
* NB: the order of these is important:
* o stop the chip so no more interrupts will fire
* o call the 802.11 layer before detaching the hal to
* ensure callbacks into the driver to delete global
* key cache entries can be handled
* o free the taskqueue which drains any pending tasks
* o reclaim the tx queue data structures after calling
* the 802.11 layer as we'll get called back to reclaim
* node state and potentially want to use them
* o to cleanup the tx queues the hal is called, so detach
* it last
* Other than that, it's straightforward...
*/
/*
* XXX Wake the hardware up first. ath_stop() will still
* wake it up first, but I'd rather do it here just to
* ensure it's awake.
*/
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ath_power_setpower(sc, HAL_PM_AWAKE);
- ATH_UNLOCK(sc);
/*
* Stop things cleanly.
*/
- ath_stop(ifp);
+ ath_stop(sc);
+ ATH_UNLOCK(sc);
- ieee80211_ifdetach(ifp->if_l2com);
+ ieee80211_ifdetach(&sc->sc_ic);
taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
if (sc->sc_tx99 != NULL)
sc->sc_tx99->detach(sc->sc_tx99);
#endif
ath_rate_detach(sc->sc_rc);
#ifdef ATH_DEBUG_ALQ
if_ath_alq_tidyup(&sc->sc_alq);
#endif
ath_lna_div_detach(sc);
ath_btcoex_detach(sc);
ath_spectral_detach(sc);
ath_dfs_detach(sc);
ath_desc_free(sc);
ath_txdma_teardown(sc);
ath_rxdma_teardown(sc);
ath_tx_cleanup(sc);
ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */
- CURVNET_SET(ifp->if_vnet);
- if_free(ifp);
- CURVNET_RESTORE();
-
return 0;
}
/*
* MAC address handling for multiple BSS on the same radio.
* The first vap uses the MAC address from the EEPROM. For
* subsequent vap's we set the U/L bit (bit 1) in the MAC
* address and use the next six bits as an index.
*/
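/*
 * For example, if the EEPROM address begins with octet 0x00, the
 * second vap (index 1) gets a first octet of 0x06: (1 << 2) | 0x2
 * sets the locally-administered bit and encodes the index in bits 2-4.
 */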
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
int i;
if (clone && sc->sc_hasbmask) {
/* NB: we only do this if h/w supports multiple bssid */
for (i = 0; i < 8; i++)
if ((sc->sc_bssidmask & (1<<i)) == 0)
break;
if (i != 0)
mac[0] |= (i << 2)|0x2;
} else
i = 0;
sc->sc_bssidmask |= 1<<i;
sc->sc_hwbssidmask[0] &= ~mac[0];
if (i == 0)
sc->sc_nbssid0++;
}
static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
int i = mac[0] >> 2;
uint8_t mask;
if (i != 0 || --sc->sc_nbssid0 == 0) {
sc->sc_bssidmask &= ~(1<<i);
/* recalculate bssid mask from remaining addresses */
mask = 0xff;
for (i = 1; i < 8; i++)
if (sc->sc_bssidmask & (1<<i))
mask &= ~((i<<2)|0x2);
sc->sc_hwbssidmask[0] |= mask;
}
}
/*
* Assign a beacon xmit slot. We try to space out
* assignments so when beacons are staggered the
* traffic coming out of the cab q has maximal time
* to go out before the next beacon is scheduled.
*/
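/*
 * e.g. if only slot 0 is occupied, slot 2 (both neighbours free) is
 * preferred over slot 1; a slot with an occupied neighbour is only
 * returned when no such "double" slot exists.
 */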
static int
assign_bslot(struct ath_softc *sc)
{
u_int slot, free;
free = 0;
for (slot = 0; slot < ATH_BCBUF; slot++)
if (sc->sc_bslot[slot] == NULL) {
if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
return slot;
free = slot;
/* NB: keep looking for a double slot */
}
return free;
}
static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac0[IEEE80211_ADDR_LEN])
{
struct ath_softc *sc = ic->ic_softc;
struct ath_vap *avp;
struct ieee80211vap *vap;
uint8_t mac[IEEE80211_ADDR_LEN];
int needbeacon, error;
enum ieee80211_opmode ic_opmode;
avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
M_80211_VAP, M_WAITOK | M_ZERO);
needbeacon = 0;
IEEE80211_ADDR_COPY(mac, mac0);
ATH_LOCK(sc);
ic_opmode = opmode; /* default to opmode of new vap */
switch (opmode) {
case IEEE80211_M_STA:
if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */
device_printf(sc->sc_dev, "only 1 sta vap supported\n");
goto bad;
}
if (sc->sc_nvaps) {
/*
* With multiple vaps we must fall back
* to s/w beacon miss handling.
*/
flags |= IEEE80211_CLONE_NOBEACONS;
}
if (flags & IEEE80211_CLONE_NOBEACONS) {
/*
* Station mode w/o beacons is implemented w/ AP mode.
*/
ic_opmode = IEEE80211_M_HOSTAP;
}
break;
case IEEE80211_M_IBSS:
if (sc->sc_nvaps != 0) { /* XXX only 1 for now */
device_printf(sc->sc_dev,
"only 1 ibss vap supported\n");
goto bad;
}
needbeacon = 1;
break;
case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
if (flags & IEEE80211_CLONE_TDMA) {
if (sc->sc_nvaps != 0) {
device_printf(sc->sc_dev,
"only 1 tdma vap supported\n");
goto bad;
}
needbeacon = 1;
flags |= IEEE80211_CLONE_NOBEACONS;
}
/* fall thru... */
#endif
case IEEE80211_M_MONITOR:
if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
/*
* Adopt existing mode. Adding a monitor or ahdemo
* vap to an existing configuration is of dubious
* value but should be ok.
*/
/* XXX not right for monitor mode */
ic_opmode = ic->ic_opmode;
}
break;
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
needbeacon = 1;
break;
case IEEE80211_M_WDS:
if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
device_printf(sc->sc_dev,
"wds not supported in sta mode\n");
goto bad;
}
/*
* Silently remove any request for a unique
* bssid; WDS vap's always share the local
* mac address.
*/
flags &= ~IEEE80211_CLONE_BSSID;
if (sc->sc_nvaps == 0)
ic_opmode = IEEE80211_M_HOSTAP;
else
ic_opmode = ic->ic_opmode;
break;
default:
device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
goto bad;
}
/*
* Check that a beacon buffer is available; the code below assumes it.
*/
if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
device_printf(sc->sc_dev, "no beacon buffer available\n");
goto bad;
}
/* STA, AHDEMO? */
if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
}
vap = &avp->av_vap;
/* XXX can't hold mutex across if_alloc */
ATH_UNLOCK(sc);
- error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
- bssid, mac);
+ error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
ATH_LOCK(sc);
if (error != 0) {
device_printf(sc->sc_dev, "%s: error %d creating vap\n",
__func__, error);
goto bad2;
}
/* h/w crypto support */
vap->iv_key_alloc = ath_key_alloc;
vap->iv_key_delete = ath_key_delete;
vap->iv_key_set = ath_key_set;
vap->iv_key_update_begin = ath_key_update_begin;
vap->iv_key_update_end = ath_key_update_end;
/* override various methods */
avp->av_recv_mgmt = vap->iv_recv_mgmt;
vap->iv_recv_mgmt = ath_recv_mgmt;
vap->iv_reset = ath_reset_vap;
vap->iv_update_beacon = ath_beacon_update;
avp->av_newstate = vap->iv_newstate;
vap->iv_newstate = ath_newstate;
avp->av_bmiss = vap->iv_bmiss;
vap->iv_bmiss = ath_bmiss_vap;
avp->av_node_ps = vap->iv_node_ps;
vap->iv_node_ps = ath_node_powersave;
avp->av_set_tim = vap->iv_set_tim;
vap->iv_set_tim = ath_node_set_tim;
avp->av_recv_pspoll = vap->iv_recv_pspoll;
vap->iv_recv_pspoll = ath_node_recv_pspoll;
/* Set default parameters */
/*
* Anything earlier than some AR9300 series MACs don't
* support a smaller MPDU density.
*/
vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
/*
* All NICs can handle the maximum size, however
* AR5416 based MACs can only TX aggregates w/ RTS
* protection when the total aggregate size is <= 8k.
* However, for now that's enforced by the TX path.
*/
vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
avp->av_bslot = -1;
if (needbeacon) {
/*
* Allocate beacon state and setup the q for buffered
* multicast frames. We know a beacon buffer is
* available because we checked above.
*/
avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
/*
* Assign the vap to a beacon xmit slot. As above
* this cannot fail to find a free one.
*/
avp->av_bslot = assign_bslot(sc);
KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
("beacon slot %u not empty", avp->av_bslot));
sc->sc_bslot[avp->av_bslot] = vap;
sc->sc_nbcnvaps++;
}
if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
/*
* Multiple vaps are to transmit beacons and we
* have h/w support for TSF adjusting; enable
* use of staggered beacons.
*/
sc->sc_stagbeacons = 1;
}
ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
}
ic->ic_opmode = ic_opmode;
if (opmode != IEEE80211_M_WDS) {
sc->sc_nvaps++;
if (opmode == IEEE80211_M_STA)
sc->sc_nstavaps++;
if (opmode == IEEE80211_M_MBSS)
sc->sc_nmeshvaps++;
}
switch (ic_opmode) {
case IEEE80211_M_IBSS:
sc->sc_opmode = HAL_M_IBSS;
break;
case IEEE80211_M_STA:
sc->sc_opmode = HAL_M_STA;
break;
case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
if (vap->iv_caps & IEEE80211_C_TDMA) {
sc->sc_tdma = 1;
/* NB: disable tsf adjust */
sc->sc_stagbeacons = 0;
}
/*
* NB: adhoc demo mode is a pseudo mode; to the hal it's
* just ap mode.
*/
/* fall thru... */
#endif
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
sc->sc_opmode = HAL_M_HOSTAP;
break;
case IEEE80211_M_MONITOR:
sc->sc_opmode = HAL_M_MONITOR;
break;
default:
/* XXX should not happen */
break;
}
if (sc->sc_hastsfadd) {
/*
* Configure whether or not TSF adjust should be done.
*/
ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
}
if (flags & IEEE80211_CLONE_NOBEACONS) {
/*
* Enable s/w beacon miss handling.
*/
sc->sc_swbmiss = 1;
}
ATH_UNLOCK(sc);
/* complete setup */
- ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
+ ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status,
+ mac);
return vap;
bad2:
reclaim_address(sc, mac);
ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
bad:
free(avp, M_80211_VAP);
ATH_UNLOCK(sc);
return NULL;
}
static void
ath_vap_delete(struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct ath_softc *sc = ic->ic_softc;
struct ath_hal *ah = sc->sc_ah;
struct ath_vap *avp = ATH_VAP(vap);
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (sc->sc_running) {
/*
* Quiesce the hardware while we remove the vap. In
* particular we need to reclaim all references to
* the vap state by any frames pending on the tx queues.
*/
ath_hal_intrset(ah, 0); /* disable interrupts */
/* XXX Do all frames from all vaps/nodes need draining here? */
ath_stoprecv(sc, 1); /* stop recv side */
ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */
}
/* .. leave the hardware awake for now. */
ieee80211_vap_detach(vap);
/*
* XXX Danger Will Robinson! Danger!
*
* Because ieee80211_vap_detach() can queue a frame (the station
* disassociate message?) after we've drained the TXQ and
* flushed the software TXQ, we will end up with a frame queued
* to a node whose vap is about to be freed.
*
* To work around this, flush the hardware/software again.
* This may be racy - the ath task may be running and the packet
* may be being scheduled between sw->hw txq. Tsk.
*
* TODO: figure out why a new node gets allocated somewhere around
- * here (after the ath_tx_swq() call; and after an ath_stop_locked()
+ * here (after the ath_tx_swq() call; and after an ath_stop()
* call!)
*/
ath_draintxq(sc, ATH_RESET_DEFAULT);
ATH_LOCK(sc);
/*
* Reclaim beacon state. Note this must be done before
* the vap instance is reclaimed as we may have a reference
* to it in the buffer for the beacon frame.
*/
if (avp->av_bcbuf != NULL) {
if (avp->av_bslot != -1) {
sc->sc_bslot[avp->av_bslot] = NULL;
sc->sc_nbcnvaps--;
}
ath_beacon_return(sc, avp->av_bcbuf);
avp->av_bcbuf = NULL;
if (sc->sc_nbcnvaps == 0) {
sc->sc_stagbeacons = 0;
if (sc->sc_hastsfadd)
ath_hal_settsfadjust(sc->sc_ah, 0);
}
/*
* Reclaim any pending mcast frames for the vap.
*/
ath_tx_draintxq(sc, &avp->av_mcastq);
}
/*
* Update bookkeeping.
*/
if (vap->iv_opmode == IEEE80211_M_STA) {
sc->sc_nstavaps--;
if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
sc->sc_swbmiss = 0;
} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_MBSS) {
reclaim_address(sc, vap->iv_myaddr);
ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
if (vap->iv_opmode == IEEE80211_M_MBSS)
sc->sc_nmeshvaps--;
}
if (vap->iv_opmode != IEEE80211_M_WDS)
sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
/* TDMA operation ceases when the last vap is destroyed */
if (sc->sc_tdma && sc->sc_nvaps == 0) {
sc->sc_tdma = 0;
sc->sc_swbmiss = 0;
}
#endif
free(avp, M_80211_VAP);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (sc->sc_running) {
/*
* Restart rx+tx machines if still running (RUNNING will
* be reset if we just destroyed the last vap).
*/
if (ath_startrecv(sc) != 0)
device_printf(sc->sc_dev,
"%s: unable to restart recv logic\n", __func__);
if (sc->sc_beacons) { /* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
if (sc->sc_tdma)
ath_tdma_config(sc, NULL);
else
#endif
ath_beacon_config(sc, NULL);
}
ath_hal_intrset(ah, sc->sc_imask);
}
/* Ok, let the hardware go back to sleep. */
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
}
void
ath_suspend(struct ath_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
- DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
- __func__, ifp->if_flags);
+ sc->sc_resume_up = ic->ic_nrunning != 0;
- sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
-
ieee80211_suspend_all(ic);
/*
* NB: don't worry about putting the chip in low power
* mode; pci will power off our socket on suspend and
* CardBus detaches the device.
*
* XXX TODO: well, that's great, except for non-cardbus
* devices!
*/
/*
* XXX This doesn't wait until all pending taskqueue
* items and parallel transmit/receive/other threads
* are running!
*/
ath_hal_intrset(sc->sc_ah, 0);
taskqueue_block(sc->sc_tq);
ATH_LOCK(sc);
callout_stop(&sc->sc_cal_ch);
ATH_UNLOCK(sc);
/*
* XXX ensure sc_invalid is 1
*/
/* Disable the PCIe PHY, complete with workarounds */
ath_hal_enablepcie(sc->sc_ah, 1, 1);
}
/*
* Reset the key cache since some parts do not reset the
* contents on resume. First we clear all entries, then
* re-load keys that the 802.11 layer assumes are setup
* in h/w.
*/
static void
ath_reset_keycache(struct ath_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ath_hal *ah = sc->sc_ah;
int i;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
for (i = 0; i < sc->sc_keymax; i++)
ath_hal_keyreset(ah, i);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
ieee80211_crypto_reload_keys(ic);
}
/*
* Fetch the current chainmask configuration based on the current
* operating channel and options.
*/
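/*
 * e.g. a 2x2 NIC configured with chainmask 0x3 keeps 0x3 for RX but
 * transmits on a single chain (0x1) on non-HT channels.
 */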
static void
ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan)
{
/*
* Set the RX chainmask to the configured chainmask; the TX
* chainmask depends upon whether the current channel is HT.
*/
sc->sc_cur_rxchainmask = sc->sc_rxchainmask;
if (IEEE80211_IS_CHAN_HT(chan)) {
sc->sc_cur_txchainmask = sc->sc_txchainmask;
} else {
sc->sc_cur_txchainmask = 1;
}
DPRINTF(sc, ATH_DEBUG_RESET,
"%s: TX chainmask is now 0x%x, RX is now 0x%x\n",
__func__,
sc->sc_cur_txchainmask,
sc->sc_cur_rxchainmask);
}
void
ath_resume(struct ath_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ath_hal *ah = sc->sc_ah;
HAL_STATUS status;
- DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
- __func__, ifp->if_flags);
-
- /* Re-enable PCIe, re-enable the PCIe bus */
ath_hal_enablepcie(ah, 0, 0);
/*
* Must reset the chip before we reload the
* keycache as we were powered down on suspend.
*/
ath_update_chainmasks(sc,
sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan);
ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
sc->sc_cur_rxchainmask);
/* Ensure we set the current power state to on */
ATH_LOCK(sc);
ath_power_setselfgen(sc, HAL_PM_AWAKE);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ath_power_setpower(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ath_hal_reset(ah, sc->sc_opmode,
sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
AH_FALSE, &status);
ath_reset_keycache(sc);
ATH_RX_LOCK(sc);
sc->sc_rx_stopped = 1;
sc->sc_rx_resetted = 1;
ATH_RX_UNLOCK(sc);
/* Let DFS at it in case it's a DFS channel */
ath_dfs_radar_enable(sc, ic->ic_curchan);
/* Let spectral at it in case spectral is enabled */
ath_spectral_enable(sc, ic->ic_curchan);
/*
* Let bluetooth coexistence at it in case it's needed for this channel
*/
ath_btcoex_enable(sc, ic->ic_curchan);
/*
* If we're doing TDMA, enforce the TXOP limitation for chips that
* support it.
*/
if (sc->sc_hasenforcetxop && sc->sc_tdma)
ath_hal_setenforcetxop(sc->sc_ah, 1);
else
ath_hal_setenforcetxop(sc->sc_ah, 0);
/* Restore the LED configuration */
ath_led_config(sc);
ath_hal_setledstate(ah, HAL_LED_INIT);
if (sc->sc_resume_up)
ieee80211_resume_all(ic);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
/* XXX beacons ? */
}
void
ath_shutdown(struct ath_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
- __func__, ifp->if_flags);
-
- ath_stop(ifp);
+ ATH_LOCK(sc);
+ ath_stop(sc);
+ ATH_UNLOCK(sc);
/* NB: no point powering down chip as we're about to reboot */
}
/*
* Interrupt handler. Most of the actual processing is deferred.
*/
void
ath_intr(void *arg)
{
struct ath_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
struct ath_hal *ah = sc->sc_ah;
HAL_INT status = 0;
uint32_t txqs;
/*
* If we're inside a reset path, just print a warning and
* clear the ISR. The reset routine will finish it for us.
*/
ATH_PCU_LOCK(sc);
if (sc->sc_inreset_cnt) {
HAL_INT status;
ath_hal_getisr(ah, &status); /* clear ISR */
ath_hal_intrset(ah, 0); /* disable further intr's */
DPRINTF(sc, ATH_DEBUG_ANY,
"%s: in reset, ignoring: status=0x%x\n",
__func__, status);
ATH_PCU_UNLOCK(sc);
return;
}
if (sc->sc_invalid) {
/*
* The hardware is not ready/present, don't touch anything.
* Note this can happen early on if the IRQ is shared.
*/
DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
ATH_PCU_UNLOCK(sc);
return;
}
if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */
ATH_PCU_UNLOCK(sc);
return;
}
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
- if ((ifp->if_flags & IFF_UP) == 0 ||
- (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ if (sc->sc_ic.ic_nrunning == 0 && sc->sc_running == 0) {
HAL_INT status;
- DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
- __func__, ifp->if_flags);
+ DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_nrunning %d sc_running %d\n",
+ __func__, sc->sc_ic.ic_nrunning, sc->sc_running);
ath_hal_getisr(ah, &status); /* clear ISR */
ath_hal_intrset(ah, 0); /* disable further intr's */
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return;
}
/*
* Figure out the reason(s) for the interrupt. Note
* that the hal returns a pseudo-ISR that may include
* bits we haven't explicitly enabled so we mask the
* value to ensure we only process bits we requested.
*/
ath_hal_getisr(ah, &status); /* NB: clears ISR too */
DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status);
#ifdef ATH_DEBUG_ALQ
if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate,
ah->ah_syncstate);
#endif /* ATH_DEBUG_ALQ */
#ifdef ATH_KTR_INTR_DEBUG
ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5,
"ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
ah->ah_intrstate[0],
ah->ah_intrstate[1],
ah->ah_intrstate[2],
ah->ah_intrstate[3],
ah->ah_intrstate[6]);
#endif
/* Squirrel away SYNC interrupt debugging */
if (ah->ah_syncstate != 0) {
int i;
for (i = 0; i < 32; i++)
if (ah->ah_syncstate & (1 << i))
sc->sc_intr_stats.sync_intr[i]++;
}
status &= sc->sc_imask; /* discard unasked for bits */
/* Short-circuit un-handled interrupts */
if (status == 0x0) {
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return;
}
/*
* Take a note that we're inside the interrupt handler, so
* the reset routines know to wait.
*/
sc->sc_intr_cnt++;
ATH_PCU_UNLOCK(sc);
/*
* Handle the interrupt. We won't run concurrent with the reset
* or channel change routines as they'll wait for sc_intr_cnt
* to be 0 before continuing.
*/
if (status & HAL_INT_FATAL) {
sc->sc_stats.ast_hardware++;
ath_hal_intrset(ah, 0); /* disable intr's until reset */
taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
} else {
if (status & HAL_INT_SWBA) {
/*
* Software beacon alert--time to send a beacon.
* Handle beacon transmission directly; deferring
* this is too slow to meet timing constraints
* under load.
*/
#ifdef IEEE80211_SUPPORT_TDMA
if (sc->sc_tdma) {
if (sc->sc_tdmaswba == 0) {
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap =
TAILQ_FIRST(&ic->ic_vaps);
ath_tdma_beacon_send(sc, vap);
sc->sc_tdmaswba =
vap->iv_tdma->tdma_bintval;
} else
sc->sc_tdmaswba--;
} else
#endif
{
ath_beacon_proc(sc, 0);
#ifdef IEEE80211_SUPPORT_SUPERG
/*
* Schedule the rx taskq in case there's no
* traffic so any frames held on the staging
* queue are aged and potentially flushed.
*/
sc->sc_rx.recv_sched(sc, 1);
#endif
}
}
if (status & HAL_INT_RXEOL) {
int imask;
ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL");
if (! sc->sc_isedma) {
ATH_PCU_LOCK(sc);
/*
* NB: the hardware should re-read the link when
* RXE bit is written, but it doesn't work at
* least on older hardware revs.
*/
sc->sc_stats.ast_rxeol++;
/*
* Disable RXEOL/RXORN - prevent an interrupt
* storm until the PCU logic can be reset.
* In case the interface is reset some other
* way before "sc_kickpcu" is called, don't
* modify sc_imask - that way if it is reset
* by a call to ath_reset() somehow, the
* interrupt mask will be correctly reprogrammed.
*/
imask = sc->sc_imask;
imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
ath_hal_intrset(ah, imask);
/*
* Only blank sc_rxlink if we've not yet kicked
* the PCU.
*
* This isn't entirely correct - the correct solution
* would be to have a PCU lock and engage that for
* the duration of the PCU fiddling; which would include
* running the RX process. Otherwise we could end up
* messing up the RX descriptor chain and making the
* RX desc list much shorter.
*/
if (! sc->sc_kickpcu)
sc->sc_rxlink = NULL;
sc->sc_kickpcu = 1;
ATH_PCU_UNLOCK(sc);
}
/*
* Enqueue an RX proc to handle whatever
* is in the RX queue.
* This will then kick the PCU if required.
*/
sc->sc_rx.recv_sched(sc, 1);
}
if (status & HAL_INT_TXURN) {
sc->sc_stats.ast_txurn++;
/* bump tx trigger level */
ath_hal_updatetxtriglevel(ah, AH_TRUE);
}
/*
* Handle both the legacy and RX EDMA interrupt bits.
* Note that HAL_INT_RXLP is also HAL_INT_RXDESC.
*/
if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) {
sc->sc_stats.ast_rx_intr++;
sc->sc_rx.recv_sched(sc, 1);
}
if (status & HAL_INT_TX) {
sc->sc_stats.ast_tx_intr++;
/*
* Grab all the currently set bits in the HAL txq bitmap
* and blank them. This is the only place we should be
* doing this.
*/
if (! sc->sc_isedma) {
ATH_PCU_LOCK(sc);
txqs = 0xffffffff;
ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3,
"ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x",
txqs,
sc->sc_txq_active,
sc->sc_txq_active | txqs);
sc->sc_txq_active |= txqs;
ATH_PCU_UNLOCK(sc);
}
taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
}
if (status & HAL_INT_BMISS) {
sc->sc_stats.ast_bmiss++;
taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
}
if (status & HAL_INT_GTT)
sc->sc_stats.ast_tx_timeout++;
if (status & HAL_INT_CST)
sc->sc_stats.ast_tx_cst++;
if (status & HAL_INT_MIB) {
sc->sc_stats.ast_mib++;
ATH_PCU_LOCK(sc);
/*
* Disable interrupts until we service the MIB
* interrupt; otherwise it will continue to fire.
*/
ath_hal_intrset(ah, 0);
/*
* Let the hal handle the event. We assume it will
* clear whatever condition caused the interrupt.
*/
ath_hal_mibevent(ah, &sc->sc_halstats);
/*
* Don't reset the interrupt if we've just
* kicked the PCU, or we may get a nested
* RXEOL before the rxproc has had a chance
* to run.
*/
if (sc->sc_kickpcu == 0)
ath_hal_intrset(ah, sc->sc_imask);
ATH_PCU_UNLOCK(sc);
}
if (status & HAL_INT_RXORN) {
/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN");
sc->sc_stats.ast_rxorn++;
}
if (status & HAL_INT_TSFOOR) {
device_printf(sc->sc_dev, "%s: TSFOOR\n", __func__);
sc->sc_syncbeacon = 1;
}
}
ATH_PCU_LOCK(sc);
sc->sc_intr_cnt--;
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
}
static void
ath_fatal_proc(void *arg, int pending)
{
struct ath_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
u_int32_t *state;
u_int32_t len;
void *sp;
if (sc->sc_invalid)
return;
device_printf(sc->sc_dev, "hardware error; resetting\n");
/*
* Fatal errors are unrecoverable. Typically these
* are caused by DMA errors. Collect h/w state from
* the hal so we can diagnose what's going on.
*/
if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
state = sp;
device_printf(sc->sc_dev,
"0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", state[0],
state[1] , state[2], state[3], state[4], state[5]);
}
- ath_reset(ifp, ATH_RESET_NOLOSS);
+ ath_reset(sc, ATH_RESET_NOLOSS);
}
static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
struct ath_softc *sc = vap->iv_ic->ic_softc;
/*
* Workaround phantom bmiss interrupts by sanity-checking
* the time of our last rx'd frame. If it is within the
* beacon miss interval then ignore the interrupt. If it's
* truly a bmiss we'll get another interrupt soon and that'll
* be dispatched up for processing. Note this applies only
* for h/w beacon miss events.
*/
/*
* XXX TODO: Just read the TSF during the interrupt path;
* that way we don't have to wake up again just to read it
* again.
*/
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
u_int64_t lastrx = sc->sc_lastrx;
u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
/* XXX should take a locked ref to iv_bss */
u_int bmisstimeout =
vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
__func__, (unsigned long long) tsf,
(unsigned long long)(tsf - lastrx),
(unsigned long long) lastrx, bmisstimeout);
if (tsf - lastrx <= bmisstimeout) {
sc->sc_stats.ast_bmiss_phantom++;
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return;
}
}
/*
* There's no need to keep the hardware awake during the call
* to av_bmiss().
*/
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
/*
* Attempt to force a beacon resync.
*/
sc->sc_syncbeacon = 1;
ATH_VAP(vap)->av_bmiss(vap);
}
/* XXX this needs a force wakeup! */
int
ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
{
uint32_t rsize;
void *sp;
if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
return 0;
KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
*hangs = *(uint32_t *)sp;
return 1;
}
static void
ath_bmiss_proc(void *arg, int pending)
{
struct ath_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
uint32_t hangs;
DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ath_beacon_miss(sc);
/*
* Do a reset upon any beacon miss event.
*
* It may be a non-recognised RX clear hang which needs a reset
* to clear.
*/
if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
- ath_reset(ifp, ATH_RESET_NOLOSS);
+ ath_reset(sc, ATH_RESET_NOLOSS);
device_printf(sc->sc_dev,
"bb hang detected (0x%x), resetting\n", hangs);
} else {
- ath_reset(ifp, ATH_RESET_NOLOSS);
- ieee80211_beacon_miss(ifp->if_l2com);
+ ath_reset(sc, ATH_RESET_NOLOSS);
+ ieee80211_beacon_miss(&sc->sc_ic);
}
/* Force a beacon resync, in case they've drifted */
sc->sc_syncbeacon = 1;
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
}
/*
* Handle TKIP MIC setup to deal with hardware that doesn't do MIC
* calcs together with WME. If necessary disable the crypto
* hardware and mark the 802.11 state so keys will be setup
* with the MIC work done in software.
*/
static void
ath_settkipmic(struct ath_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
if (ic->ic_flags & IEEE80211_F_WME) {
ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
} else {
ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
}
}
}
-static void
-ath_init(void *arg)
+static int
+ath_init(struct ath_softc *sc)
{
- struct ath_softc *sc = (struct ath_softc *) arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ath_hal *ah = sc->sc_ah;
HAL_STATUS status;
- DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
- __func__, ifp->if_flags);
+ ATH_LOCK_ASSERT(sc);
- ATH_LOCK(sc);
/*
* Force the sleep state awake.
*/
ath_power_setselfgen(sc, HAL_PM_AWAKE);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ath_power_setpower(sc, HAL_PM_AWAKE);
/*
* Stop anything previously setup. This is safe
* whether this is the first time through or not.
*/
- ath_stop_locked(ifp);
+ ath_stop(sc);
/*
* The basic interface to setting the hardware in a good
* state is ``reset''. On return the hardware is known to
* be powered up and with interrupts disabled. This must
* be followed by initialization of the appropriate bits
* and then setup of the interrupt mask.
*/
ath_settkipmic(sc);
ath_update_chainmasks(sc, ic->ic_curchan);
ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
sc->sc_cur_rxchainmask);
if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE,
&status)) {
device_printf(sc->sc_dev,
"unable to reset hardware; hal status %u\n", status);
- ATH_UNLOCK(sc);
- return;
+ return (ENODEV);
}
ATH_RX_LOCK(sc);
sc->sc_rx_stopped = 1;
sc->sc_rx_resetted = 1;
ATH_RX_UNLOCK(sc);
ath_chan_change(sc, ic->ic_curchan);
/* Let DFS at it in case it's a DFS channel */
ath_dfs_radar_enable(sc, ic->ic_curchan);
/* Let spectral at it in case spectral is enabled */
ath_spectral_enable(sc, ic->ic_curchan);
/*
* Let bluetooth coexistence at it in case it's needed for this channel
*/
ath_btcoex_enable(sc, ic->ic_curchan);
/*
* If we're doing TDMA, enforce the TXOP limitation for chips that
* support it.
*/
if (sc->sc_hasenforcetxop && sc->sc_tdma)
ath_hal_setenforcetxop(sc->sc_ah, 1);
else
ath_hal_setenforcetxop(sc->sc_ah, 0);
/*
* Likewise this is set during reset so update
* state cached in the driver.
*/
sc->sc_diversity = ath_hal_getdiversity(ah);
sc->sc_lastlongcal = ticks;
sc->sc_resetcal = 1;
sc->sc_lastcalreset = 0;
sc->sc_lastani = ticks;
sc->sc_lastshortcal = ticks;
sc->sc_doresetcal = AH_FALSE;
/*
* Beacon timers were cleared here; give ath_newstate()
* a hint that the beacon timers should be poked when
* things transition to the RUN state.
*/
sc->sc_beacons = 0;
/*
* Setup the hardware after reset: the key cache
* is filled as needed and the receive engine is
* set going. Frame transmit is handled entirely
* in the frame output path; there's nothing to do
* here except setup the interrupt mask.
*/
if (ath_startrecv(sc) != 0) {
device_printf(sc->sc_dev, "unable to start recv logic\n");
ath_power_restore_power_state(sc);
- ATH_UNLOCK(sc);
- return;
+ return (ENODEV);
}
/*
* Enable interrupts.
*/
sc->sc_imask = HAL_INT_RX | HAL_INT_TX
| HAL_INT_RXORN | HAL_INT_TXURN
| HAL_INT_FATAL | HAL_INT_GLOBAL;
/*
* Enable RX EDMA bits. Note these overlap with
* HAL_INT_RX and HAL_INT_RXDESC respectively.
*/
if (sc->sc_isedma)
sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP);
/*
* If we're an EDMA NIC, we don't care about RXEOL.
* Writing a new descriptor in will simply restart
* RX DMA.
*/
if (! sc->sc_isedma)
sc->sc_imask |= HAL_INT_RXEOL;
/*
* Enable MIB interrupts when there are hardware phy counters.
* Note we only do this (at the moment) for station mode.
*/
if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
sc->sc_imask |= HAL_INT_MIB;
/*
* XXX add capability for this.
*
* If we're in STA mode (and maybe IBSS?) then register for
* TSFOOR interrupts.
*/
if (ic->ic_opmode == IEEE80211_M_STA)
sc->sc_imask |= HAL_INT_TSFOOR;
/* Enable global TX timeout and carrier sense timeout if available */
if (ath_hal_gtxto_supported(ah))
sc->sc_imask |= HAL_INT_GTT;
DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
__func__, sc->sc_imask);
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_running = 1;
callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
ath_hal_intrset(ah, sc->sc_imask);
ath_power_restore_power_state(sc);
- ATH_UNLOCK(sc);
-#ifdef ATH_TX99_DIAG
- if (sc->sc_tx99 != NULL)
- sc->sc_tx99->start(sc->sc_tx99);
- else
-#endif
- ieee80211_start_all(ic); /* start all vap's */
+ return (0);
}
static void
-ath_stop_locked(struct ifnet *ifp)
+ath_stop(struct ath_softc *sc)
{
- struct ath_softc *sc = ifp->if_softc;
struct ath_hal *ah = sc->sc_ah;
- DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
- __func__, sc->sc_invalid, ifp->if_flags);
-
ATH_LOCK_ASSERT(sc);
/*
* Wake the hardware up before fiddling with it.
*/
ath_power_set_power_state(sc, HAL_PM_AWAKE);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (sc->sc_running) {
/*
* Shutdown the hardware and driver:
* reset 802.11 state machine
* turn off timers
* disable interrupts
* turn off the radio
* clear transmit machinery
* clear receive machinery
* drain and release tx queues
* reclaim beacon resources
* power down hardware
*
* Note that some of this work is not possible if the
* hardware is gone (invalid).
*/
#ifdef ATH_TX99_DIAG
if (sc->sc_tx99 != NULL)
sc->sc_tx99->stop(sc->sc_tx99);
#endif
callout_stop(&sc->sc_wd_ch);
sc->sc_wd_timer = 0;
- ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ sc->sc_running = 0;
if (!sc->sc_invalid) {
if (sc->sc_softled) {
callout_stop(&sc->sc_ledtimer);
ath_hal_gpioset(ah, sc->sc_ledpin,
!sc->sc_ledon);
sc->sc_blinking = 0;
}
ath_hal_intrset(ah, 0);
}
/* XXX we should stop RX regardless of whether it's valid */
if (!sc->sc_invalid) {
ath_stoprecv(sc, 1);
ath_hal_phydisable(ah);
} else
sc->sc_rxlink = NULL;
ath_draintxq(sc, ATH_RESET_DEFAULT);
ath_beacon_free(sc); /* XXX not needed */
}
/* And now, restore the current power state */
ath_power_restore_power_state(sc);
}
/*
* Wait until all pending TX/RX has completed.
*
* This waits until all existing transmit, receive and interrupts
* have completed. It's assumed that the caller has first
* grabbed the reset lock so it doesn't try to do overlapping
* chip resets.
*/
#define MAX_TXRX_ITERATIONS 100
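/* NB: with the 10ms sleeps below this bounds the wait to roughly 1 second */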
static void
ath_txrx_stop_locked(struct ath_softc *sc)
{
int i = MAX_TXRX_ITERATIONS;
ATH_UNLOCK_ASSERT(sc);
ATH_PCU_LOCK_ASSERT(sc);
/*
* Sleep until all the pending operations have completed.
*
* The caller must ensure that reset has been incremented
* or the pending operations may continue being queued.
*/
while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
sc->sc_txstart_cnt || sc->sc_intr_cnt) {
if (i <= 0)
break;
msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop",
msecs_to_ticks(10));
i--;
}
if (i <= 0)
device_printf(sc->sc_dev,
"%s: didn't finish after %d iterations\n",
__func__, MAX_TXRX_ITERATIONS);
}
#undef MAX_TXRX_ITERATIONS
#if 0
static void
ath_txrx_stop(struct ath_softc *sc)
{
ATH_UNLOCK_ASSERT(sc);
ATH_PCU_UNLOCK_ASSERT(sc);
ATH_PCU_LOCK(sc);
ath_txrx_stop_locked(sc);
ATH_PCU_UNLOCK(sc);
}
#endif
static void
ath_txrx_start(struct ath_softc *sc)
{
taskqueue_unblock(sc->sc_tq);
}
/*
* Grab the reset lock, and wait around until no one else
* is trying to do anything with it.
*
* This is totally horrible but we can't hold this lock for
* long enough to do TX/RX or we end up with net80211/ip stack
* LORs and eventual deadlock.
*
* "dowait" signals whether to spin, waiting for the reset
* lock count to reach 0. This should (for now) only be used
* during the reset path, as the rest of the code may not
* be locking-reentrant enough to behave correctly.
*
* Another, cleaner way should be found to serialise all of
* these operations.
*/
#define MAX_RESET_ITERATIONS 25
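/* NB: with the 100ms pauses below this bounds the wait to roughly 2.5 seconds */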
static int
ath_reset_grablock(struct ath_softc *sc, int dowait)
{
int w = 0;
int i = MAX_RESET_ITERATIONS;
ATH_PCU_LOCK_ASSERT(sc);
do {
if (sc->sc_inreset_cnt == 0) {
w = 1;
break;
}
if (dowait == 0) {
w = 0;
break;
}
ATH_PCU_UNLOCK(sc);
/*
* 1 tick is likely not enough time for long calibrations
* to complete. So we should wait quite a while.
*/
pause("ath_reset_grablock", msecs_to_ticks(100));
i--;
ATH_PCU_LOCK(sc);
} while (i > 0);
/*
* We always increment the refcounter, regardless
* of whether we succeeded to get it in an exclusive
* way.
*/
sc->sc_inreset_cnt++;
if (i <= 0)
device_printf(sc->sc_dev,
"%s: didn't finish after %d iterations\n",
__func__, MAX_RESET_ITERATIONS);
if (w == 0)
device_printf(sc->sc_dev,
"%s: warning, recursive reset path!\n",
__func__);
return w;
}
#undef MAX_RESET_ITERATIONS
/*
- * XXX TODO: write ath_reset_releaselock
- */
-
-static void
-ath_stop(struct ifnet *ifp)
-{
- struct ath_softc *sc = ifp->if_softc;
-
- ATH_LOCK(sc);
- ath_stop_locked(ifp);
- ATH_UNLOCK(sc);
-}
-
-/*
* Reset the hardware w/o losing operational state. This is
* basically a more efficient way of doing ath_stop, ath_init,
* followed by state transitions to the current 802.11
* operational state. Used to recover from various errors and
* to reset or reload hardware state.
*/
int
-ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
+ath_reset(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
{
- struct ieee80211com *ic = ifp->if_l2com;
- struct ath_softc *sc = ic->ic_softc;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ath_hal *ah = sc->sc_ah;
HAL_STATUS status;
int i;
DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
/* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
ATH_PCU_UNLOCK_ASSERT(sc);
ATH_UNLOCK_ASSERT(sc);
/* Try to stop any further TX/RX from occurring */
taskqueue_block(sc->sc_tq);
/*
* Wake the hardware up.
*/
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ATH_PCU_LOCK(sc);
/*
* Grab the reset lock before TX/RX is stopped.
*
* This is needed to ensure that when the TX/RX actually does finish,
* no further TX/RX/reset runs in parallel with this.
*/
if (ath_reset_grablock(sc, 1) == 0) {
device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
__func__);
}
/* disable interrupts */
ath_hal_intrset(ah, 0);
/*
* Now, ensure that any in progress TX/RX completes before we
* continue.
*/
ath_txrx_stop_locked(sc);
ATH_PCU_UNLOCK(sc);
/*
* Regardless of whether we're doing a no-loss flush or
* not, stop the PCU and handle what's in the RX queue.
* That way frames aren't dropped which shouldn't be.
*/
ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
ath_rx_flush(sc);
/*
* Should now wait for pending TX/RX to complete
* and block future ones from occurring. This needs to be
* done before the TX queue is drained.
*/
ath_draintxq(sc, reset_type); /* stop xmit side */
ath_settkipmic(sc); /* configure TKIP MIC handling */
/* NB: indicate channel change so we do a full reset */
ath_update_chainmasks(sc, ic->ic_curchan);
ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
sc->sc_cur_rxchainmask);
if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
device_printf(sc->sc_dev,
"%s: unable to reset hardware; hal status %u\n",
__func__, status);
sc->sc_diversity = ath_hal_getdiversity(ah);
ATH_RX_LOCK(sc);
sc->sc_rx_stopped = 1;
sc->sc_rx_resetted = 1;
ATH_RX_UNLOCK(sc);
/* Let DFS at it in case it's a DFS channel */
ath_dfs_radar_enable(sc, ic->ic_curchan);
/* Let spectral at it in case spectral is enabled */
ath_spectral_enable(sc, ic->ic_curchan);
/*
* Let bluetooth coexistence at it in case it's needed for this channel
*/
ath_btcoex_enable(sc, ic->ic_curchan);
/*
* If we're doing TDMA, enforce the TXOP limitation for chips that
* support it.
*/
if (sc->sc_hasenforcetxop && sc->sc_tdma)
ath_hal_setenforcetxop(sc->sc_ah, 1);
else
ath_hal_setenforcetxop(sc->sc_ah, 0);
if (ath_startrecv(sc) != 0) /* restart recv */
device_printf(sc->sc_dev,
"%s: unable to start recv logic\n", __func__);
/*
* We may be doing a reset in response to an ioctl
* that changes the channel so update any state that
* might change as a result.
*/
ath_chan_change(sc, ic->ic_curchan);
if (sc->sc_beacons) { /* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
if (sc->sc_tdma)
ath_tdma_config(sc, NULL);
else
#endif
ath_beacon_config(sc, NULL);
}
/*
* Release the reset lock and re-enable interrupts here.
* If an interrupt was being processed in ath_intr(),
* it would disable interrupts at this point. So we have
* to atomically enable interrupts and decrement the
* reset counter - this way ath_intr() doesn't end up
* disabling interrupts without a corresponding enable
* in the rest or channel change path.
*
* Grab the TX reference in case we need to transmit.
* That way a parallel transmit doesn't.
*/
ATH_PCU_LOCK(sc);
sc->sc_inreset_cnt--;
sc->sc_txstart_cnt++;
/* XXX only do this if sc_inreset_cnt == 0? */
ath_hal_intrset(ah, sc->sc_imask);
ATH_PCU_UNLOCK(sc);
/*
* TX and RX can be started here. If it were started with
* sc_inreset_cnt > 0, the TX and RX path would abort.
* Thus if this is a nested call through the reset or
* channel change code, TX completion will occur but
* RX completion and ath_start / ath_tx_start will not
* run.
*/
/* Restart TX/RX as needed */
ath_txrx_start(sc);
/* XXX TODO: we need to hold the tx refcount here! */
/* Restart TX completion and pending TX */
if (reset_type == ATH_RESET_NOLOSS) {
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i)) {
ATH_TXQ_LOCK(&sc->sc_txq[i]);
ath_txq_restart_dma(sc, &sc->sc_txq[i]);
ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
ATH_TX_LOCK(sc);
ath_txq_sched(sc, &sc->sc_txq[i]);
ATH_TX_UNLOCK(sc);
}
}
}
- /*
- * This may have been set during an ath_start() call which
- * set this once it detected a concurrent TX was going on.
- * So, clear it.
- */
- IF_LOCK(&ifp->if_snd);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- IF_UNLOCK(&ifp->if_snd);
-
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
ATH_PCU_LOCK(sc);
sc->sc_txstart_cnt--;
ATH_PCU_UNLOCK(sc);
/* Handle any frames in the TX queue */
/*
* XXX should this be done by the caller, rather than
* ath_reset() ?
*/
ath_tx_kick(sc); /* restart xmit */
return 0;
}
static int
ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
{
struct ieee80211com *ic = vap->iv_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct ath_softc *sc = ic->ic_softc;
struct ath_hal *ah = sc->sc_ah;
switch (cmd) {
case IEEE80211_IOC_TXPOWER:
/*
* If per-packet TPC is enabled, then we have nothing
* to do; otherwise we need to force the global limit.
* All this can happen directly; no need to reset.
*/
if (!ath_hal_gettpc(ah))
ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
return 0;
}
/* XXX? Full or NOLOSS? */
- return ath_reset(ifp, ATH_RESET_FULL);
+ return ath_reset(sc, ATH_RESET_FULL);
}
struct ath_buf *
_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
{
struct ath_buf *bf;
ATH_TXBUF_LOCK_ASSERT(sc);
if (btype == ATH_BUFTYPE_MGMT)
bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
else
bf = TAILQ_FIRST(&sc->sc_txbuf);
if (bf == NULL) {
sc->sc_stats.ast_tx_getnobuf++;
} else {
if (bf->bf_flags & ATH_BUF_BUSY) {
sc->sc_stats.ast_tx_getbusybuf++;
bf = NULL;
}
}
if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
if (btype == ATH_BUFTYPE_MGMT)
TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
else {
TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
sc->sc_txbuf_cnt--;
/*
* This shouldn't happen; however, just to be
* safe print a warning and fudge the txbuf
* count.
*/
if (sc->sc_txbuf_cnt < 0) {
device_printf(sc->sc_dev,
"%s: sc_txbuf_cnt < 0?\n",
__func__);
sc->sc_txbuf_cnt = 0;
}
}
} else
bf = NULL;
if (bf == NULL) {
/* XXX should check which list, mgmt or otherwise */
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
"out of xmit buffers" : "xmit buffer busy");
return NULL;
}
/* XXX TODO: should do this at buffer list initialisation */
/* XXX (then, ensure the buffer has the right flag set) */
bf->bf_flags = 0;
if (btype == ATH_BUFTYPE_MGMT)
bf->bf_flags |= ATH_BUF_MGMT;
else
bf->bf_flags &= (~ATH_BUF_MGMT);
/* Valid bf here; clear some basic fields */
bf->bf_next = NULL; /* XXX just to be sure */
bf->bf_last = NULL; /* XXX again, just to be sure */
bf->bf_comp = NULL; /* XXX again, just to be sure */
bzero(&bf->bf_state, sizeof(bf->bf_state));
/*
* Track the descriptor ID only if doing EDMA
*/
if (sc->sc_isedma) {
bf->bf_descid = sc->sc_txbuf_descid;
sc->sc_txbuf_descid++;
}
return bf;
}
/*
* When retrying a software frame, buffers marked ATH_BUF_BUSY
* can't be thrown back on the queue as they could still be
* in use by the hardware.
*
* This duplicates the buffer, or returns NULL.
*
* The descriptor is also copied but the link pointers and
* the DMA segments aren't copied; this frame should thus
* be again passed through the descriptor setup/chain routines
* so the link is correct.
*
* The caller must free the buffer using ath_freebuf().
*/
struct ath_buf *
ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf)
{
struct ath_buf *tbf;
tbf = ath_getbuf(sc,
(bf->bf_flags & ATH_BUF_MGMT) ?
ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL);
if (tbf == NULL)
return NULL; /* XXX failure? Why? */
/* Copy basics */
tbf->bf_next = NULL;
tbf->bf_nseg = bf->bf_nseg;
tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE;
tbf->bf_status = bf->bf_status;
tbf->bf_m = bf->bf_m;
tbf->bf_node = bf->bf_node;
KASSERT((bf->bf_node != NULL), ("%s: bf_node=NULL!", __func__));
/* will be setup by the chain/setup function */
tbf->bf_lastds = NULL;
/* for now, last == self */
tbf->bf_last = tbf;
tbf->bf_comp = bf->bf_comp;
/* NOTE: DMA segments will be setup by the setup/chain functions */
/* The caller has to re-init the descriptor + links */
/*
* Free the DMA mapping here, before we NULL the mbuf.
* We must only call bus_dmamap_unload() once per mbuf chain
* or behaviour is undefined.
*/
if (bf->bf_m != NULL) {
/*
* XXX is this POSTWRITE call required?
*/
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
}
bf->bf_m = NULL;
bf->bf_node = NULL;
/* Copy state */
memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));
return tbf;
}
struct ath_buf *
ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype)
{
struct ath_buf *bf;
ATH_TXBUF_LOCK(sc);
bf = _ath_getbuf_locked(sc, btype);
/*
* If a mgmt buffer was requested but we're out of those,
* try requesting a normal one.
*/
if (bf == NULL && btype == ATH_BUFTYPE_MGMT)
bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
ATH_TXBUF_UNLOCK(sc);
if (bf == NULL) {
- struct ifnet *ifp = sc->sc_ifp;
-
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
sc->sc_stats.ast_tx_qstop++;
- IF_LOCK(&ifp->if_snd);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- IF_UNLOCK(&ifp->if_snd);
}
return bf;
}
-static void
-ath_qflush(struct ifnet *ifp)
-{
-
- /* XXX TODO */
-}
-
/*
* Transmit a single frame.
*
* net80211 will free the node reference if the transmit
* fails, so don't free the node reference here.
*/
static int
-ath_transmit(struct ifnet *ifp, struct mbuf *m)
+ath_transmit(struct ieee80211com *ic, struct mbuf *m)
{
- struct ieee80211com *ic = ifp->if_l2com;
struct ath_softc *sc = ic->ic_softc;
struct ieee80211_node *ni;
struct mbuf *next;
struct ath_buf *bf;
ath_bufhead frags;
int retval = 0;
/*
* Tell the reset path that we're currently transmitting.
*/
ATH_PCU_LOCK(sc);
if (sc->sc_inreset_cnt > 0) {
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: sc_inreset_cnt > 0; bailing\n", __func__);
ATH_PCU_UNLOCK(sc);
- IF_LOCK(&ifp->if_snd);
sc->sc_stats.ast_tx_qstop++;
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- IF_UNLOCK(&ifp->if_snd);
ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish");
return (ENOBUFS); /* XXX should be EINVAL or? */
}
sc->sc_txstart_cnt++;
ATH_PCU_UNLOCK(sc);
/* Wake the hardware up already */
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start");
/*
* Grab the TX lock - it's ok to do this here; we haven't
* yet started transmitting.
*/
ATH_TX_LOCK(sc);
/*
* Node reference, if there's one.
*/
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
/*
* Enforce how deep a node queue can get.
*
* XXX it would be nicer if we kept an mbuf queue per
* node and only whacked them into ath_bufs when we
* are ready to schedule some traffic from them.
* .. that may come later.
*
* XXX we should also track the per-node hardware queue
* depth so it is easy to limit the _SUM_ of the swq and
* hwq frames. Since we only schedule two HWQ frames
* at a time, this should be OK for now.
*/
if ((!(m->m_flags & M_EAPOL)) &&
(ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) {
sc->sc_stats.ast_tx_nodeq_overflow++;
- m_freem(m);
- m = NULL;
retval = ENOBUFS;
goto finish;
}
/*
* Check how many TX buffers are available.
*
* If this is for non-EAPOL traffic, just leave some
* space free in order for buffer cloning and raw
* frame transmission to occur.
*
* If it's for EAPOL traffic, ignore this for now.
* Management traffic will be sent via the raw transmit
* method which bypasses this check.
*
* This is needed to ensure that EAPOL frames during
* (re) keying have a chance to go out.
*
* See kern/138379 for more information.
*/
if ((!(m->m_flags & M_EAPOL)) &&
(sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) {
sc->sc_stats.ast_tx_nobuf++;
- m_freem(m);
- m = NULL;
retval = ENOBUFS;
goto finish;
}
/*
* Grab a TX buffer and associated resources.
*
* If it's an EAPOL frame, allocate a MGMT ath_buf.
* That way, temporary buffer exhaustion in the data path
* doesn't leave us without the ability to transmit
* management frames.
*
* Otherwise allocate a normal buffer.
*/
if (m->m_flags & M_EAPOL)
bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
else
bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
if (bf == NULL) {
/*
* If we failed to allocate a buffer, fail.
*
* We shouldn't fail normally, due to the check
* above.
*/
sc->sc_stats.ast_tx_nobuf++;
- IF_LOCK(&ifp->if_snd);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- IF_UNLOCK(&ifp->if_snd);
- m_freem(m);
- m = NULL;
retval = ENOBUFS;
goto finish;
}
/*
* At this point we have a buffer; so we need to free it
* if we hit any error conditions.
*/
/*
* Check for fragmentation. If this frame
* has been broken up verify we have enough
* buffers to send all the fragments so all
* go out or none...
*/
TAILQ_INIT(&frags);
if ((m->m_flags & M_FRAG) &&
!ath_txfrag_setup(sc, &frags, m, ni)) {
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: out of txfrag buffers\n", __func__);
sc->sc_stats.ast_tx_nofrag++;
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
+ /*
+ * XXXGL: is mbuf valid after ath_txfrag_setup? If yes,
+ * we shouldn't free it but return back.
+ */
ath_freetx(m);
+ m = NULL;
goto bad;
}
/*
* At this point if we have any TX fragments, then we will
* have bumped the node reference once for each of those.
*/
/*
* XXX Is there anything actually _enforcing_ that the
* fragments are being transmitted in one hit, rather than
* being interleaved with other transmissions on that
* hardware queue?
*
* The ATH TX output lock is the only thing serialising this
* right now.
*/
/*
* Calculate the "next fragment" length field in ath_buf
* in order to let the transmit path know enough about
* what to next write to the hardware.
*/
if (m->m_flags & M_FRAG) {
struct ath_buf *fbf = bf;
struct ath_buf *n_fbf = NULL;
struct mbuf *fm = m->m_nextpkt;
/*
* We need to walk the list of fragments and set
* the next size to the following buffer.
* However, the first buffer isn't in the frag
* list, so we have to do some gymnastics here.
*/
TAILQ_FOREACH(n_fbf, &frags, bf_list) {
fbf->bf_nextfraglen = fm->m_pkthdr.len;
fbf = n_fbf;
fm = fm->m_nextpkt;
}
}
- /*
- * Bump the ifp output counter.
- *
- * XXX should use atomics?
- */
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
nextfrag:
/*
* Pass the frame to the h/w for transmission.
* Fragmented frames have each frag chained together
* with m_nextpkt. We know there are sufficient ath_buf's
* to send all the frags because of work done by
* ath_txfrag_setup. We leave m_nextpkt set while
* calling ath_tx_start so it can use it to extend the
* tx duration to cover the subsequent frag and
* so it can reclaim all the mbufs in case of an error;
* ath_tx_start clears m_nextpkt once it commits to
* handing the frame to the hardware.
*
* Note: if this fails, then the mbufs are freed but
* not the node reference.
*/
next = m->m_nextpkt;
if (ath_tx_start(sc, ni, bf, m)) {
bad:
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
reclaim:
bf->bf_m = NULL;
bf->bf_node = NULL;
ATH_TXBUF_LOCK(sc);
ath_returnbuf_head(sc, bf);
/*
* Free the rest of the node references and
* buffers for the fragment list.
*/
ath_txfrag_cleanup(sc, &frags, ni);
ATH_TXBUF_UNLOCK(sc);
retval = ENOBUFS;
goto finish;
}
/*
* Check here if the node is in power save state.
*/
ath_tx_update_tim(sc, ni, 1);
if (next != NULL) {
/*
* Beware of state changing between frags.
* XXX check sta power-save state?
*/
if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: flush fragmented packet, state %s\n",
__func__,
ieee80211_state_name[ni->ni_vap->iv_state]);
/* XXX dmamap */
ath_freetx(next);
goto reclaim;
}
m = next;
bf = TAILQ_FIRST(&frags);
KASSERT(bf != NULL, ("no buf for txfrag"));
TAILQ_REMOVE(&frags, bf, bf_list);
goto nextfrag;
}
/*
* Bump watchdog timer.
*/
sc->sc_wd_timer = 5;
finish:
ATH_TX_UNLOCK(sc);
/*
* Finished transmitting!
*/
ATH_PCU_LOCK(sc);
sc->sc_txstart_cnt--;
ATH_PCU_UNLOCK(sc);
/* Sleep the hardware if required */
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished");
return (retval);
}
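/*
 * Illustrative sketch, not part of this driver or this change: the
 * "next fragment length" walk performed in ath_transmit() above.
 * Each buffer records the length of the fragment that follows it so
 * the TX path can program a long enough duration; the first buffer
 * is not on the frag list, hence the two cursors advanced in
 * lock-step.  The struct and values below are hypothetical.
 */
#include <stdio.h>

struct frag {
	int len;		/* this fragment's length */
	int nextfraglen;	/* length of the following fragment */
	struct frag *next;
};

int
main(void)
{
	struct frag f2 = { 300, 0, NULL };
	struct frag f1 = { 500, 0, &f2 };
	struct frag f0 = { 500, 0, &f1 };	/* head; not on the frag list */
	struct frag *cur, *next, *p;

	cur = &f0;
	for (next = f0.next; next != NULL; next = next->next) {
		cur->nextfraglen = next->len;
		cur = next;
	}
	for (p = &f0; p != NULL; p = p->next)
		printf("len=%d nextfraglen=%d\n", p->len, p->nextfraglen);
	return 0;
}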
static int
ath_media_change(struct ifnet *ifp)
{
int error = ieee80211_media_change(ifp);
/* NB: only the fixed rate can change and that doesn't need a reset */
return (error == ENETRESET ? 0 : error);
}
/*
* Block/unblock tx+rx processing while a key change is done.
* We assume the caller serializes key management operations
* so we only need to worry about synchronization with other
* uses that originate in the driver.
*/
static void
ath_key_update_begin(struct ieee80211vap *vap)
{
struct ath_softc *sc = vap->iv_ic->ic_softc;
DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
taskqueue_block(sc->sc_tq);
}
static void
ath_key_update_end(struct ieee80211vap *vap)
{
struct ath_softc *sc = vap->iv_ic->ic_softc;
DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
taskqueue_unblock(sc->sc_tq);
}
static void
ath_update_promisc(struct ieee80211com *ic)
{
struct ath_softc *sc = ic->ic_softc;
u_int32_t rfilt;
/* configure rx filter */
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
rfilt = ath_calcrxfilter(sc);
ath_hal_setrxfilter(sc->sc_ah, rfilt);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
}
/*
* Driver-internal mcast update call.
*
* Assumes the hardware is already awake.
*/
static void
ath_update_mcast_hw(struct ath_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
u_int32_t mfilt[2];
/* calculate and install multicast filter */
- if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
+ if (ic->ic_allmulti == 0) {
+ struct ieee80211vap *vap;
+ struct ifnet *ifp;
struct ifmultiaddr *ifma;
+
/*
* Merge multicast addresses to form the hardware filter.
*/
mfilt[0] = mfilt[1] = 0;
- if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- caddr_t dl;
- u_int32_t val;
- u_int8_t pos;
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ ifp = vap->iv_ifp;
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ caddr_t dl;
+ uint32_t val;
+ uint8_t pos;
- /* calculate XOR of eight 6bit values */
- dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
- val = LE_READ_4(dl + 0);
- pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
- val = LE_READ_4(dl + 3);
- pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
- pos &= 0x3f;
- mfilt[pos / 32] |= (1 << (pos % 32));
+ /* calculate XOR of eight 6bit values */
+ dl = LLADDR((struct sockaddr_dl *)
+ ifma->ifma_addr);
+ val = LE_READ_4(dl + 0);
+ pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^
+ val;
+ val = LE_READ_4(dl + 3);
+ pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^
+ val;
+ pos &= 0x3f;
+ mfilt[pos / 32] |= (1 << (pos % 32));
+ }
+ if_maddr_runlock(ifp);
}
- if_maddr_runlock(ifp);
} else
mfilt[0] = mfilt[1] = ~0;
ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
__func__, mfilt[0], mfilt[1]);
}
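/*
 * Illustrative sketch, not part of this driver or this change: how
 * ath_update_mcast_hw() above derives a filter bit from one multicast
 * MAC address.  Two overlapping little-endian 32-bit reads are folded
 * into a 6-bit hash by XOR-ing 6-bit groups, and that bit is set in a
 * 64-bit filter split across mfilt[0]/mfilt[1].  The MAC address is
 * hypothetical; the 8-byte buffer pads the read at offset 3, which in
 * the driver lands inside the larger sockaddr_dl.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
le_read_4(const uint8_t *p)		/* like the driver's LE_READ_4() */
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	    ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int
main(void)
{
	const uint8_t mac[8] = { 0x01, 0x00, 0x5e, 0x12, 0x34, 0x56, 0, 0 };
	uint32_t mfilt[2] = { 0, 0 };
	uint32_t val;
	uint8_t pos;

	val = le_read_4(mac + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	val = le_read_4(mac + 3);	/* overlaps byte 3 of the address */
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	pos &= 0x3f;
	mfilt[pos / 32] |= (1u << (pos % 32));

	printf("hash bit %u -> mfilt %08x:%08x\n", (unsigned)pos,
	    mfilt[1], mfilt[0]);
	return 0;
}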
/*
* Called from the net80211 layer - force the hardware
* awake before operating.
*/
static void
ath_update_mcast(struct ieee80211com *ic)
{
struct ath_softc *sc = ic->ic_softc;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ath_update_mcast_hw(sc);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
}
void
ath_mode_init(struct ath_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ath_hal *ah = sc->sc_ah;
u_int32_t rfilt;
/* configure rx filter */
rfilt = ath_calcrxfilter(sc);
ath_hal_setrxfilter(ah, rfilt);
/* configure operational mode */
ath_hal_setopmode(ah);
- DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE,
- "%s: ah=%p, ifp=%p, if_addr=%p\n",
- __func__,
- ah,
- ifp,
- (ifp == NULL) ? NULL : ifp->if_addr);
-
/* handle any link-level address change */
- ath_hal_setmac(ah, IF_LLADDR(ifp));
+ ath_hal_setmac(ah, ic->ic_macaddr);
/* calculate and install multicast filter */
ath_update_mcast_hw(sc);
}
/*
* Set the slot time based on the current setting.
*/
void
ath_setslottime(struct ath_softc *sc)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ath_hal *ah = sc->sc_ah;
u_int usec;
if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
usec = 13;
else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
usec = 21;
else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
/* honor short/long slot time only in 11g */
/* XXX shouldn't honor on pure g or turbo g channel */
if (ic->ic_flags & IEEE80211_F_SHSLOT)
usec = HAL_SLOT_TIME_9;
else
usec = HAL_SLOT_TIME_20;
} else
usec = HAL_SLOT_TIME_9;
DPRINTF(sc, ATH_DEBUG_RESET,
"%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
__func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
/* Wake up the hardware first before updating the slot time */
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ath_hal_setslottime(ah, usec);
ath_power_restore_power_state(sc);
sc->sc_updateslot = OK;
ATH_UNLOCK(sc);
}
/*
* Callback from the 802.11 layer to update the
* slot time based on the current setting.
*/
static void
ath_updateslot(struct ieee80211com *ic)
{
struct ath_softc *sc = ic->ic_softc;
/*
* When not coordinating the BSS, change the hardware
* immediately. For other operation we defer the change
* until beacon updates have propagated to the stations.
*
* XXX sc_updateslot isn't changed behind a lock?
*/
if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
ic->ic_opmode == IEEE80211_M_MBSS)
sc->sc_updateslot = UPDATE;
else
ath_setslottime(sc);
}
/*
* Append the contents of src to dst; both queues
* are assumed to be locked.
*/
void
ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
{
ATH_TXQ_LOCK_ASSERT(src);
ATH_TXQ_LOCK_ASSERT(dst);
TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
dst->axq_link = src->axq_link;
src->axq_link = NULL;
dst->axq_depth += src->axq_depth;
dst->axq_aggr_depth += src->axq_aggr_depth;
src->axq_depth = 0;
src->axq_aggr_depth = 0;
}
/*
* Reset the hardware, with no loss.
*
* This can't be used for a general case reset.
*/
static void
ath_reset_proc(void *arg, int pending)
{
struct ath_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
#if 0
device_printf(sc->sc_dev, "%s: resetting\n", __func__);
#endif
- ath_reset(ifp, ATH_RESET_NOLOSS);
+ ath_reset(sc, ATH_RESET_NOLOSS);
}
/*
* Reset the hardware after detecting beacons have stopped.
*/
static void
ath_bstuck_proc(void *arg, int pending)
{
struct ath_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
uint32_t hangs = 0;
if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
device_printf(sc->sc_dev, "bb hang detected (0x%x)\n", hangs);
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON))
if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL);
#endif
device_printf(sc->sc_dev, "stuck beacon; resetting (bmiss count %u)\n",
sc->sc_bmisscount);
sc->sc_stats.ast_bstuck++;
/*
* This assumes that there's no simultaneous channel mode change
* occurring.
*/
- ath_reset(ifp, ATH_RESET_NOLOSS);
+ ath_reset(sc, ATH_RESET_NOLOSS);
}
static void
ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
bus_addr_t *paddr = (bus_addr_t*) arg;
KASSERT(error == 0, ("error %u on bus_dma callback", error));
*paddr = segs->ds_addr;
}
/*
* Allocate the descriptors and appropriate DMA tag/setup.
*
* For some situations (eg EDMA TX completion), there isn't a requirement
* for the ath_buf entries to be allocated.
*/
int
ath_descdma_alloc_desc(struct ath_softc *sc,
struct ath_descdma *dd, ath_bufhead *head,
const char *name, int ds_size, int ndesc)
{
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
int error;
dd->dd_descsize = ds_size;
DPRINTF(sc, ATH_DEBUG_RESET,
"%s: %s DMA: %u desc, %d bytes per descriptor\n",
__func__, name, ndesc, dd->dd_descsize);
dd->dd_name = name;
dd->dd_desc_len = dd->dd_descsize * ndesc;
/*
* Merlin work-around:
* Descriptors that cross the 4KB boundary can't be used.
* Assume one skipped descriptor per 4KB page.
*/
if (! ath_hal_split4ktrans(sc->sc_ah)) {
int numpages = dd->dd_desc_len / 4096;
dd->dd_desc_len += ds_size * numpages;
}
/*
* Setup DMA descriptor area.
*
* BUS_DMA_ALLOCNOW is not used; we never use bounce
* buffers for the descriptors themselves.
*/
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
PAGE_SIZE, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
dd->dd_desc_len, /* maxsize */
1, /* nsegments */
dd->dd_desc_len, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&dd->dd_dmat);
if (error != 0) {
device_printf(sc->sc_dev,
"cannot allocate %s DMA tag\n", dd->dd_name);
return error;
}
/* allocate descriptors */
error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
&dd->dd_dmamap);
if (error != 0) {
device_printf(sc->sc_dev,
"unable to alloc memory for %u %s descriptors, error %u\n",
ndesc, dd->dd_name, error);
goto fail1;
}
error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
dd->dd_desc, dd->dd_desc_len,
ath_load_cb, &dd->dd_desc_paddr,
BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->sc_dev,
"unable to map %s descriptors, error %u\n",
dd->dd_name, error);
goto fail2;
}
DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
__func__, dd->dd_name, (uint8_t *) dd->dd_desc,
(u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr,
/*XXX*/ (u_long) dd->dd_desc_len);
return (0);
fail2:
bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
fail1:
bus_dma_tag_destroy(dd->dd_dmat);
memset(dd, 0, sizeof(*dd));
return error;
#undef DS2PHYS
#undef ATH_DESC_4KB_BOUND_CHECK
}
int
ath_descdma_setup(struct ath_softc *sc,
struct ath_descdma *dd, ath_bufhead *head,
const char *name, int ds_size, int nbuf, int ndesc)
{
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
uint8_t *ds;
struct ath_buf *bf;
int i, bsize, error;
/* Allocate descriptors */
error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size,
nbuf * ndesc);
/* Assume any errors during allocation were dealt with */
if (error != 0) {
return (error);
}
ds = (uint8_t *) dd->dd_desc;
/* allocate rx buffers */
bsize = sizeof(struct ath_buf) * nbuf;
bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
if (bf == NULL) {
device_printf(sc->sc_dev,
"malloc of %s buffers failed, size %u\n",
dd->dd_name, bsize);
goto fail3;
}
dd->dd_bufptr = bf;
TAILQ_INIT(head);
for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) {
bf->bf_desc = (struct ath_desc *) ds;
bf->bf_daddr = DS2PHYS(dd, ds);
if (! ath_hal_split4ktrans(sc->sc_ah)) {
/*
* Merlin WAR: Skip descriptor addresses which
* cause 4KB boundary crossing along any point
* in the descriptor.
*/
if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
dd->dd_descsize)) {
/* Start at the next page */
ds += 0x1000 - (bf->bf_daddr & 0xFFF);
bf->bf_desc = (struct ath_desc *) ds;
bf->bf_daddr = DS2PHYS(dd, ds);
}
}
error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
&bf->bf_dmamap);
if (error != 0) {
device_printf(sc->sc_dev, "unable to create dmamap "
"for %s buffer %u, error %u\n",
dd->dd_name, i, error);
ath_descdma_cleanup(sc, dd, head);
return error;
}
bf->bf_lastds = bf->bf_desc; /* Just an initial value */
TAILQ_INSERT_TAIL(head, bf, bf_list);
}
/*
* XXX TODO: ensure that ds doesn't overflow the descriptor
* allocation otherwise weird stuff will occur and crash your
* machine.
*/
return 0;
/* XXX this should likely just call ath_descdma_cleanup() */
fail3:
bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
bus_dma_tag_destroy(dd->dd_dmat);
memset(dd, 0, sizeof(*dd));
return error;
#undef DS2PHYS
#undef ATH_DESC_4KB_BOUND_CHECK
}
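/*
 * Illustrative sketch, not part of this driver or this change: the
 * Merlin 4KB-boundary work-around applied in ath_descdma_setup()
 * above.  A descriptor whose physical address leaves less than one
 * descriptor's worth of room before the next 4KB page would cross
 * that page, so the setup code skips ahead to the start of the next
 * page.  The descriptor length and addresses are hypothetical.
 */
#include <stdio.h>

#define ATH_DESC_4KB_BOUND_CHECK(daddr, len) \
	((((unsigned)(daddr) & 0xFFF) > (0x1000 - (len))) ? 1 : 0)

int
main(void)
{
	const unsigned desclen = 0x80;
	const unsigned daddr[] = { 0x1000, 0x1f40, 0x1f80, 0x1fc0 };
	unsigned i, skip;

	for (i = 0; i < sizeof(daddr) / sizeof(daddr[0]); i++) {
		if (ATH_DESC_4KB_BOUND_CHECK(daddr[i], desclen)) {
			skip = 0x1000 - (daddr[i] & 0xFFF);
			printf("0x%04x crosses a 4KB page; skip 0x%x to 0x%04x\n",
			    daddr[i], skip, daddr[i] + skip);
		} else
			printf("0x%04x fits within its 4KB page\n", daddr[i]);
	}
	return 0;
}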
/*
* Allocate ath_buf entries but no descriptor contents.
*
* This is for RX EDMA where the descriptors are the header part of
* the RX buffer.
*/
int
ath_descdma_setup_rx_edma(struct ath_softc *sc,
struct ath_descdma *dd, ath_bufhead *head,
const char *name, int nbuf, int rx_status_len)
{
struct ath_buf *bf;
int i, bsize, error;
DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n",
__func__, name, nbuf);
dd->dd_name = name;
/*
* This is (mostly) purely for show. We're not allocating any actual
* descriptors here, as with EDMA RX the descriptor is part
* of the RX buffer.
*
* However, dd_desc_len is used by ath_descdma_free() to determine
* whether we have already freed this DMA mapping.
*/
dd->dd_desc_len = rx_status_len * nbuf;
dd->dd_descsize = rx_status_len;
/* allocate rx buffers */
bsize = sizeof(struct ath_buf) * nbuf;
bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
if (bf == NULL) {
device_printf(sc->sc_dev,
"malloc of %s buffers failed, size %u\n",
dd->dd_name, bsize);
error = ENOMEM;
goto fail3;
}
dd->dd_bufptr = bf;
TAILQ_INIT(head);
for (i = 0; i < nbuf; i++, bf++) {
bf->bf_desc = NULL;
bf->bf_daddr = 0;
bf->bf_lastds = NULL; /* Just an initial value */
error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
&bf->bf_dmamap);
if (error != 0) {
device_printf(sc->sc_dev, "unable to create dmamap "
"for %s buffer %u, error %u\n",
dd->dd_name, i, error);
ath_descdma_cleanup(sc, dd, head);
return error;
}
TAILQ_INSERT_TAIL(head, bf, bf_list);
}
return 0;
fail3:
memset(dd, 0, sizeof(*dd));
return error;
}
void
ath_descdma_cleanup(struct ath_softc *sc,
struct ath_descdma *dd, ath_bufhead *head)
{
struct ath_buf *bf;
struct ieee80211_node *ni;
int do_warning = 0;
if (dd->dd_dmamap != 0) {
bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
bus_dma_tag_destroy(dd->dd_dmat);
}
if (head != NULL) {
TAILQ_FOREACH(bf, head, bf_list) {
if (bf->bf_m) {
/*
* XXX warn if there's buffers here.
* XXX it should have been freed by the
* owner!
*/
if (do_warning == 0) {
do_warning = 1;
device_printf(sc->sc_dev,
"%s: %s: mbuf should've been"
" unmapped/freed!\n",
__func__,
dd->dd_name);
}
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
m_freem(bf->bf_m);
bf->bf_m = NULL;
}
if (bf->bf_dmamap != NULL) {
bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
bf->bf_dmamap = NULL;
}
ni = bf->bf_node;
bf->bf_node = NULL;
if (ni != NULL) {
/*
* Reclaim node reference.
*/
ieee80211_free_node(ni);
}
}
}
if (head != NULL)
TAILQ_INIT(head);
if (dd->dd_bufptr != NULL)
free(dd->dd_bufptr, M_ATHDEV);
memset(dd, 0, sizeof(*dd));
}
static int
ath_desc_alloc(struct ath_softc *sc)
{
int error;
error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
"tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER);
if (error != 0) {
return error;
}
sc->sc_txbuf_cnt = ath_txbuf;
error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt,
"tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt,
ATH_TXDESC);
if (error != 0) {
ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
return error;
}
/*
* XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the
* flag doesn't have to be set in ath_getbuf_locked().
*/
error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
"beacon", sc->sc_tx_desclen, ATH_BCBUF, 1);
if (error != 0) {
ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
&sc->sc_txbuf_mgmt);
return error;
}
return 0;
}
static void
ath_desc_free(struct ath_softc *sc)
{
if (sc->sc_bdma.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
if (sc->sc_txdma.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
if (sc->sc_txdma_mgmt.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
&sc->sc_txbuf_mgmt);
}
static struct ieee80211_node *
ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct ieee80211com *ic = vap->iv_ic;
struct ath_softc *sc = ic->ic_softc;
const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
struct ath_node *an;
an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
if (an == NULL) {
/* XXX stat+msg */
return NULL;
}
ath_rate_node_init(sc, an);
/* Setup the mutex - there's no associd yet so set the name to NULL */
snprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
device_get_nameunit(sc->sc_dev), an);
mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);
/* XXX setup ath_tid */
ath_tx_tid_init(sc, an);
DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, mac, ":", an);
return &an->an_node;
}
static void
ath_node_cleanup(struct ieee80211_node *ni)
{
struct ieee80211com *ic = ni->ni_ic;
struct ath_softc *sc = ic->ic_softc;
DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__,
ni->ni_macaddr, ":", ATH_NODE(ni));
/* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
ath_tx_node_flush(sc, ATH_NODE(ni));
ath_rate_node_cleanup(sc, ATH_NODE(ni));
sc->sc_node_cleanup(ni);
}
static void
ath_node_free(struct ieee80211_node *ni)
{
struct ieee80211com *ic = ni->ni_ic;
struct ath_softc *sc = ic->ic_softc;
DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__,
ni->ni_macaddr, ":", ATH_NODE(ni));
mtx_destroy(&ATH_NODE(ni)->an_mtx);
sc->sc_node_free(ni);
}
static void
ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
struct ieee80211com *ic = ni->ni_ic;
struct ath_softc *sc = ic->ic_softc;
struct ath_hal *ah = sc->sc_ah;
*rssi = ic->ic_node_getrssi(ni);
if (ni->ni_chan != IEEE80211_CHAN_ANYC)
*noise = ath_hal_getchannoise(ah, ni->ni_chan);
else
*noise = -95; /* nominally correct */
}
/*
* Set the default antenna.
*/
void
ath_setdefantenna(struct ath_softc *sc, u_int antenna)
{
struct ath_hal *ah = sc->sc_ah;
/* XXX block beacon interrupts */
ath_hal_setdefantenna(ah, antenna);
if (sc->sc_defant != antenna)
sc->sc_stats.ast_ant_defswitch++;
sc->sc_defant = antenna;
sc->sc_rxotherant = 0;
}
static void
ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
{
txq->axq_qnum = qnum;
txq->axq_ac = 0;
txq->axq_depth = 0;
txq->axq_aggr_depth = 0;
txq->axq_intrcnt = 0;
txq->axq_link = NULL;
txq->axq_softc = sc;
TAILQ_INIT(&txq->axq_q);
TAILQ_INIT(&txq->axq_tidq);
TAILQ_INIT(&txq->fifo.axq_q);
ATH_TXQ_LOCK_INIT(sc, txq);
}
/*
* Setup a h/w transmit queue.
*/
static struct ath_txq *
ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
#define N(a) (sizeof(a)/sizeof(a[0]))
struct ath_hal *ah = sc->sc_ah;
HAL_TXQ_INFO qi;
int qnum;
memset(&qi, 0, sizeof(qi));
qi.tqi_subtype = subtype;
qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
/*
* Enable interrupts only for EOL and DESC conditions.
* We mark tx descriptors to receive a DESC interrupt
* when a tx queue gets deep; otherwise we wait for the
* EOL to reap descriptors. Note that this is done to
* reduce interrupt load and this only defers reaping
* descriptors, never transmitting frames. Aside from
* reducing interrupts this also permits more concurrency.
* The only potential downside is if the tx queue backs
* up, in which case the top half of the kernel may back up
* due to a lack of tx descriptors.
*/
if (sc->sc_isedma)
qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE |
HAL_TXQ_TXOKINT_ENABLE;
else
qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE |
HAL_TXQ_TXDESCINT_ENABLE;
qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
if (qnum == -1) {
/*
* NB: don't print a message, this happens
* normally on parts with too few tx queues
*/
return NULL;
}
if (qnum >= N(sc->sc_txq)) {
device_printf(sc->sc_dev,
"hal qnum %u out of range, max %zu!\n",
qnum, N(sc->sc_txq));
ath_hal_releasetxqueue(ah, qnum);
return NULL;
}
if (!ATH_TXQ_SETUP(sc, qnum)) {
ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
sc->sc_txqsetup |= 1<<qnum;
}
return &sc->sc_txq[qnum];
#undef N
}
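/*
 * Illustrative sketch, not part of this driver or this change: the
 * sc_txqsetup bitmask updated in ath_txq_setup() above and tested
 * throughout this file via ATH_TXQ_SETUP().  Each hardware queue the
 * HAL hands out sets one bit, so loops over HAL_NUM_TX_QUEUES can
 * skip queues that were never configured.  The queue numbers below
 * are hypothetical.
 */
#include <stdio.h>

#define NUM_TX_QUEUES	10
#define TXQ_SETUP(mask, q)	(((mask) & (1 << (q))) != 0)

int
main(void)
{
	unsigned txqsetup = 0;
	int q;

	/* pretend the HAL handed out queues 0..3 and 8 */
	txqsetup |= 1 << 0;
	txqsetup |= 1 << 1;
	txqsetup |= 1 << 2;
	txqsetup |= 1 << 3;
	txqsetup |= 1 << 8;

	for (q = 0; q < NUM_TX_QUEUES; q++)
		if (TXQ_SETUP(txqsetup, q))
			printf("queue %d is set up\n", q);
	return 0;
}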
/*
* Setup a hardware data transmit queue for the specified
* access category (AC). The hal may not support all requested
* queues in which case it will return a reference to a
* previously setup queue. We record the mapping from ac's
* to h/w queues for use by ath_tx_start and also track
* the set of h/w queues being used to optimize work in the
* transmit interrupt handler and related routines.
*/
static int
ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
{
#define N(a) (sizeof(a)/sizeof(a[0]))
struct ath_txq *txq;
if (ac >= N(sc->sc_ac2q)) {
device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
ac, N(sc->sc_ac2q));
return 0;
}
txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
if (txq != NULL) {
txq->axq_ac = ac;
sc->sc_ac2q[ac] = txq;
return 1;
} else
return 0;
#undef N
}
/*
* Update WME parameters for a transmit queue.
*/
static int
ath_txq_update(struct ath_softc *sc, int ac)
{
#define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1)
#define ATH_TXOP_TO_US(v) (v<<5)
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ath_txq *txq = sc->sc_ac2q[ac];
struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
struct ath_hal *ah = sc->sc_ah;
HAL_TXQ_INFO qi;
ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
#ifdef IEEE80211_SUPPORT_TDMA
if (sc->sc_tdma) {
/*
* AIFS is zero so there's no pre-transmit wait. The
* burst time defines the slot duration and is configured
* through net80211. The QCU is setup to not do post-xmit
* back off, lockout all lower-priority QCU's, and fire
* off the DMA beacon alert timer which is setup based
* on the slot configuration.
*/
qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
| HAL_TXQ_TXERRINT_ENABLE
| HAL_TXQ_TXURNINT_ENABLE
| HAL_TXQ_TXEOLINT_ENABLE
| HAL_TXQ_DBA_GATED
| HAL_TXQ_BACKOFF_DISABLE
| HAL_TXQ_ARB_LOCKOUT_GLOBAL
;
qi.tqi_aifs = 0;
/* XXX +dbaprep? */
qi.tqi_readyTime = sc->sc_tdmaslotlen;
qi.tqi_burstTime = qi.tqi_readyTime;
} else {
#endif
/*
* XXX shouldn't this just use the default flags
* used in the previous queue setup?
*/
qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
| HAL_TXQ_TXERRINT_ENABLE
| HAL_TXQ_TXDESCINT_ENABLE
| HAL_TXQ_TXURNINT_ENABLE
| HAL_TXQ_TXEOLINT_ENABLE
;
qi.tqi_aifs = wmep->wmep_aifsn;
qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
qi.tqi_readyTime = 0;
qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
#ifdef IEEE80211_SUPPORT_TDMA
}
#endif
DPRINTF(sc, ATH_DEBUG_RESET,
"%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
__func__, txq->axq_qnum, qi.tqi_qflags,
qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);
if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
device_printf(sc->sc_dev, "unable to update hardware queue "
"parameters for %s traffic!\n", ieee80211_wme_acnames[ac]);
return 0;
} else {
ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
return 1;
}
#undef ATH_TXOP_TO_US
#undef ATH_EXPONENT_TO_VALUE
}
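/*
 * Illustrative sketch, not part of this driver or this change: the
 * two conversions ath_txq_update() above applies to the net80211 WME
 * parameters before handing them to the HAL.  logcwmin/logcwmax are
 * exponents (cw = 2^n - 1) and wmep_txopLimit is in 32-microsecond
 * units.  The parameter values are hypothetical.
 */
#include <stdio.h>

#define ATH_EXPONENT_TO_VALUE(v)	((1 << (v)) - 1)
#define ATH_TXOP_TO_US(v)		((v) << 5)

int
main(void)
{
	int logcwmin = 2, logcwmax = 3, txop_limit = 47;

	printf("cwmin = %d slots\n", ATH_EXPONENT_TO_VALUE(logcwmin)); /* 3 */
	printf("cwmax = %d slots\n", ATH_EXPONENT_TO_VALUE(logcwmax)); /* 7 */
	printf("txop  = %d us\n", ATH_TXOP_TO_US(txop_limit));	/* 1504 us */
	return 0;
}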
/*
* Callback from the 802.11 layer to update WME parameters.
*/
int
ath_wme_update(struct ieee80211com *ic)
{
struct ath_softc *sc = ic->ic_softc;
return !ath_txq_update(sc, WME_AC_BE) ||
!ath_txq_update(sc, WME_AC_BK) ||
!ath_txq_update(sc, WME_AC_VI) ||
!ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
}
/*
* Reclaim resources for a setup queue.
*/
static void
ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
ATH_TXQ_LOCK_DESTROY(txq);
}
/*
* Reclaim all tx queue resources.
*/
static void
ath_tx_cleanup(struct ath_softc *sc)
{
int i;
ATH_TXBUF_LOCK_DESTROY(sc);
for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i))
ath_tx_cleanupq(sc, &sc->sc_txq[i]);
}
/*
* Return h/w rate index for an IEEE rate (w/o basic rate bit)
* using the current rates in sc_rixmap.
*/
int
ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
{
int rix = sc->sc_rixmap[rate];
/* NB: return lowest rix for invalid rate */
return (rix == 0xff ? 0 : rix);
}
static void
ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
struct ath_buf *bf)
{
struct ieee80211_node *ni = bf->bf_node;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
int sr, lr, pri;
if (ts->ts_status == 0) {
u_int8_t txant = ts->ts_antenna;
sc->sc_stats.ast_ant_tx[txant]++;
sc->sc_ant_tx[txant]++;
if (ts->ts_finaltsi != 0)
sc->sc_stats.ast_tx_altrate++;
pri = M_WME_GETAC(bf->bf_m);
if (pri >= WME_AC_VO)
ic->ic_wme.wme_hipri_traffic++;
if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)
ni->ni_inact = ni->ni_inact_reload;
} else {
if (ts->ts_status & HAL_TXERR_XRETRY)
sc->sc_stats.ast_tx_xretries++;
if (ts->ts_status & HAL_TXERR_FIFO)
sc->sc_stats.ast_tx_fifoerr++;
if (ts->ts_status & HAL_TXERR_FILT)
sc->sc_stats.ast_tx_filtered++;
if (ts->ts_status & HAL_TXERR_XTXOP)
sc->sc_stats.ast_tx_xtxop++;
if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
sc->sc_stats.ast_tx_timerexpired++;
if (bf->bf_m->m_flags & M_FF)
sc->sc_stats.ast_ff_txerr++;
}
/* XXX when is this valid? */
if (ts->ts_flags & HAL_TX_DESC_CFG_ERR)
sc->sc_stats.ast_tx_desccfgerr++;
/*
* This can be valid for successful frame transmission!
* If there's a TX FIFO underrun during aggregate transmission,
* the MAC will pad the rest of the aggregate with delimiters.
* If a BA is returned, the frame is marked as "OK" and it's up
* to the TX completion code to notice which frames weren't
* successfully transmitted.
*/
if (ts->ts_flags & HAL_TX_DATA_UNDERRUN)
sc->sc_stats.ast_tx_data_underrun++;
if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN)
sc->sc_stats.ast_tx_delim_underrun++;
sr = ts->ts_shortretry;
lr = ts->ts_longretry;
sc->sc_stats.ast_tx_shortretry += sr;
sc->sc_stats.ast_tx_longretry += lr;
}
/*
* The default completion. If fail is 1, this means
* "please don't retry the frame, and just return -1 status
* to the net80211 stack."
*/
void
ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
int st;
if (fail == 1)
st = -1;
else
st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ?
ts->ts_status : HAL_TXERR_XRETRY;
#if 0
if (bf->bf_state.bfs_dobaw)
device_printf(sc->sc_dev,
"%s: bf %p: seqno %d: dobaw should've been cleared!\n",
__func__,
bf,
SEQNO(bf->bf_state.bfs_seqno));
#endif
if (bf->bf_next != NULL)
device_printf(sc->sc_dev,
"%s: bf %p: seqno %d: bf_next not NULL!\n",
__func__,
bf,
SEQNO(bf->bf_state.bfs_seqno));
/*
* Check if the node software queue is empty; if so
* then clear the TIM.
*
* This needs to be done before the buffer is freed as
* otherwise the node reference will have been released
* and the node may not actually exist any longer.
*
* XXX I don't like this belonging here, but it's cleaner
* to do it here right now than in all the other places
* where ath_tx_default_comp() is called.
*
* XXX TODO: during drain, ensure that the callback is
* being called so we get a chance to update the TIM.
*/
if (bf->bf_node) {
ATH_TX_LOCK(sc);
ath_tx_update_tim(sc, bf->bf_node, 0);
ATH_TX_UNLOCK(sc);
}
/*
* Do any tx complete callback. Note this must
* be done before releasing the node reference.
* This will free the mbuf, release the net80211
* node and recycle the ath_buf.
*/
ath_tx_freebuf(sc, bf, st);
}
/*
* Update rate control with the given completion status.
*/
void
ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
int nframes, int nbad)
{
struct ath_node *an;
/* Only for unicast frames */
if (ni == NULL)
return;
an = ATH_NODE(ni);
ATH_NODE_UNLOCK_ASSERT(an);
if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
ATH_NODE_LOCK(an);
ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
ATH_NODE_UNLOCK(an);
}
}
/*
* Process the completion of the given buffer.
*
* This calls the rate control update and then the buffer completion.
* This will either free the buffer or requeue it. In any case, the
* bf pointer should be treated as invalid after this function is called.
*/
void
ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq,
struct ath_tx_status *ts, struct ath_buf *bf)
{
struct ieee80211_node *ni = bf->bf_node;
ATH_TX_UNLOCK_ASSERT(sc);
ATH_TXQ_UNLOCK_ASSERT(txq);
/* If unicast frame, update general statistics */
if (ni != NULL) {
/* update statistics */
ath_tx_update_stats(sc, ts, bf);
}
/*
* Call the completion handler.
* The completion handler is responsible for
* calling the rate control code.
*
* Frames with no completion handler get the
* rate control code called here.
*/
if (bf->bf_comp == NULL) {
if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
(bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) {
/*
* XXX assume this isn't an aggregate
* frame.
*/
ath_tx_update_ratectrl(sc, ni,
bf->bf_state.bfs_rc, ts,
bf->bf_state.bfs_pktlen, 1,
(ts->ts_status == 0 ? 0 : 1));
}
ath_tx_default_comp(sc, bf, 0);
} else
bf->bf_comp(sc, bf, 0);
}
/*
* Process completed xmit descriptors from the specified queue.
* Kick the packet scheduler if needed. This can occur from this
* particular task.
*/
static int
ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
{
struct ath_hal *ah = sc->sc_ah;
struct ath_buf *bf;
struct ath_desc *ds;
struct ath_tx_status *ts;
struct ieee80211_node *ni;
#ifdef IEEE80211_SUPPORT_SUPERG
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
#endif /* IEEE80211_SUPPORT_SUPERG */
int nacked;
HAL_STATUS status;
DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
__func__, txq->axq_qnum,
(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
txq->axq_link);
ATH_KTR(sc, ATH_KTR_TXCOMP, 4,
"ath_tx_processq: txq=%u head %p link %p depth %p",
txq->axq_qnum,
(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
txq->axq_link,
txq->axq_depth);
nacked = 0;
for (;;) {
ATH_TXQ_LOCK(txq);
txq->axq_intrcnt = 0; /* reset periodic desc intr count */
bf = TAILQ_FIRST(&txq->axq_q);
if (bf == NULL) {
ATH_TXQ_UNLOCK(txq);
break;
}
ds = bf->bf_lastds; /* XXX must be setup correctly! */
ts = &bf->bf_status.ds_txstat;
status = ath_hal_txprocdesc(ah, ds, ts);
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
status == HAL_OK);
else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0))
ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
status == HAL_OK);
#endif
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq,
ATH_ALQ_EDMA_TXSTATUS)) {
if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
sc->sc_tx_statuslen,
(char *) ds);
}
#endif
if (status == HAL_EINPROGRESS) {
ATH_KTR(sc, ATH_KTR_TXCOMP, 3,
"ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS",
txq->axq_qnum, bf, ds);
ATH_TXQ_UNLOCK(txq);
break;
}
ATH_TXQ_REMOVE(txq, bf, bf_list);
/*
* Sanity check.
*/
if (txq->axq_qnum != bf->bf_state.bfs_tx_queue) {
device_printf(sc->sc_dev,
"%s: TXQ=%d: bf=%p, bfs_tx_queue=%d\n",
__func__,
txq->axq_qnum,
bf,
bf->bf_state.bfs_tx_queue);
}
if (txq->axq_qnum != bf->bf_last->bf_state.bfs_tx_queue) {
device_printf(sc->sc_dev,
"%s: TXQ=%d: bf_last=%p, bfs_tx_queue=%d\n",
__func__,
txq->axq_qnum,
bf->bf_last,
bf->bf_last->bf_state.bfs_tx_queue);
}
#if 0
if (txq->axq_depth > 0) {
/*
* More frames follow. Mark the buffer busy
* so it's not re-used while the hardware may
* still re-read the link field in the descriptor.
*
* Use the last buffer in an aggregate as that
* is where the hardware may be - intermediate
* descriptors won't be "busy".
*/
bf->bf_last->bf_flags |= ATH_BUF_BUSY;
} else
txq->axq_link = NULL;
#else
bf->bf_last->bf_flags |= ATH_BUF_BUSY;
#endif
if (bf->bf_state.bfs_aggr)
txq->axq_aggr_depth--;
ni = bf->bf_node;
ATH_KTR(sc, ATH_KTR_TXCOMP, 5,
"ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x",
txq->axq_qnum, bf, ds, ni, ts->ts_status);
/*
* If unicast frame was ack'd update RSSI,
* including the last rx time used to
* workaround phantom bmiss interrupts.
*/
if (ni != NULL && ts->ts_status == 0 &&
((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
nacked++;
sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
ts->ts_rssi);
}
ATH_TXQ_UNLOCK(txq);
/*
* Update statistics and call completion
*/
ath_tx_process_buf_completion(sc, txq, ts, bf);
/* XXX at this point, bf and ni may be totally invalid */
}
#ifdef IEEE80211_SUPPORT_SUPERG
/*
* Flush fast-frame staging queue when traffic slows.
*/
if (txq->axq_depth <= 1)
ieee80211_ff_flush(ic, txq->axq_ac);
#endif
/* Kick the software TXQ scheduler */
if (dosched) {
ATH_TX_LOCK(sc);
ath_txq_sched(sc, txq);
ATH_TX_UNLOCK(sc);
}
ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
"ath_tx_processq: txq=%u: done",
txq->axq_qnum);
return nacked;
}
#define TXQACTIVE(t, q) ( (t) & (1 << (q)))
/*
* Deferred processing of transmit interrupt; special-cased
* for a single hardware transmit queue (e.g. 5210 and 5211).
*/
static void
ath_tx_proc_q0(void *arg, int npending)
{
struct ath_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
uint32_t txqs;
ATH_PCU_LOCK(sc);
sc->sc_txproc_cnt++;
txqs = sc->sc_txq_active;
sc->sc_txq_active &= ~txqs;
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
"ath_tx_proc_q0: txqs=0x%08x", txqs);
if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
/* XXX why is lastrx updated in tx code? */
sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
ath_tx_processq(sc, sc->sc_cabq, 1);
- IF_LOCK(&ifp->if_snd);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- IF_UNLOCK(&ifp->if_snd);
sc->sc_wd_timer = 0;
if (sc->sc_softled)
ath_led_event(sc, sc->sc_txrix);
ATH_PCU_LOCK(sc);
sc->sc_txproc_cnt--;
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
ath_tx_kick(sc);
}
/*
* Deferred processing of transmit interrupt; special-cased
* for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
*/
static void
ath_tx_proc_q0123(void *arg, int npending)
{
struct ath_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
int nacked;
uint32_t txqs;
ATH_PCU_LOCK(sc);
sc->sc_txproc_cnt++;
txqs = sc->sc_txq_active;
sc->sc_txq_active &= ~txqs;
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
"ath_tx_proc_q0123: txqs=0x%08x", txqs);
/*
* Process each active queue.
*/
nacked = 0;
if (TXQACTIVE(txqs, 0))
nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
if (TXQACTIVE(txqs, 1))
nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
if (TXQACTIVE(txqs, 2))
nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
if (TXQACTIVE(txqs, 3))
nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
ath_tx_processq(sc, sc->sc_cabq, 1);
if (nacked)
sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
- IF_LOCK(&ifp->if_snd);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- IF_UNLOCK(&ifp->if_snd);
sc->sc_wd_timer = 0;
if (sc->sc_softled)
ath_led_event(sc, sc->sc_txrix);
ATH_PCU_LOCK(sc);
sc->sc_txproc_cnt--;
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
ath_tx_kick(sc);
}
/*
* Deferred processing of transmit interrupt.
*/
static void
ath_tx_proc(void *arg, int npending)
{
struct ath_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
int i, nacked;
uint32_t txqs;
ATH_PCU_LOCK(sc);
sc->sc_txproc_cnt++;
txqs = sc->sc_txq_active;
sc->sc_txq_active &= ~txqs;
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs);
/*
* Process each active queue.
*/
nacked = 0;
for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
if (nacked)
sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
- /* XXX check this inside of IF_LOCK? */
- IF_LOCK(&ifp->if_snd);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- IF_UNLOCK(&ifp->if_snd);
sc->sc_wd_timer = 0;
if (sc->sc_softled)
ath_led_event(sc, sc->sc_txrix);
ATH_PCU_LOCK(sc);
sc->sc_txproc_cnt--;
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
ath_tx_kick(sc);
}
#undef TXQACTIVE
/*
* Deferred processing of TXQ rescheduling.
*/
static void
ath_txq_sched_tasklet(void *arg, int npending)
{
struct ath_softc *sc = arg;
int i;
/* XXX is skipping ok? */
ATH_PCU_LOCK(sc);
#if 0
if (sc->sc_inreset_cnt > 0) {
device_printf(sc->sc_dev,
"%s: sc_inreset_cnt > 0; skipping\n", __func__);
ATH_PCU_UNLOCK(sc);
return;
}
#endif
sc->sc_txproc_cnt++;
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ATH_TX_LOCK(sc);
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i)) {
ath_txq_sched(sc, &sc->sc_txq[i]);
}
}
ATH_TX_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
ATH_PCU_LOCK(sc);
sc->sc_txproc_cnt--;
ATH_PCU_UNLOCK(sc);
}
void
ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf)
{
ATH_TXBUF_LOCK_ASSERT(sc);
if (bf->bf_flags & ATH_BUF_MGMT)
TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list);
else {
TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
sc->sc_txbuf_cnt++;
if (sc->sc_txbuf_cnt > ath_txbuf) {
device_printf(sc->sc_dev,
"%s: sc_txbuf_cnt > %d?\n",
__func__,
ath_txbuf);
sc->sc_txbuf_cnt = ath_txbuf;
}
}
}
void
ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
{
ATH_TXBUF_LOCK_ASSERT(sc);
if (bf->bf_flags & ATH_BUF_MGMT)
TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list);
else {
TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
sc->sc_txbuf_cnt++;
if (sc->sc_txbuf_cnt > ATH_TXBUF) {
device_printf(sc->sc_dev,
"%s: sc_txbuf_cnt > %d?\n",
__func__,
ATH_TXBUF);
sc->sc_txbuf_cnt = ATH_TXBUF;
}
}
}
/*
* Free the holding buffer if it exists
*/
void
ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq)
{
ATH_TXBUF_UNLOCK_ASSERT(sc);
ATH_TXQ_LOCK_ASSERT(txq);
if (txq->axq_holdingbf == NULL)
return;
txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY;
ATH_TXBUF_LOCK(sc);
ath_returnbuf_tail(sc, txq->axq_holdingbf);
ATH_TXBUF_UNLOCK(sc);
txq->axq_holdingbf = NULL;
}
/*
* Add this buffer to the holding queue, freeing the previous
* one if it exists.
*/
static void
ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf)
{
struct ath_txq *txq;
txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
ATH_TXBUF_UNLOCK_ASSERT(sc);
ATH_TXQ_LOCK_ASSERT(txq);
/* XXX assert ATH_BUF_BUSY is set */
/* XXX assert the tx queue is under the max number */
if (bf->bf_state.bfs_tx_queue > HAL_NUM_TX_QUEUES) {
device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n",
__func__,
bf,
bf->bf_state.bfs_tx_queue);
bf->bf_flags &= ~ATH_BUF_BUSY;
ath_returnbuf_tail(sc, bf);
return;
}
ath_txq_freeholdingbuf(sc, txq);
txq->axq_holdingbf = bf;
}
/*
* Return a buffer to the pool and update the 'busy' flag on the
* previous 'tail' entry.
*
* This _must_ only be called when the buffer is involved in a completed
* TX. The logic is that if it was part of an active TX, the previous
* buffer on the list is now not involved in a halted TX DMA queue, waiting
* for restart (eg for TDMA.)
*
* The caller must free the mbuf and recycle the node reference.
*
* XXX This method of handling busy / holding buffers is insanely stupid.
* It requires bf_state.bfs_tx_queue to be correctly assigned. It would
* be much nicer if buffers in the processq() methods would instead be
* always completed there (pushed onto a txq or ath_bufhead) so we knew
* exactly what hardware queue they came from in the first place.
*/
void
ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
{
struct ath_txq *txq;
txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));
/*
* If this buffer is busy, push it onto the holding queue.
*/
if (bf->bf_flags & ATH_BUF_BUSY) {
ATH_TXQ_LOCK(txq);
ath_txq_addholdingbuf(sc, bf);
ATH_TXQ_UNLOCK(txq);
return;
}
/*
* Not a busy buffer, so free normally
*/
ATH_TXBUF_LOCK(sc);
ath_returnbuf_tail(sc, bf);
ATH_TXBUF_UNLOCK(sc);
}
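/*
 * Illustrative sketch, not part of this driver or this change: the
 * per-queue "holding buffer" scheme described above.  A completed
 * buffer whose descriptor the hardware may still re-read (ATH_BUF_BUSY)
 * is parked on its TX queue instead of going straight back to the free
 * list; the next busy completion on that queue releases the previous
 * holder.  Types and names below are made up for the sketch.
 */
#include <stdio.h>

#define BUF_BUSY	0x1

struct buf {
	int id;
	int flags;
};

struct txq {
	struct buf *holdingbf;
};

static void
free_holding(struct txq *q)
{
	if (q->holdingbf != NULL) {
		q->holdingbf->flags &= ~BUF_BUSY;
		printf("buf %d returned to the free list\n", q->holdingbf->id);
		q->holdingbf = NULL;
	}
}

static void
complete_buf(struct txq *q, struct buf *b)
{
	if (b->flags & BUF_BUSY) {
		free_holding(q);	/* the previous holder is now safe */
		q->holdingbf = b;
		printf("buf %d parked as the holding buffer\n", b->id);
	} else
		printf("buf %d returned to the free list\n", b->id);
}

int
main(void)
{
	struct txq q = { NULL };
	struct buf b0 = { 0, BUF_BUSY }, b1 = { 1, BUF_BUSY };

	complete_buf(&q, &b0);		/* parked */
	complete_buf(&q, &b1);		/* releases b0, parks b1 */
	free_holding(&q);		/* e.g. at drain time */
	return 0;
}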
/*
* This is currently used by ath_tx_draintxq() and
* ath_tx_tid_free_pkts().
*
* It recycles a single ath_buf.
*/
void
ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
{
struct ieee80211_node *ni = bf->bf_node;
struct mbuf *m0 = bf->bf_m;
/*
* Make sure that we only sync/unload if there's an mbuf.
* If not (eg we cloned a buffer), the unload will have already
* occurred.
*/
if (bf->bf_m != NULL) {
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
}
bf->bf_node = NULL;
bf->bf_m = NULL;
/* Free the buffer, it's not needed any longer */
ath_freebuf(sc, bf);
/* Pass the buffer back to net80211 - completing it */
ieee80211_tx_complete(ni, m0, status);
}
static struct ath_buf *
ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_buf *bf;
ATH_TXQ_LOCK_ASSERT(txq);
/*
* Drain the FIFO queue first, then if it's
* empty, move to the normal frame queue.
*/
bf = TAILQ_FIRST(&txq->fifo.axq_q);
if (bf != NULL) {
/*
* Is it the last buffer in this set?
* Decrement the FIFO counter.
*/
if (bf->bf_flags & ATH_BUF_FIFOEND) {
if (txq->axq_fifo_depth == 0) {
device_printf(sc->sc_dev,
"%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n",
__func__,
txq->axq_qnum,
txq->fifo.axq_depth);
} else
txq->axq_fifo_depth--;
}
ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
return (bf);
}
/*
* Debugging!
*/
if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) {
device_printf(sc->sc_dev,
"%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n",
__func__,
txq->axq_qnum,
txq->axq_fifo_depth,
txq->fifo.axq_depth);
}
/*
* Now drain the pending queue.
*/
bf = TAILQ_FIRST(&txq->axq_q);
if (bf == NULL) {
txq->axq_link = NULL;
return (NULL);
}
ATH_TXQ_REMOVE(txq, bf, bf_list);
return (bf);
}
void
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
#ifdef ATH_DEBUG
struct ath_hal *ah = sc->sc_ah;
#endif
struct ath_buf *bf;
u_int ix;
/*
* NB: this assumes output has been stopped and
* we do not need to block ath_tx_proc
*/
for (ix = 0;; ix++) {
ATH_TXQ_LOCK(txq);
bf = ath_tx_draintxq_get_one(sc, txq);
if (bf == NULL) {
ATH_TXQ_UNLOCK(txq);
break;
}
if (bf->bf_state.bfs_aggr)
txq->axq_aggr_depth--;
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_RESET) {
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
int status = 0;
/*
* EDMA operation has a TX completion FIFO
* separate from the TX descriptor, so this
* method of checking the "completion" status
* is wrong.
*/
if (! sc->sc_isedma) {
status = (ath_hal_txprocdesc(ah,
bf->bf_lastds,
&bf->bf_status.ds_txstat) == HAL_OK);
}
ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status);
ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
bf->bf_m->m_len, 0, -1);
}
#endif /* ATH_DEBUG */
/*
* Since we're now doing magic in the completion
* functions, we -must- call it for aggregation
* destinations or BAW tracking will get upset.
*/
/*
* Clear ATH_BUF_BUSY; the completion handler
* will free the buffer.
*/
ATH_TXQ_UNLOCK(txq);
bf->bf_flags &= ~ATH_BUF_BUSY;
if (bf->bf_comp)
bf->bf_comp(sc, bf, 1);
else
ath_tx_default_comp(sc, bf, 1);
}
/*
* Free the holding buffer if it exists
*/
ATH_TXQ_LOCK(txq);
ath_txq_freeholdingbuf(sc, txq);
ATH_TXQ_UNLOCK(txq);
/*
* Drain software queued frames which are on
* active TIDs.
*/
ath_tx_txq_drain(sc, txq);
}
static void
ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_hal *ah = sc->sc_ah;
ATH_TXQ_LOCK_ASSERT(txq);
DPRINTF(sc, ATH_DEBUG_RESET,
"%s: tx queue [%u] %p, active=%d, hwpending=%d, flags 0x%08x, "
"link %p, holdingbf=%p\n",
__func__,
txq->axq_qnum,
(caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
(int) (!! ath_hal_txqenabled(ah, txq->axq_qnum)),
(int) ath_hal_numtxpending(ah, txq->axq_qnum),
txq->axq_flags,
txq->axq_link,
txq->axq_holdingbf);
(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
/* We've stopped TX DMA, so mark this as stopped. */
txq->axq_flags &= ~ATH_TXQ_PUTRUNNING;
#ifdef ATH_DEBUG
if ((sc->sc_debug & ATH_DEBUG_RESET)
&& (txq->axq_holdingbf != NULL)) {
ath_printtxbuf(sc, txq->axq_holdingbf, txq->axq_qnum, 0, 0);
}
#endif
}
int
ath_stoptxdma(struct ath_softc *sc)
{
struct ath_hal *ah = sc->sc_ah;
int i;
/* XXX return value */
if (sc->sc_invalid)
return 0;
if (!sc->sc_invalid) {
/* don't touch the hardware if marked invalid */
DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
__func__, sc->sc_bhalq,
(caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
NULL);
/* stop the beacon queue */
(void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
/* Stop the data queues */
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i)) {
ATH_TXQ_LOCK(&sc->sc_txq[i]);
ath_tx_stopdma(sc, &sc->sc_txq[i]);
ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
}
}
}
return 1;
}
#ifdef ATH_DEBUG
void
ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_hal *ah = sc->sc_ah;
struct ath_buf *bf;
int i = 0;
if (! (sc->sc_debug & ATH_DEBUG_RESET))
return;
device_printf(sc->sc_dev, "%s: Q%d: begin\n",
__func__, txq->axq_qnum);
TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
ath_printtxbuf(sc, bf, txq->axq_qnum, i,
ath_hal_txprocdesc(ah, bf->bf_lastds,
&bf->bf_status.ds_txstat) == HAL_OK);
i++;
}
device_printf(sc->sc_dev, "%s: Q%d: end\n",
__func__, txq->axq_qnum);
}
#endif /* ATH_DEBUG */
/*
* Drain the transmit queues and reclaim resources.
*/
void
ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
{
struct ath_hal *ah = sc->sc_ah;
- struct ifnet *ifp = sc->sc_ifp;
- int i;
struct ath_buf *bf_last;
+ int i;
(void) ath_stoptxdma(sc);
/*
* Dump the queue contents
*/
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
/*
* XXX TODO: should we just handle the completed TX frames
* here, whether or not the reset is a full one or not?
*/
if (ATH_TXQ_SETUP(sc, i)) {
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_RESET)
ath_tx_dump(sc, &sc->sc_txq[i]);
#endif /* ATH_DEBUG */
if (reset_type == ATH_RESET_NOLOSS) {
ath_tx_processq(sc, &sc->sc_txq[i], 0);
ATH_TXQ_LOCK(&sc->sc_txq[i]);
/*
* Free the holding buffer; DMA is now
* stopped.
*/
ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
/*
* Setup the link pointer to be the
* _last_ buffer/descriptor in the list.
* If there's nothing in the list, set it
* to NULL.
*/
bf_last = ATH_TXQ_LAST(&sc->sc_txq[i],
axq_q_s);
if (bf_last != NULL) {
ath_hal_gettxdesclinkptr(ah,
bf_last->bf_lastds,
&sc->sc_txq[i].axq_link);
} else {
sc->sc_txq[i].axq_link = NULL;
}
ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
} else
ath_tx_draintxq(sc, &sc->sc_txq[i]);
}
}
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_RESET) {
struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
if (bf != NULL && bf->bf_m != NULL) {
ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
ath_hal_txprocdesc(ah, bf->bf_lastds,
&bf->bf_status.ds_txstat) == HAL_OK);
- ieee80211_dump_pkt(ifp->if_l2com,
+ ieee80211_dump_pkt(&sc->sc_ic,
mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
0, -1);
}
}
#endif /* ATH_DEBUG */
- IF_LOCK(&ifp->if_snd);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- IF_UNLOCK(&ifp->if_snd);
sc->sc_wd_timer = 0;
}
/*
* Update internal state after a channel change.
*/
static void
ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
{
enum ieee80211_phymode mode;
/*
* Change channels and update the h/w rate map
* if we're switching; e.g. 11a to 11b/g.
*/
mode = ieee80211_chan2mode(chan);
if (mode != sc->sc_curmode)
ath_setcurmode(sc, mode);
sc->sc_curchan = chan;
}
/*
* Set/change channels. If the channel is really being changed,
* it's done by resetting the chip. To accomplish this we must
* first clean up any pending DMA, then restart things a la
* ath_init.
*/
static int
ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ath_hal *ah = sc->sc_ah;
int ret = 0;
/* Treat this as an interface reset */
ATH_PCU_UNLOCK_ASSERT(sc);
ATH_UNLOCK_ASSERT(sc);
/* (Try to) stop TX/RX from occurring */
taskqueue_block(sc->sc_tq);
ATH_PCU_LOCK(sc);
/* Disable interrupts */
ath_hal_intrset(ah, 0);
/* Stop new RX/TX/interrupt completion */
if (ath_reset_grablock(sc, 1) == 0) {
device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
__func__);
}
/* Stop pending RX/TX completion */
ath_txrx_stop_locked(sc);
ATH_PCU_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
__func__, ieee80211_chan2ieee(ic, chan),
chan->ic_freq, chan->ic_flags);
if (chan != sc->sc_curchan) {
HAL_STATUS status;
/*
* To switch channels clear any pending DMA operations;
* wait long enough for the RX fifo to drain, reset the
* hardware at the new frequency, and then re-enable
* the relevant bits of the h/w.
*/
#if 0
ath_hal_intrset(ah, 0); /* disable interrupts */
#endif
ath_stoprecv(sc, 1); /* turn off frame recv */
/*
* First, handle completed TX/RX frames.
*/
ath_rx_flush(sc);
ath_draintxq(sc, ATH_RESET_NOLOSS);
/*
* Next, flush the non-scheduled frames.
*/
ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */
ath_update_chainmasks(sc, chan);
ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
sc->sc_cur_rxchainmask);
if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
device_printf(sc->sc_dev, "%s: unable to reset "
"channel %u (%u MHz, flags 0x%x), hal status %u\n",
__func__, ieee80211_chan2ieee(ic, chan),
chan->ic_freq, chan->ic_flags, status);
ret = EIO;
goto finish;
}
sc->sc_diversity = ath_hal_getdiversity(ah);
ATH_RX_LOCK(sc);
sc->sc_rx_stopped = 1;
sc->sc_rx_resetted = 1;
ATH_RX_UNLOCK(sc);
/* Let DFS at it in case it's a DFS channel */
ath_dfs_radar_enable(sc, chan);
/* Let spectral at it in case spectral is enabled */
ath_spectral_enable(sc, chan);
/*
* Let bluetooth coexistence at it in case it's needed for this
* channel
*/
ath_btcoex_enable(sc, ic->ic_curchan);
/*
* If we're doing TDMA, enforce the TXOP limitation for chips
* that support it.
*/
if (sc->sc_hasenforcetxop && sc->sc_tdma)
ath_hal_setenforcetxop(sc->sc_ah, 1);
else
ath_hal_setenforcetxop(sc->sc_ah, 0);
/*
* Re-enable rx framework.
*/
if (ath_startrecv(sc) != 0) {
device_printf(sc->sc_dev,
"%s: unable to restart recv logic\n", __func__);
ret = EIO;
goto finish;
}
/*
* Change channels and update the h/w rate map
* if we're switching; e.g. 11a to 11b/g.
*/
ath_chan_change(sc, chan);
/*
* Reset clears the beacon timers; reset them
* here if needed.
*/
if (sc->sc_beacons) { /* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
if (sc->sc_tdma)
ath_tdma_config(sc, NULL);
else
#endif
ath_beacon_config(sc, NULL);
}
/*
* Re-enable interrupts.
*/
#if 0
ath_hal_intrset(ah, sc->sc_imask);
#endif
}
finish:
ATH_PCU_LOCK(sc);
sc->sc_inreset_cnt--;
/* XXX only do this if sc_inreset_cnt == 0? */
ath_hal_intrset(ah, sc->sc_imask);
ATH_PCU_UNLOCK(sc);
- IF_LOCK(&ifp->if_snd);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- IF_UNLOCK(&ifp->if_snd);
ath_txrx_start(sc);
/* XXX ath_start? */
return ret;
}
/*
* Periodically recalibrate the PHY to account
* for temperature/environment changes.
*/
static void
ath_calibrate(void *arg)
{
struct ath_softc *sc = arg;
struct ath_hal *ah = sc->sc_ah;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
HAL_BOOL longCal, isCalDone = AH_TRUE;
HAL_BOOL aniCal, shortCal = AH_FALSE;
int nextcal;
ATH_LOCK_ASSERT(sc);
/*
* Force the hardware awake for ANI work.
*/
ath_power_set_power_state(sc, HAL_PM_AWAKE);
/* Skip trying to do this if we're in reset */
if (sc->sc_inreset_cnt)
goto restart;
if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */
goto restart;
longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
if (sc->sc_doresetcal)
shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);
DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal);
if (aniCal) {
sc->sc_stats.ast_ani_cal++;
sc->sc_lastani = ticks;
ath_hal_ani_poll(ah, sc->sc_curchan);
}
if (longCal) {
sc->sc_stats.ast_per_cal++;
sc->sc_lastlongcal = ticks;
if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
/*
* Rfgain is out of bounds, reset the chip
* to load new gain values.
*/
DPRINTF(sc, ATH_DEBUG_CALIBRATE,
"%s: rfgain change\n", __func__);
sc->sc_stats.ast_per_rfgain++;
sc->sc_resetcal = 0;
sc->sc_doresetcal = AH_TRUE;
taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
ath_power_restore_power_state(sc);
return;
}
/*
* If this long cal is after an idle period, then
* reset the data collection state so we start fresh.
*/
if (sc->sc_resetcal) {
(void) ath_hal_calreset(ah, sc->sc_curchan);
sc->sc_lastcalreset = ticks;
sc->sc_lastshortcal = ticks;
sc->sc_resetcal = 0;
sc->sc_doresetcal = AH_TRUE;
}
}
/* Only call if we're doing a short/long cal, not for ANI calibration */
if (shortCal || longCal) {
isCalDone = AH_FALSE;
if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
if (longCal) {
/*
* Calibrate noise floor data again in case of change.
*/
ath_hal_process_noisefloor(ah);
}
} else {
DPRINTF(sc, ATH_DEBUG_ANY,
"%s: calibration of channel %u failed\n",
__func__, sc->sc_curchan->ic_freq);
sc->sc_stats.ast_per_calfail++;
}
if (shortCal)
sc->sc_lastshortcal = ticks;
}
if (!isCalDone) {
restart:
/*
* Use a shorter interval to potentially collect multiple
* data samples required to complete calibration. Once
* we're told the work is done we drop back to a longer
* interval between requests. We're more aggressive doing
* work when operating as an AP to improve operation right
* after startup.
*/
sc->sc_lastshortcal = ticks;
nextcal = ath_shortcalinterval*hz/1000;
if (sc->sc_opmode != HAL_M_HOSTAP)
nextcal *= 10;
sc->sc_doresetcal = AH_TRUE;
} else {
/* nextcal should be the shortest time for next event */
nextcal = ath_longcalinterval*hz;
if (sc->sc_lastcalreset == 0)
sc->sc_lastcalreset = sc->sc_lastlongcal;
else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
sc->sc_resetcal = 1; /* setup reset next trip */
sc->sc_doresetcal = AH_FALSE;
}
/* ANI calibration may occur more often than short/long/resetcal */
if (ath_anicalinterval > 0)
nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);
if (nextcal != 0) {
DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
__func__, nextcal, isCalDone ? "" : "!");
callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
} else {
DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
__func__);
/* NB: don't rearm timer */
}
/*
* Restore power state now that we're done.
*/
ath_power_restore_power_state(sc);
}
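/*
 * Illustrative sketch, not part of the driver: how ath_calibrate() above
 * picks the delay for the next callout.  The parameters mirror the
 * ath_shortcalinterval (ms), ath_longcalinterval (s) and
 * ath_anicalinterval (ms) tunables and the kernel's hz.
 */
static int
calc_next_cal_ticks(int isCalDone, int is_hostap, int hz,
    int shortcal_ms, int longcal_s, int anical_ms)
{
	int nextcal;

	if (!isCalDone) {
		/* Still converging: poll again on the short interval. */
		nextcal = shortcal_ms * hz / 1000;
		if (!is_hostap)
			nextcal *= 10;	/* less aggressive when not an AP */
	} else {
		/* Converged: drop back to the long interval. */
		nextcal = longcal_s * hz;
	}
	/* ANI polling may need to run more often than either of the above. */
	if (anical_ms > 0 && nextcal > anical_ms * hz / 1000)
		nextcal = anical_ms * hz / 1000;
	return (nextcal);	/* 0 means "don't re-arm the callout" */
}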
static void
ath_scan_start(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
struct ath_softc *sc = ic->ic_softc;
struct ath_hal *ah = sc->sc_ah;
u_int32_t rfilt;
/* XXX calibration timer? */
+ /* XXXGL: is constant ieee80211broadcastaddr a correct choice? */
ATH_LOCK(sc);
sc->sc_scanning = 1;
sc->sc_syncbeacon = 0;
rfilt = ath_calcrxfilter(sc);
ATH_UNLOCK(sc);
ATH_PCU_LOCK(sc);
ath_hal_setrxfilter(ah, rfilt);
- ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
+ ath_hal_setassocid(ah, ieee80211broadcastaddr, 0);
ATH_PCU_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
- __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
+ __func__, rfilt, ether_sprintf(ieee80211broadcastaddr));
}
static void
ath_scan_end(struct ieee80211com *ic)
{
struct ath_softc *sc = ic->ic_softc;
struct ath_hal *ah = sc->sc_ah;
u_int32_t rfilt;
ATH_LOCK(sc);
sc->sc_scanning = 0;
rfilt = ath_calcrxfilter(sc);
ATH_UNLOCK(sc);
ATH_PCU_LOCK(sc);
ath_hal_setrxfilter(ah, rfilt);
ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
ath_hal_process_noisefloor(ah);
ATH_PCU_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
__func__, rfilt, ether_sprintf(sc->sc_curbssid),
sc->sc_curaid);
}
#ifdef ATH_ENABLE_11N
/*
* For now, just do a channel change.
*
* Later, we'll go through the hard slog of suspending tx/rx, changing rate
* control state and resetting the hardware without dropping frames out
* of the queue.
*
* The unfortunate trouble here is making absolutely sure that the
* channel width change has propagated enough so the hardware
* absolutely isn't handed bogus frames for its current operating
* mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and
* does occur in parallel, we need to make certain we've blocked
* any further ongoing TX (and RX, that can cause raw TX)
* before we do this.
*/
static void
ath_update_chw(struct ieee80211com *ic)
{
struct ath_softc *sc = ic->ic_softc;
DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__);
ath_set_channel(ic);
}
#endif /* ATH_ENABLE_11N */
static void
ath_set_channel(struct ieee80211com *ic)
{
struct ath_softc *sc = ic->ic_softc;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
(void) ath_chan_set(sc, ic->ic_curchan);
/*
* If we are returning to our bss channel then mark state
* so the next recv'd beacon's tsf will be used to sync the
* beacon timers. Note that since we only hear beacons in
* sta/ibss mode this has no effect in other operating modes.
*/
ATH_LOCK(sc);
if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
sc->sc_syncbeacon = 1;
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
}
/*
* Walk the vap list and check if there any vap's in RUN state.
*/
static int
ath_isanyrunningvaps(struct ieee80211vap *this)
{
struct ieee80211com *ic = this->iv_ic;
struct ieee80211vap *vap;
IEEE80211_LOCK_ASSERT(ic);
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
return 1;
}
return 0;
}
static int
ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct ieee80211com *ic = vap->iv_ic;
struct ath_softc *sc = ic->ic_softc;
struct ath_vap *avp = ATH_VAP(vap);
struct ath_hal *ah = sc->sc_ah;
struct ieee80211_node *ni = NULL;
int i, error, stamode;
u_int32_t rfilt;
int csa_run_transition = 0;
enum ieee80211_state ostate = vap->iv_state;
static const HAL_LED_STATE leds[] = {
HAL_LED_INIT, /* IEEE80211_S_INIT */
HAL_LED_SCAN, /* IEEE80211_S_SCAN */
HAL_LED_AUTH, /* IEEE80211_S_AUTH */
HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */
HAL_LED_RUN, /* IEEE80211_S_CAC */
HAL_LED_RUN, /* IEEE80211_S_RUN */
HAL_LED_RUN, /* IEEE80211_S_CSA */
HAL_LED_RUN, /* IEEE80211_S_SLEEP */
};
DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
ieee80211_state_name[ostate],
ieee80211_state_name[nstate]);
/*
* net80211 _should_ have the comlock asserted at this point.
* There are some comments around the calls to vap->iv_newstate
* which indicate that it (newstate) may end up dropping the
* lock. This and the subsequent lock assert check after newstate
* are an attempt to catch these and figure out how/why.
*/
IEEE80211_LOCK_ASSERT(ic);
/* Before we touch the hardware - wake it up */
ATH_LOCK(sc);
/*
* If the NIC is in anything other than SLEEP state,
* we need to ensure that self-generated frames are
* set for PWRMGT=0. Otherwise we may end up with
* strange situations.
*
* XXX TODO: is this actually the case? :-)
*/
if (nstate != IEEE80211_S_SLEEP)
ath_power_setselfgen(sc, HAL_PM_AWAKE);
/*
* Now, wake the thing up.
*/
ath_power_set_power_state(sc, HAL_PM_AWAKE);
/*
* And stop the calibration callout whilst we have
* ATH_LOCK held.
*/
callout_stop(&sc->sc_cal_ch);
ATH_UNLOCK(sc);
if (ostate == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
csa_run_transition = 1;
ath_hal_setledstate(ah, leds[nstate]); /* set LED */
if (nstate == IEEE80211_S_SCAN) {
/*
* Scanning: turn off beacon miss and don't beacon.
* Mark beacon state so when we reach RUN state we'll
* [re]setup beacons. Unblock the task q thread so
* deferred interrupt processing is done.
*/
/* Ensure we stay awake during scan */
ATH_LOCK(sc);
ath_power_setselfgen(sc, HAL_PM_AWAKE);
ath_power_setpower(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ath_hal_intrset(ah,
sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
sc->sc_beacons = 0;
taskqueue_unblock(sc->sc_tq);
}
ni = ieee80211_ref_node(vap->iv_bss);
rfilt = ath_calcrxfilter(sc);
stamode = (vap->iv_opmode == IEEE80211_M_STA ||
vap->iv_opmode == IEEE80211_M_AHDEMO ||
vap->iv_opmode == IEEE80211_M_IBSS);
/*
* XXX Dont need to do this (and others) if we've transitioned
* from SLEEP->RUN.
*/
if (stamode && nstate == IEEE80211_S_RUN) {
sc->sc_curaid = ni->ni_associd;
IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
}
DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
__func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
ath_hal_setrxfilter(ah, rfilt);
/* XXX is this to restore keycache on resume? */
if (vap->iv_opmode != IEEE80211_M_STA &&
(vap->iv_flags & IEEE80211_F_PRIVACY)) {
for (i = 0; i < IEEE80211_WEP_NKID; i++)
if (ath_hal_keyisvalid(ah, i))
ath_hal_keysetmac(ah, i, ni->ni_bssid);
}
/*
* Invoke the parent method to do net80211 work.
*/
error = avp->av_newstate(vap, nstate, arg);
if (error != 0)
goto bad;
/*
* See above: ensure av_newstate() doesn't drop the lock
* on us.
*/
IEEE80211_LOCK_ASSERT(ic);
if (nstate == IEEE80211_S_RUN) {
/* NB: collect bss node again, it may have changed */
ieee80211_free_node(ni);
ni = ieee80211_ref_node(vap->iv_bss);
DPRINTF(sc, ATH_DEBUG_STATE,
"%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
"capinfo 0x%04x chan %d\n", __func__,
vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));
switch (vap->iv_opmode) {
#ifdef IEEE80211_SUPPORT_TDMA
case IEEE80211_M_AHDEMO:
if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
break;
/* fall thru... */
#endif
case IEEE80211_M_HOSTAP:
case IEEE80211_M_IBSS:
case IEEE80211_M_MBSS:
/*
* Allocate and setup the beacon frame.
*
* Stop any previous beacon DMA. This may be
* necessary, for example, when an ibss merge
* causes reconfiguration; there will be a state
* transition from RUN->RUN that means we may
* be called with beacon transmission active.
*/
ath_hal_stoptxdma(ah, sc->sc_bhalq);
error = ath_beacon_alloc(sc, ni);
if (error != 0)
goto bad;
/*
* If joining an adhoc network defer beacon timer
* configuration to the next beacon frame so we
* have a current TSF to use. Otherwise we're
* starting an ibss/bss so there's no need to delay;
* if this is the first vap moving to RUN state, then
* beacon state needs to be [re]configured.
*/
if (vap->iv_opmode == IEEE80211_M_IBSS &&
ni->ni_tstamp.tsf != 0) {
sc->sc_syncbeacon = 1;
} else if (!sc->sc_beacons) {
#ifdef IEEE80211_SUPPORT_TDMA
if (vap->iv_caps & IEEE80211_C_TDMA)
ath_tdma_config(sc, vap);
else
#endif
ath_beacon_config(sc, vap);
sc->sc_beacons = 1;
}
break;
case IEEE80211_M_STA:
/*
* Defer beacon timer configuration to the next
* beacon frame so we have a current TSF to use
* (any TSF collected when scanning is likely old).
* However if it's due to a CSA -> RUN transition,
* force a beacon update so we pick up a lack of
* beacons from an AP in CAC and thus force a
* scan.
*
* And, there's also corner cases here where
* after a scan, the AP may have disappeared.
* In that case, we may not receive an actual
* beacon to update the beacon timer and thus we
* won't get notified of the missing beacons.
*/
if (ostate != IEEE80211_S_RUN &&
ostate != IEEE80211_S_SLEEP) {
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: STA; syncbeacon=1\n", __func__);
sc->sc_syncbeacon = 1;
if (csa_run_transition)
ath_beacon_config(sc, vap);
/*
* PR: kern/175227
*
* Reconfigure beacons during reset; as otherwise
* we won't get the beacon timers reprogrammed
* after a reset and thus we won't pick up a
* beacon miss interrupt.
*
* Hopefully we'll see a beacon before the BMISS
* timer fires (too often), leading to a STA
* disassociation.
*/
sc->sc_beacons = 1;
}
break;
case IEEE80211_M_MONITOR:
/*
* Monitor mode vaps have only INIT->RUN and RUN->RUN
* transitions so we must re-enable interrupts here to
* handle the case of a single monitor mode vap.
*/
ath_hal_intrset(ah, sc->sc_imask);
break;
case IEEE80211_M_WDS:
break;
default:
break;
}
/*
* Let the hal process statistics collected during a
* scan so it can provide calibrated noise floor data.
*/
ath_hal_process_noisefloor(ah);
/*
* Reset rssi stats; maybe not the best place...
*/
sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
/*
* Force awake for RUN mode.
*/
ATH_LOCK(sc);
ath_power_setselfgen(sc, HAL_PM_AWAKE);
ath_power_setpower(sc, HAL_PM_AWAKE);
/*
* Finally, start any timers and the task q thread
* (in case we didn't go through SCAN state).
*/
if (ath_longcalinterval != 0) {
/* start periodic recalibration timer */
callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
} else {
DPRINTF(sc, ATH_DEBUG_CALIBRATE,
"%s: calibration disabled\n", __func__);
}
ATH_UNLOCK(sc);
taskqueue_unblock(sc->sc_tq);
} else if (nstate == IEEE80211_S_INIT) {
/*
* If there are no vaps left in RUN state then
* shutdown host/driver operation:
* o disable interrupts
* o disable the task queue thread
* o mark beacon processing as stopped
*/
if (!ath_isanyrunningvaps(vap)) {
sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
/* disable interrupts */
ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
taskqueue_block(sc->sc_tq);
sc->sc_beacons = 0;
}
#ifdef IEEE80211_SUPPORT_TDMA
ath_hal_setcca(ah, AH_TRUE);
#endif
} else if (nstate == IEEE80211_S_SLEEP) {
/* We're going to sleep, so transition appropriately */
/* For now, only do this if we're a single STA vap */
if (sc->sc_nvaps == 1 &&
vap->iv_opmode == IEEE80211_M_STA) {
DPRINTF(sc, ATH_DEBUG_BEACON, "%s: syncbeacon=%d\n", __func__, sc->sc_syncbeacon);
ATH_LOCK(sc);
/*
* Always at least set the self-generated
* frame config to set PWRMGT=1.
*/
ath_power_setselfgen(sc, HAL_PM_NETWORK_SLEEP);
/*
* If we're not syncing beacons, transition
* to NETWORK_SLEEP.
*
* We stay awake if syncbeacon > 0 in case
* we need to listen for some beacons, otherwise
* our beacon timer config may be wrong.
*/
if (sc->sc_syncbeacon == 0) {
ath_power_setpower(sc, HAL_PM_NETWORK_SLEEP);
}
ATH_UNLOCK(sc);
}
}
bad:
ieee80211_free_node(ni);
/*
* Restore the power state - either to what it was, or
* to network_sleep if it's alright.
*/
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return error;
}
/*
* Allocate a key cache slot to the station so we can
* setup a mapping from key index to node. The key cache
* slot is needed for managing antenna state and for
* compression when stations do not use crypto. We do
* it unilaterally here; if crypto is employed this slot
* will be reassigned.
*/
static void
ath_setup_stationkey(struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ath_softc *sc = vap->iv_ic->ic_softc;
ieee80211_keyix keyix, rxkeyix;
/* XXX should take a locked ref to vap->iv_bss */
if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
/*
* Key cache is full; we'll fall back to doing
* the more expensive lookup in software. Note
* this also means no h/w compression.
*/
/* XXX msg+statistic */
} else {
/* XXX locking? */
ni->ni_ucastkey.wk_keyix = keyix;
ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
/* NB: must mark device key to get called back on delete */
ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
/* NB: this will create a pass-thru key entry */
ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
}
}
/*
* Setup driver-specific state for a newly associated node.
* Note that we're called also on a re-associate, the isnew
* param tells us if this is the first time or not.
*/
static void
ath_newassoc(struct ieee80211_node *ni, int isnew)
{
struct ath_node *an = ATH_NODE(ni);
struct ieee80211vap *vap = ni->ni_vap;
struct ath_softc *sc = vap->iv_ic->ic_softc;
const struct ieee80211_txparam *tp = ni->ni_txparms;
an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: reassoc; isnew=%d, is_powersave=%d\n",
__func__,
ni->ni_macaddr,
":",
isnew,
an->an_is_powersave);
ATH_NODE_LOCK(an);
ath_rate_newassoc(sc, an, isnew);
ATH_NODE_UNLOCK(an);
if (isnew &&
(vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
ath_setup_stationkey(ni);
/*
* If we're reassociating, make sure that any paused queues
* get unpaused.
*
* Now, we may have frames in the hardware queue for this node.
* So if we are reassociating and there are frames in the queue,
* we need to go through the cleanup path to ensure that they're
* marked as non-aggregate.
*/
if (! isnew) {
DPRINTF(sc, ATH_DEBUG_NODE,
"%s: %6D: reassoc; is_powersave=%d\n",
__func__,
ni->ni_macaddr,
":",
an->an_is_powersave);
/* XXX for now, we can't hold the lock across assoc */
ath_tx_node_reassoc(sc, an);
/* XXX for now, we can't hold the lock across wakeup */
if (an->an_is_powersave)
ath_tx_node_wakeup(sc, an);
}
}
static int
ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
int nchans, struct ieee80211_channel chans[])
{
struct ath_softc *sc = ic->ic_softc;
struct ath_hal *ah = sc->sc_ah;
HAL_STATUS status;
DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
"%s: rd %u cc %u location %c%s\n",
__func__, reg->regdomain, reg->country, reg->location,
reg->ecm ? " ecm" : "");
status = ath_hal_set_channels(ah, chans, nchans,
reg->country, reg->regdomain);
if (status != HAL_OK) {
DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
__func__, status);
return EINVAL; /* XXX */
}
return 0;
}
static void
ath_getradiocaps(struct ieee80211com *ic,
int maxchans, int *nchans, struct ieee80211_channel chans[])
{
struct ath_softc *sc = ic->ic_softc;
struct ath_hal *ah = sc->sc_ah;
DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
__func__, SKU_DEBUG, CTRY_DEFAULT);
/* XXX check return */
(void) ath_hal_getchannels(ah, chans, maxchans, nchans,
HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
}
static int
ath_getchannels(struct ath_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ath_hal *ah = sc->sc_ah;
HAL_STATUS status;
/*
* Collect channel set based on EEPROM contents.
*/
status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
&ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
if (status != HAL_OK) {
device_printf(sc->sc_dev,
"%s: unable to collect channel list from hal, status %d\n",
__func__, status);
return EINVAL;
}
(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */
/* XXX map Atheros sku's to net80211 SKU's */
/* XXX net80211 types too small */
ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */
ic->ic_regdomain.isocc[1] = ' ';
ic->ic_regdomain.ecm = 1;
ic->ic_regdomain.location = 'I';
DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
"%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
__func__, sc->sc_eerd, sc->sc_eecc,
ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
return 0;
}
static int
ath_rate_setup(struct ath_softc *sc, u_int mode)
{
struct ath_hal *ah = sc->sc_ah;
const HAL_RATE_TABLE *rt;
switch (mode) {
case IEEE80211_MODE_11A:
rt = ath_hal_getratetable(ah, HAL_MODE_11A);
break;
case IEEE80211_MODE_HALF:
rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
break;
case IEEE80211_MODE_QUARTER:
rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
break;
case IEEE80211_MODE_11B:
rt = ath_hal_getratetable(ah, HAL_MODE_11B);
break;
case IEEE80211_MODE_11G:
rt = ath_hal_getratetable(ah, HAL_MODE_11G);
break;
case IEEE80211_MODE_TURBO_A:
rt = ath_hal_getratetable(ah, HAL_MODE_108A);
break;
case IEEE80211_MODE_TURBO_G:
rt = ath_hal_getratetable(ah, HAL_MODE_108G);
break;
case IEEE80211_MODE_STURBO_A:
rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
break;
case IEEE80211_MODE_11NA:
rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
break;
case IEEE80211_MODE_11NG:
rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
break;
default:
DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
__func__, mode);
return 0;
}
sc->sc_rates[mode] = rt;
return (rt != NULL);
}
static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
#define N(a) (sizeof(a)/sizeof(a[0]))
/* NB: on/off times from the Atheros NDIS driver, w/ permission */
static const struct {
u_int rate; /* tx/rx 802.11 rate */
u_int16_t timeOn; /* LED on time (ms) */
u_int16_t timeOff; /* LED off time (ms) */
} blinkrates[] = {
{ 108, 40, 10 },
{ 96, 44, 11 },
{ 72, 50, 13 },
{ 48, 57, 14 },
{ 36, 67, 16 },
{ 24, 80, 20 },
{ 22, 100, 25 },
{ 18, 133, 34 },
{ 12, 160, 40 },
{ 10, 200, 50 },
{ 6, 240, 58 },
{ 4, 267, 66 },
{ 2, 400, 100 },
{ 0, 500, 130 },
/* XXX half/quarter rates */
};
const HAL_RATE_TABLE *rt;
int i, j;
memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
rt = sc->sc_rates[mode];
KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
for (i = 0; i < rt->rateCount; i++) {
uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
if (rt->info[i].phy != IEEE80211_T_HT)
sc->sc_rixmap[ieeerate] = i;
else
sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
}
memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
for (i = 0; i < N(sc->sc_hwmap); i++) {
if (i >= rt->rateCount) {
sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
continue;
}
sc->sc_hwmap[i].ieeerate =
rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
if (rt->info[i].phy == IEEE80211_T_HT)
sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
if (rt->info[i].shortPreamble ||
rt->info[i].phy == IEEE80211_T_OFDM)
sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
for (j = 0; j < N(blinkrates)-1; j++)
if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
break;
/* NB: this uses the last entry if the rate isn't found */
/* XXX beware of overflow */
sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
}
sc->sc_currates = rt;
sc->sc_curmode = mode;
/*
* All protection frames are transmitted at 2Mb/s for
* 11g, otherwise at 1Mb/s.
*/
if (mode == IEEE80211_MODE_11G)
sc->sc_protrix = ath_tx_findrix(sc, 2*2);
else
sc->sc_protrix = ath_tx_findrix(sc, 2*1);
/* NB: caller is responsible for resetting rate control state */
#undef N
}
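/*
 * Illustrative sketch, not part of the driver: the blink-rate lookup done
 * in ath_setcurmode() above, reduced to plain C.  The last table entry is
 * the catch-all used when a rate is not found, and the millisecond on/off
 * times become scheduler ticks via (ms * hz) / 1000.
 */
struct blink { unsigned rate; unsigned short on_ms, off_ms; };

static void
led_times_for_rate(const struct blink *tab, int n, unsigned ieeerate, int hz,
    int *ledon_ticks, int *ledoff_ticks)
{
	int j;

	for (j = 0; j < n - 1; j++)
		if (tab[j].rate == ieeerate)
			break;
	/* If no entry matched, j now indexes the final catch-all entry. */
	*ledon_ticks = (tab[j].on_ms * hz) / 1000;
	*ledoff_ticks = (tab[j].off_ms * hz) / 1000;
}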
static void
ath_watchdog(void *arg)
{
struct ath_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
int do_reset = 0;
ATH_LOCK_ASSERT(sc);
if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
- struct ifnet *ifp = sc->sc_ifp;
uint32_t hangs;
ath_power_set_power_state(sc, HAL_PM_AWAKE);
if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
hangs != 0) {
device_printf(sc->sc_dev, "%s hang detected (0x%x)\n",
hangs & 0xff ? "bb" : "mac", hangs);
} else
device_printf(sc->sc_dev, "device timeout\n");
do_reset = 1;
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(ic->ic_oerrors, 1);
sc->sc_stats.ast_watchdog++;
ath_power_restore_power_state(sc);
}
/*
* We can't hold the lock across the ath_reset() call.
*
* And since this routine can't hold a lock and sleep,
* do the reset deferred.
*/
if (do_reset) {
taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
}
callout_schedule(&sc->sc_wd_ch, hz);
}
/*
* Fetch the rate control statistics for the given node.
*/
static int
ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs)
{
struct ath_node *an;
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni;
int error = 0;
/* Perform a lookup on the given node */
ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr);
if (ni == NULL) {
error = EINVAL;
goto bad;
}
/* Lock the ath_node */
an = ATH_NODE(ni);
ATH_NODE_LOCK(an);
/* Fetch the rate control stats for this node */
error = ath_rate_fetch_node_stats(sc, an, rs);
/* No matter what happens here, just drop through */
/* Unlock the ath_node */
ATH_NODE_UNLOCK(an);
/* Unref the node */
ieee80211_node_decref(ni);
bad:
return (error);
}
#ifdef ATH_DIAGAPI
/*
* Diagnostic interface to the HAL. This is used by various
* tools to do things like retrieve register contents for
* debugging. The mechanism is intentionally opaque so that
* it can change frequently w/o concern for compatibility.
*/
static int
ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
{
struct ath_hal *ah = sc->sc_ah;
u_int id = ad->ad_id & ATH_DIAG_ID;
void *indata = NULL;
void *outdata = NULL;
u_int32_t insize = ad->ad_in_size;
u_int32_t outsize = ad->ad_out_size;
int error = 0;
if (ad->ad_id & ATH_DIAG_IN) {
/*
* Copy in data.
*/
indata = malloc(insize, M_TEMP, M_NOWAIT);
if (indata == NULL) {
error = ENOMEM;
goto bad;
}
error = copyin(ad->ad_in_data, indata, insize);
if (error)
goto bad;
}
if (ad->ad_id & ATH_DIAG_DYN) {
/*
* Allocate a buffer for the results (otherwise the HAL
* returns a pointer to a buffer where we can read the
* results). Note that we depend on the HAL leaving this
* pointer for us to use below in reclaiming the buffer;
* may want to be more defensive.
*/
outdata = malloc(outsize, M_TEMP, M_NOWAIT);
if (outdata == NULL) {
error = ENOMEM;
goto bad;
}
}
ATH_LOCK(sc);
if (id != HAL_DIAG_REGS)
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
if (outsize < ad->ad_out_size)
ad->ad_out_size = outsize;
if (outdata != NULL)
error = copyout(outdata, ad->ad_out_data,
ad->ad_out_size);
} else {
error = EINVAL;
}
ATH_LOCK(sc);
if (id != HAL_DIAG_REGS)
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
bad:
if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
free(indata, M_TEMP);
if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
free(outdata, M_TEMP);
return error;
}
#endif /* ATH_DIAGAPI */
-static int
-ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+ath_parent(struct ieee80211com *ic)
{
-#define IS_RUNNING(ifp) \
- ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
- struct ieee80211com *ic = ifp->if_l2com;
struct ath_softc *sc = ic->ic_softc;
- struct ifreq *ifr = (struct ifreq *)data;
- const HAL_RATE_TABLE *rt;
- int error = 0;
+ int error = EDOOFUS;
- switch (cmd) {
- case SIOCSIFFLAGS:
- if (IS_RUNNING(ifp)) {
- /*
- * To avoid rescanning another access point,
- * do not call ath_init() here. Instead,
- * only reflect promisc mode settings.
- */
- ATH_LOCK(sc);
+ ATH_LOCK(sc);
+ if (ic->ic_nrunning > 0) {
+ /*
+ * To avoid rescanning another access point,
+ * do not call ath_init() here. Instead,
+ * only reflect promisc mode settings.
+ */
+ if (sc->sc_running) {
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ath_mode_init(sc);
ath_power_restore_power_state(sc);
- ATH_UNLOCK(sc);
- } else if (ifp->if_flags & IFF_UP) {
+ } else if (!sc->sc_invalid) {
/*
* Beware of being called during attach/detach
* to reset promiscuous mode. In that case we
* will still be marked UP but not RUNNING.
* However trying to re-init the interface
* is the wrong thing to do as we've already
* torn down much of our state. There's
* probably a better way to deal with this.
*/
- if (!sc->sc_invalid)
- ath_init(sc); /* XXX lose error */
- } else {
- ATH_LOCK(sc);
- ath_stop_locked(ifp);
- if (!sc->sc_invalid)
- ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
- ATH_UNLOCK(sc);
+ error = ath_init(sc);
}
- break;
- case SIOCGIFMEDIA:
- case SIOCSIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGATHSTATS:
+ } else {
+ ath_stop(sc);
+ if (!sc->sc_invalid)
+ ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
+ }
+ ATH_UNLOCK(sc);
+
+ if (error == 0) {
+#ifdef ATH_TX99_DIAG
+ if (sc->sc_tx99 != NULL)
+ sc->sc_tx99->start(sc->sc_tx99);
+ else
+#endif
+ ieee80211_start_all(ic);
+ }
+}
+
+static int
+ath_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
+{
+ struct ifreq *ifr = data;
+ struct ath_softc *sc = ic->ic_softc;
+
+ switch (cmd) {
+ case SIOCGATHSTATS: {
+ struct ieee80211vap *vap;
+ struct ifnet *ifp;
+ const HAL_RATE_TABLE *rt;
+
/* NB: embed these numbers to get a consistent view */
- sc->sc_stats.ast_tx_packets = ifp->if_get_counter(ifp,
- IFCOUNTER_OPACKETS);
- sc->sc_stats.ast_rx_packets = ifp->if_get_counter(ifp,
- IFCOUNTER_IPACKETS);
+ sc->sc_stats.ast_tx_packets = 0;
+ sc->sc_stats.ast_rx_packets = 0;
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ ifp = vap->iv_ifp;
+ sc->sc_stats.ast_tx_packets += ifp->if_get_counter(ifp,
+ IFCOUNTER_OPACKETS);
+ sc->sc_stats.ast_rx_packets += ifp->if_get_counter(ifp,
+ IFCOUNTER_IPACKETS);
+ }
sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
#ifdef IEEE80211_SUPPORT_TDMA
sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
#endif
rt = sc->sc_currates;
sc->sc_stats.ast_tx_rate =
rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
return copyout(&sc->sc_stats,
ifr->ifr_data, sizeof (sc->sc_stats));
+ }
case SIOCGATHAGSTATS:
return copyout(&sc->sc_aggr_stats,
ifr->ifr_data, sizeof (sc->sc_aggr_stats));
- case SIOCZATHSTATS:
+ case SIOCZATHSTATS: {
+ int error;
+
error = priv_check(curthread, PRIV_DRIVER);
if (error == 0) {
memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
memset(&sc->sc_aggr_stats, 0,
sizeof(sc->sc_aggr_stats));
memset(&sc->sc_intr_stats, 0,
sizeof(sc->sc_intr_stats));
}
- break;
+ return (error);
+ }
#ifdef ATH_DIAGAPI
case SIOCGATHDIAG:
- error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
- break;
+ return (ath_ioctl_diag(sc, data));
case SIOCGATHPHYERR:
- error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr);
- break;
+ return (ath_ioctl_phyerr(sc, data));
#endif
case SIOCGATHSPECTRAL:
- error = ath_ioctl_spectral(sc,(struct ath_diag*) ifr);
- break;
+ return (ath_ioctl_spectral(sc, data));
case SIOCGATHNODERATESTATS:
- error = ath_ioctl_ratestats(sc, (struct ath_rateioctl *) ifr);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
+ return (ath_ioctl_ratestats(sc, data));
default:
- error = EINVAL;
- break;
+ return (ENOTTY);
}
- return error;
-#undef IS_RUNNING
}
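/*
 * Illustrative sketch, not part of the driver: with the softc's ifnet gone,
 * SIOCGATHSTATS above now sums the per-vap interface counters instead of
 * reading a single per-device counter.  A minimal model of that
 * aggregation, using a hypothetical array of per-vap packet counts:
 */
struct vap_pkt_counters { unsigned long long opackets, ipackets; };

static void
sum_vap_packets(const struct vap_pkt_counters *vaps, int nvaps,
    unsigned long long *tx_packets, unsigned long long *rx_packets)
{
	int i;

	*tx_packets = *rx_packets = 0;
	for (i = 0; i < nvaps; i++) {
		*tx_packets += vaps[i].opackets;	/* IFCOUNTER_OPACKETS */
		*rx_packets += vaps[i].ipackets;	/* IFCOUNTER_IPACKETS */
	}
}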
/*
* Announce various information on device/driver attach.
*/
static void
ath_announce(struct ath_softc *sc)
{
struct ath_hal *ah = sc->sc_ah;
device_printf(sc->sc_dev, "AR%s mac %d.%d RF%s phy %d.%d\n",
ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
device_printf(sc->sc_dev, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
if (bootverbose) {
int i;
for (i = 0; i <= WME_AC_VO; i++) {
struct ath_txq *txq = sc->sc_ac2q[i];
device_printf(sc->sc_dev,
"Use hw queue %u for %s traffic\n",
txq->axq_qnum, ieee80211_wme_acnames[i]);
}
device_printf(sc->sc_dev, "Use hw queue %u for CAB traffic\n",
sc->sc_cabq->axq_qnum);
device_printf(sc->sc_dev, "Use hw queue %u for beacons\n",
sc->sc_bhalq);
}
if (ath_rxbuf != ATH_RXBUF)
device_printf(sc->sc_dev, "using %u rx buffers\n", ath_rxbuf);
if (ath_txbuf != ATH_TXBUF)
device_printf(sc->sc_dev, "using %u tx buffers\n", ath_txbuf);
if (sc->sc_mcastkey && bootverbose)
device_printf(sc->sc_dev, "using multicast key search\n");
}
static void
ath_dfs_tasklet(void *p, int npending)
{
struct ath_softc *sc = (struct ath_softc *) p;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
/*
* If previous processing has found a radar event,
* signal this to the net80211 layer to begin DFS
* processing.
*/
if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
/* DFS event found, initiate channel change */
/*
* XXX doesn't currently tell us whether the event
* XXX was found in the primary or extension
* XXX channel!
*/
IEEE80211_LOCK(ic);
ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
IEEE80211_UNLOCK(ic);
}
}
/*
* Enable/disable power save. This must be called with
* no TX driver locks currently held, so it should only
* be called from the RX path (which doesn't hold any
* TX driver locks.)
*/
static void
ath_node_powersave(struct ieee80211_node *ni, int enable)
{
#ifdef ATH_SW_PSQ
struct ath_node *an = ATH_NODE(ni);
struct ieee80211com *ic = ni->ni_ic;
struct ath_softc *sc = ic->ic_softc;
struct ath_vap *avp = ATH_VAP(ni->ni_vap);
/* XXX and no TXQ locks should be held here */
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d\n",
__func__,
ni->ni_macaddr,
":",
!! enable);
/* Suspend or resume software queue handling */
if (enable)
ath_tx_node_sleep(sc, an);
else
ath_tx_node_wakeup(sc, an);
/* Update net80211 state */
avp->av_node_ps(ni, enable);
#else
struct ath_vap *avp = ATH_VAP(ni->ni_vap);
/* Update net80211 state */
avp->av_node_ps(ni, enable);
#endif/* ATH_SW_PSQ */
}
/*
* Notification from net80211 that the powersave queue state has
* changed.
*
* Since the software queue also may have some frames:
*
* + if the node software queue has frames and the TID state
* is 0, we set the TIM;
* + if the node and the stack are both empty, we clear the TIM bit.
* + If the stack tries to set the bit, always set it.
* + If the stack tries to clear the bit, only clear it if the
* software queue in question is also cleared.
*
* TODO: this is called during node teardown; so let's ensure this
* is all correctly handled and that the TIM bit is cleared.
* It may be that the node flush is called _AFTER_ the net80211
* stack clears the TIM.
*
* Here is the racy part. Since it's possible >1 concurrent,
* overlapping TXes will appear complete with a TX completion in
* another thread, it's possible that the concurrent TIM calls will
* clash. We can't hold the node lock here because setting the
* TIM grabs the net80211 comlock and this may cause a LOR.
* The solution is either to totally serialise _everything_ at
* this point (ie, all TX, completion and any reset/flush go into
* one taskqueue) or a new "ath TIM lock" needs to be created that
* just wraps the driver state change and this call to avp->av_set_tim().
*
* The same race exists in the net80211 power save queue handling
* as well. Since multiple transmitting threads may queue frames
* into the driver, as well as ps-poll and the driver transmitting
* frames (and thus clearing the psq), it's quite possible that
* a packet entering the PSQ and a ps-poll being handled will
* race, causing the TIM to be cleared and not re-set.
*/
static int
ath_node_set_tim(struct ieee80211_node *ni, int enable)
{
#ifdef ATH_SW_PSQ
struct ieee80211com *ic = ni->ni_ic;
struct ath_softc *sc = ic->ic_softc;
struct ath_node *an = ATH_NODE(ni);
struct ath_vap *avp = ATH_VAP(ni->ni_vap);
int changed = 0;
ATH_TX_LOCK(sc);
an->an_stack_psq = enable;
/*
* This will get called for all operating modes,
* even if avp->av_set_tim is unset.
* It's currently set for hostap/ibss modes; but
* the same infrastructure is used for both STA
* and AP/IBSS node power save.
*/
if (avp->av_set_tim == NULL) {
ATH_TX_UNLOCK(sc);
return (0);
}
/*
* If setting the bit, always set it here.
* If clearing the bit, only clear it if the
* software queue is also empty.
*
* If the node has left power save, just clear the TIM
* bit regardless of the state of the power save queue.
*
* XXX TODO: although atomics are used, it's quite possible
* that a race will occur between this and setting/clearing
* in another thread. TX completion will occur always in
* one thread, however setting/clearing the TIM bit can come
* from a variety of different process contexts!
*/
if (enable && an->an_tim_set == 1) {
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: enable=%d, tim_set=1, ignoring\n",
__func__,
ni->ni_macaddr,
":",
enable);
ATH_TX_UNLOCK(sc);
} else if (enable) {
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: enable=%d, enabling TIM\n",
__func__,
ni->ni_macaddr,
":",
enable);
an->an_tim_set = 1;
ATH_TX_UNLOCK(sc);
changed = avp->av_set_tim(ni, enable);
} else if (an->an_swq_depth == 0) {
/* disable */
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: enable=%d, an_swq_depth == 0, disabling\n",
__func__,
ni->ni_macaddr,
":",
enable);
an->an_tim_set = 0;
ATH_TX_UNLOCK(sc);
changed = avp->av_set_tim(ni, enable);
} else if (! an->an_is_powersave) {
/*
* disable regardless; the node isn't in powersave now
*/
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: enable=%d, an_pwrsave=0, disabling\n",
__func__,
ni->ni_macaddr,
":",
enable);
an->an_tim_set = 0;
ATH_TX_UNLOCK(sc);
changed = avp->av_set_tim(ni, enable);
} else {
/*
* psq disable, node is currently in powersave, node
* software queue isn't empty, so don't clear the TIM bit
* for now.
*/
ATH_TX_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: enable=%d, an_swq_depth > 0, ignoring\n",
__func__,
ni->ni_macaddr,
":",
enable);
changed = 0;
}
return (changed);
#else
struct ath_vap *avp = ATH_VAP(ni->ni_vap);
/*
* Some operating modes don't set av_set_tim(), so don't
* update it here.
*/
if (avp->av_set_tim == NULL)
return (0);
return (avp->av_set_tim(ni, enable));
#endif /* ATH_SW_PSQ */
}
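/*
 * Illustrative sketch, not part of the driver: the decision made by
 * ath_node_set_tim() above, expressed as a pure function.  Returns 1 when
 * avp->av_set_tim() should be invoked with the requested value and 0 when
 * the request should be ignored for now.
 */
static int
tim_should_update(int enable, int tim_set, int swq_depth, int is_powersave)
{
	if (enable)
		return (tim_set == 0);	/* set it unless it is already set */
	/* Clearing: only once the software queue has drained ... */
	if (swq_depth == 0)
		return (1);
	/* ... or when the node has left powersave entirely. */
	if (!is_powersave)
		return (1);
	return (0);	/* node asleep with frames still queued: keep the TIM */
}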
/*
* Set or update the TIM from the software queue.
*
* Check the software queue depth before attempting to lock
* anything; that avoids taking the lock needlessly. Then,
* re-check afterwards to ensure nothing has changed in the
* meantime.
*
* set: This is designed to be called from the TX path, after
* a frame has been queued; to see if the swq > 0.
*
* clear: This is designed to be called from the buffer completion point
* (right now it's ath_tx_default_comp()) where the state of
* a software queue has changed.
*
* It makes sense to place it at buffer free / completion rather
* than after each software queue operation, as there's no real
* point in churning the TIM bit as the last frames in the software
* queue are transmitted. If they fail and we retry them, we'd
* just be setting the TIM bit again anyway.
*/
void
ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
int enable)
{
#ifdef ATH_SW_PSQ
struct ath_node *an;
struct ath_vap *avp;
/* Don't do this for broadcast/etc frames */
if (ni == NULL)
return;
an = ATH_NODE(ni);
avp = ATH_VAP(ni->ni_vap);
/*
* And for operating modes without the TIM handler set, let's
* just skip those.
*/
if (avp->av_set_tim == NULL)
return;
ATH_TX_LOCK_ASSERT(sc);
if (enable) {
if (an->an_is_powersave &&
an->an_tim_set == 0 &&
an->an_swq_depth != 0) {
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: swq_depth>0, tim_set=0, set!\n",
__func__,
ni->ni_macaddr,
":");
an->an_tim_set = 1;
(void) avp->av_set_tim(ni, 1);
}
} else {
/*
* Don't bother grabbing the lock unless the queue is empty.
*/
if (an->an_swq_depth != 0)
return;
if (an->an_is_powersave &&
an->an_stack_psq == 0 &&
an->an_tim_set == 1 &&
an->an_swq_depth == 0) {
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: swq_depth=0, tim_set=1, psq_set=0,"
" clear!\n",
__func__,
ni->ni_macaddr,
":");
an->an_tim_set = 0;
(void) avp->av_set_tim(ni, 0);
}
}
#else
return;
#endif /* ATH_SW_PSQ */
}
/*
* Received a ps-poll frame from net80211.
*
* Here we get a chance to serve out a software-queued frame ourselves
* before we punt it to net80211 to transmit us one itself - either
* because there's traffic in the net80211 psq, or a NULL frame to
* indicate there's nothing else.
*/
static void
ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m)
{
#ifdef ATH_SW_PSQ
struct ath_node *an;
struct ath_vap *avp;
struct ieee80211com *ic = ni->ni_ic;
struct ath_softc *sc = ic->ic_softc;
int tid;
/* Just paranoia */
if (ni == NULL)
return;
/*
* Unassociated (temporary node) station.
*/
if (ni->ni_associd == 0)
return;
/*
* We do have an active node, so let's begin looking into it.
*/
an = ATH_NODE(ni);
avp = ATH_VAP(ni->ni_vap);
/*
* For now, we just call the original ps-poll method.
* Once we're ready to flip this on:
*
* + Set leak to 1, as no matter what we're going to have
* to send a frame;
* + Check the software queue and if there's something in it,
* schedule the highest TID that has traffic from this node.
* Then make sure we schedule the software scheduler to
* run so it picks up said frame.
*
* That way whatever happens, we'll at least send _a_ frame
* to the given node.
*
* Again, yes, it's crappy QoS if the node has multiple
* TIDs worth of traffic - but let's get it working first
* before we optimise it.
*
* Also yes, there's definitely latency here - we're not
* direct dispatching to the hardware in this path (and
* we're likely being called from the packet receive path,
* so going back into TX may be a little hairy!) but again
* I'd like to get this working first before optimising
* turn-around time.
*/
ATH_TX_LOCK(sc);
/*
* Legacy - we're called and the node isn't asleep.
* Immediately punt.
*/
if (! an->an_is_powersave) {
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: not in powersave?\n",
__func__,
ni->ni_macaddr,
":");
ATH_TX_UNLOCK(sc);
avp->av_recv_pspoll(ni, m);
return;
}
/*
* We're in powersave.
*
* Leak a frame.
*/
an->an_leak_count = 1;
/*
* Now, if there's no frames in the node, just punt to
* recv_pspoll.
*
* Don't bother checking if the TIM bit is set, we really
* only care if there are any frames here!
*/
if (an->an_swq_depth == 0) {
ATH_TX_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: SWQ empty; punting to net80211\n",
__func__,
ni->ni_macaddr,
":");
avp->av_recv_pspoll(ni, m);
return;
}
/*
* Ok, let's schedule the highest TID that has traffic
* and then schedule something.
*/
for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--) {
struct ath_tid *atid = &an->an_tid[tid];
/*
* No frames? Skip.
*/
if (atid->axq_depth == 0)
continue;
ath_tx_tid_sched(sc, atid);
/*
* XXX we could do a direct call to the TXQ
* scheduler code here to optimise latency
* at the expense of a REALLY deep callstack.
*/
ATH_TX_UNLOCK(sc);
taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask);
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: leaking frame to TID %d\n",
__func__,
ni->ni_macaddr,
":",
tid);
return;
}
ATH_TX_UNLOCK(sc);
/*
* XXX nothing in the TIDs at this point? Eek.
*/
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: TIDs empty, but ath_node showed traffic?!\n",
__func__,
ni->ni_macaddr,
":");
avp->av_recv_pspoll(ni, m);
#else
avp->av_recv_pspoll(ni, m);
#endif /* ATH_SW_PSQ */
}
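/*
 * Illustrative sketch, not part of the driver: the "highest TID with
 * traffic" scan in ath_node_recv_pspoll() above, as a standalone helper.
 * Returns the TID to schedule, or -1 if every per-TID queue is empty (in
 * which case the driver punts to net80211's ps-poll handler).
 */
static int
pick_leak_tid(const int *tid_depth, int ntids)
{
	int tid;

	for (tid = ntids - 1; tid >= 0; tid--)
		if (tid_depth[tid] != 0)
			return (tid);
	return (-1);
}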
MODULE_VERSION(if_ath, 1);
MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */
#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) || defined(ATH_DEBUG_ALQ)
MODULE_DEPEND(if_ath, alq, 1, 1, 1);
#endif
Index: head/sys/dev/ath/if_ath_beacon.c
===================================================================
--- head/sys/dev/ath/if_ath_beacon.c (revision 287196)
+++ head/sys/dev/ath/if_ath_beacon.c (revision 287197)
@@ -1,1188 +1,1188 @@
/*-
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for the Atheros Wireless LAN controller.
*
* This software is derived from work of Atsushi Onoe; his contribution
* is greatly appreciated.
*/
#include "opt_inet.h"
#include "opt_ath.h"
/*
* This is needed for register operations which are performed
* by the driver - eg, calls to ath_hal_gettsf32().
*
* It's also required for any AH_DEBUG checks in here, eg the
* module dependencies.
*/
#include "opt_ah.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h> /* for mp_ncpus */
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#include <net/bpf.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#include <dev/ath/if_athvar.h>
#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_beacon.h>
#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
/*
* Setup a h/w transmit queue for beacons.
*/
int
ath_beaconq_setup(struct ath_softc *sc)
{
struct ath_hal *ah = sc->sc_ah;
HAL_TXQ_INFO qi;
memset(&qi, 0, sizeof(qi));
qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
/* NB: for dynamic turbo, don't enable any other interrupts */
qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
if (sc->sc_isedma)
qi.tqi_qflags |= HAL_TXQ_TXOKINT_ENABLE |
HAL_TXQ_TXERRINT_ENABLE;
return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
}
/*
* Setup the transmit queue parameters for the beacon queue.
*/
int
ath_beaconq_config(struct ath_softc *sc)
{
#define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1)
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ath_hal *ah = sc->sc_ah;
HAL_TXQ_INFO qi;
ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
ic->ic_opmode == IEEE80211_M_MBSS) {
/*
* Always burst out beacon and CAB traffic.
*/
qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
} else {
struct wmeParams *wmep =
&ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
/*
* Adhoc mode; the important thing is to use 2x cwmin.
*/
qi.tqi_aifs = wmep->wmep_aifsn;
qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
}
if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
device_printf(sc->sc_dev, "unable to update parameters for "
"beacon hardware queue!\n");
return 0;
} else {
ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
return 1;
}
#undef ATH_EXPONENT_TO_VALUE
}
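/*
 * Illustrative sketch, not part of the driver: the WME logcwmin/logcwmax
 * fields are exponents, so ATH_EXPONENT_TO_VALUE(v) == (1 << v) - 1 turns
 * them into contention-window values (e.g. 4 -> 15); the adhoc branch
 * above then doubles cwmin.
 */
static unsigned
wme_exponent_to_cw(unsigned logcw)
{
	return ((1u << logcw) - 1);
}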
/*
* Allocate and setup an initial beacon frame.
*/
int
ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ath_vap *avp = ATH_VAP(vap);
struct ath_buf *bf;
struct mbuf *m;
int error;
bf = avp->av_bcbuf;
DPRINTF(sc, ATH_DEBUG_NODE, "%s: bf_m=%p, bf_node=%p\n",
__func__, bf->bf_m, bf->bf_node);
if (bf->bf_m != NULL) {
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
m_freem(bf->bf_m);
bf->bf_m = NULL;
}
if (bf->bf_node != NULL) {
ieee80211_free_node(bf->bf_node);
bf->bf_node = NULL;
}
/*
* NB: the beacon data buffer must be 32-bit aligned;
* we assume the mbuf routines will return us something
* with this alignment (perhaps should assert).
*/
m = ieee80211_beacon_alloc(ni, &avp->av_boff);
if (m == NULL) {
device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
sc->sc_stats.ast_be_nombuf++;
return ENOMEM;
}
error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
bf->bf_segs, &bf->bf_nseg,
BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
__func__, error);
m_freem(m);
return error;
}
/*
* Calculate a TSF adjustment factor required for staggered
* beacons. Note that we assume the format of the beacon
* frame leaves the tstamp field immediately following the
* header.
*/
if (sc->sc_stagbeacons && avp->av_bslot > 0) {
uint64_t tsfadjust;
struct ieee80211_frame *wh;
/*
* The beacon interval is in TU's; the TSF is in usecs.
* We figure out how many TU's to add to align the timestamp
* then convert to TSF units and handle byte swapping before
* inserting it in the frame. The hardware will then add this
* each time a beacon frame is sent. Note that we align vaps
* 1..N and leave vap 0 untouched. This means vap 0 has a
* timestamp in one beacon interval while the others get a
* timestamp aligned to the next interval.
*/
tsfadjust = ni->ni_intval *
(ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
__func__, sc->sc_stagbeacons ? "stagger" : "burst",
avp->av_bslot, ni->ni_intval,
(long long unsigned) le64toh(tsfadjust));
wh = mtod(m, struct ieee80211_frame *);
memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
}
bf->bf_m = m;
bf->bf_node = ieee80211_ref_node(ni);
return 0;
}
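/*
 * Illustrative sketch, not part of the driver: the staggered-beacon TSF
 * adjustment computed above, in isolation.  intval_tu is the beacon
 * interval in TU, slot the vap's beacon slot (the driver only applies this
 * for slots 1..N-1) and nslots is ATH_BCBUF; since 1 TU is 1024 usec the
 * shift by 10 converts TU to TSF units.  The little-endian byte swap done
 * in the driver is omitted here.
 */
static unsigned long long
stagger_tsfadjust(unsigned intval_tu, int slot, int nslots)
{
	unsigned long long tu;

	tu = (unsigned long long)intval_tu * (nslots - slot) / nslots;
	return (tu << 10);	/* TU -> usec */
}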
/*
* Setup the beacon frame for transmit.
*/
static void
ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
{
#define USE_SHPREAMBLE(_ic) \
(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
== IEEE80211_F_SHPREAMBLE)
struct ieee80211_node *ni = bf->bf_node;
struct ieee80211com *ic = ni->ni_ic;
struct mbuf *m = bf->bf_m;
struct ath_hal *ah = sc->sc_ah;
struct ath_desc *ds;
int flags, antenna;
const HAL_RATE_TABLE *rt;
u_int8_t rix, rate;
HAL_DMA_ADDR bufAddrList[4];
uint32_t segLenList[4];
HAL_11N_RATE_SERIES rc[4];
DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
__func__, m, m->m_len);
/* setup descriptors */
ds = bf->bf_desc;
bf->bf_last = bf;
bf->bf_lastds = ds;
flags = HAL_TXDESC_NOACK;
if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
/* self-linked descriptor */
ath_hal_settxdesclink(sc->sc_ah, ds, bf->bf_daddr);
flags |= HAL_TXDESC_VEOL;
/*
* Let hardware handle antenna switching.
*/
antenna = sc->sc_txantenna;
} else {
ath_hal_settxdesclink(sc->sc_ah, ds, 0);
/*
* Switch antenna every 4 beacons.
* XXX assumes two antennas
*/
if (sc->sc_txantenna != 0)
antenna = sc->sc_txantenna;
else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
else
antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
}
KASSERT(bf->bf_nseg == 1,
("multi-segment beacon frame; nseg %u", bf->bf_nseg));
/*
* Calculate rate code.
* XXX everything at min xmit rate
*/
rix = 0;
rt = sc->sc_currates;
rate = rt->info[rix].rateCode;
if (USE_SHPREAMBLE(ic))
rate |= rt->info[rix].shortPreamble;
ath_hal_setuptxdesc(ah, ds
, m->m_len + IEEE80211_CRC_LEN /* frame length */
, sizeof(struct ieee80211_frame)/* header length */
, HAL_PKT_TYPE_BEACON /* Atheros packet type */
, ieee80211_get_node_txpower(ni) /* txpower XXX */
, rate, 1 /* series 0 rate/tries */
, HAL_TXKEYIX_INVALID /* no encryption */
, antenna /* antenna mode */
, flags /* no ack, veol for beacons */
, 0 /* rts/cts rate */
, 0 /* rts/cts duration */
);
/*
* The EDMA HAL currently assumes that _all_ rate control
* settings are done in ath_hal_set11nratescenario(), rather
* than in ath_hal_setuptxdesc().
*/
if (sc->sc_isedma) {
memset(&rc, 0, sizeof(rc));
rc[0].ChSel = sc->sc_txchainmask;
rc[0].Tries = 1;
rc[0].Rate = rt->info[rix].rateCode;
rc[0].RateIndex = rix;
rc[0].tx_power_cap = 0x3f;
rc[0].PktDuration =
ath_hal_computetxtime(ah, rt, roundup(m->m_len, 4),
rix, 0);
ath_hal_set11nratescenario(ah, ds, 0, 0, rc, 4, flags);
}
/* NB: beacon's BufLen must be a multiple of 4 bytes */
segLenList[0] = roundup(m->m_len, 4);
segLenList[1] = segLenList[2] = segLenList[3] = 0;
bufAddrList[0] = bf->bf_segs[0].ds_addr;
bufAddrList[1] = bufAddrList[2] = bufAddrList[3] = 0;
ath_hal_filltxdesc(ah, ds
, bufAddrList
, segLenList
, 0 /* XXX desc id */
, sc->sc_bhalq /* hardware TXQ */
, AH_TRUE /* first segment */
, AH_TRUE /* last segment */
, ds /* first descriptor */
);
#if 0
ath_desc_swap(ds);
#endif
#undef USE_SHPREAMBLE
}
void
ath_beacon_update(struct ieee80211vap *vap, int item)
{
struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;
setbit(bo->bo_flags, item);
}
/*
* Handle a beacon miss.
*/
void
ath_beacon_miss(struct ath_softc *sc)
{
HAL_SURVEY_SAMPLE hs;
HAL_BOOL ret;
uint32_t hangs;
bzero(&hs, sizeof(hs));
ret = ath_hal_get_mib_cycle_counts(sc->sc_ah, &hs);
if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) && hangs != 0) {
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: hang=0x%08x\n",
__func__,
hangs);
}
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_MISSED_BEACON))
if_ath_alq_post(&sc->sc_alq, ATH_ALQ_MISSED_BEACON, 0, NULL);
#endif
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: valid=%d, txbusy=%u, rxbusy=%u, chanbusy=%u, "
"extchanbusy=%u, cyclecount=%u\n",
__func__,
ret,
hs.tx_busy,
hs.rx_busy,
hs.chan_busy,
hs.ext_chan_busy,
hs.cycle_count);
}
/*
* Transmit a beacon frame at SWBA. Dynamic updates to the
* frame contents are done as needed and the slot time is
* also adjusted based on current state.
*/
void
ath_beacon_proc(void *arg, int pending)
{
struct ath_softc *sc = arg;
struct ath_hal *ah = sc->sc_ah;
struct ieee80211vap *vap;
struct ath_buf *bf;
int slot, otherant;
uint32_t bfaddr;
DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
__func__, pending);
/*
* Check if the previous beacon has gone out. If
* not, don't try to post another; skip this period
* and wait for the next. Missed beacons indicate
* a problem and should not occur. If we miss too
* many consecutive beacons, reset the device.
*/
if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
sc->sc_bmisscount++;
sc->sc_stats.ast_be_missed++;
ath_beacon_miss(sc);
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: missed %u consecutive beacons\n",
__func__, sc->sc_bmisscount);
if (sc->sc_bmisscount >= ath_bstuck_threshold)
taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
return;
}
if (sc->sc_bmisscount != 0) {
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: resume beacon xmit after %u misses\n",
__func__, sc->sc_bmisscount);
sc->sc_bmisscount = 0;
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_RESUME_BEACON))
if_ath_alq_post(&sc->sc_alq, ATH_ALQ_RESUME_BEACON, 0, NULL);
#endif
}
if (sc->sc_stagbeacons) { /* staggered beacons */
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t tsftu;
tsftu = ath_hal_gettsf32(ah) >> 10;
/* XXX lintval */
slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
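/*
 * Example with assumed values: ic_lintval == 100 TU and
 * ATH_BCBUF == 4. A tsftu of 130 gives (130 % 100) * 4 / 100 == 1,
 * so slot == 1 and the beacon generated at this SWBA belongs to
 * the vap in slot (1 + 1) % 4 == 2.
 */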
bfaddr = 0;
if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
bf = ath_beacon_generate(sc, vap);
if (bf != NULL)
bfaddr = bf->bf_daddr;
}
} else { /* burst'd beacons */
uint32_t *bflink = &bfaddr;
for (slot = 0; slot < ATH_BCBUF; slot++) {
vap = sc->sc_bslot[slot];
if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
bf = ath_beacon_generate(sc, vap);
/*
* XXX TODO: this should use settxdesclinkptr()
* otherwise it won't work for EDMA chipsets!
*/
if (bf != NULL) {
/* XXX should do this using the ds */
*bflink = bf->bf_daddr;
ath_hal_gettxdesclinkptr(sc->sc_ah,
bf->bf_desc, &bflink);
}
}
}
/*
* XXX TODO: this should use settxdesclinkptr()
* otherwise it won't work for EDMA chipsets!
*/
*bflink = 0; /* terminate list */
}
/*
* Handle slot time change when a non-ERP station joins/leaves
* an 11g network. The 802.11 layer notifies us via callback,
* we mark updateslot, then wait one beacon before effecting
* the change. This gives associated stations at least one
* beacon interval to note the state change.
*/
/* XXX locking */
if (sc->sc_updateslot == UPDATE) {
sc->sc_updateslot = COMMIT; /* commit next beacon */
sc->sc_slotupdate = slot;
} else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
ath_setslottime(sc); /* commit change to h/w */
/*
* Check recent per-antenna transmit statistics and flip
* the default antenna if noticeably more frames went out
* on the non-default antenna.
* XXX assumes 2 antennae
*/
if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
otherant = sc->sc_defant & 1 ? 2 : 1;
if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
ath_setdefantenna(sc, otherant);
sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
}
/* Program the CABQ with the contents of the CABQ txq and start it */
ATH_TXQ_LOCK(sc->sc_cabq);
ath_beacon_cabq_start(sc);
ATH_TXQ_UNLOCK(sc->sc_cabq);
/* Program the new beacon frame if we have one for this interval */
if (bfaddr != 0) {
/*
* Stop any current dma and put the new frame on the queue.
* This should never fail since we check above that no frames
* are still pending on the queue.
*/
if (! sc->sc_isedma) {
if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
DPRINTF(sc, ATH_DEBUG_ANY,
"%s: beacon queue %u did not stop?\n",
__func__, sc->sc_bhalq);
}
}
/* NB: cabq traffic should already be queued and primed */
ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
ath_hal_txstart(ah, sc->sc_bhalq);
sc->sc_stats.ast_be_xmit++;
}
}
static void
ath_beacon_cabq_start_edma(struct ath_softc *sc)
{
struct ath_buf *bf, *bf_last;
struct ath_txq *cabq = sc->sc_cabq;
#if 0
struct ath_buf *bfi;
int i = 0;
#endif
ATH_TXQ_LOCK_ASSERT(cabq);
if (TAILQ_EMPTY(&cabq->axq_q))
return;
bf = TAILQ_FIRST(&cabq->axq_q);
bf_last = TAILQ_LAST(&cabq->axq_q, axq_q_s);
/*
* This is a dirty, dirty hack to push the contents of
* the cabq staging queue into the FIFO.
*
* This ideally should live in the EDMA code file
* and only push things into the CABQ if there's a FIFO
* slot.
*
* We can't treat this like a normal TX queue because
* in the case of multi-VAP traffic, we may have to flush
* the CABQ each new (staggered) beacon that goes out.
* But for non-staggered beacons, we could in theory
* handle multicast traffic for all VAPs in one FIFO
* push. Just keep all of this in mind if you're wondering
* how to correctly/better handle multi-VAP CABQ traffic
* with EDMA.
*/
/*
* Is the CABQ FIFO free? If not, complain loudly and
* don't queue anything. Maybe we'll flush the CABQ
* traffic, maybe we won't. But that'll happen next
* beacon interval.
*/
if (cabq->axq_fifo_depth >= HAL_TXFIFO_DEPTH) {
device_printf(sc->sc_dev,
"%s: Q%d: CAB FIFO queue=%d?\n",
__func__,
cabq->axq_qnum,
cabq->axq_fifo_depth);
return;
}
/*
* Ok, so here's the gymnastics required to make this
* all sensible.
*/
/*
* Tag the first/last buffer appropriately.
*/
bf->bf_flags |= ATH_BUF_FIFOPTR;
bf_last->bf_flags |= ATH_BUF_FIFOEND;
#if 0
i = 0;
TAILQ_FOREACH(bfi, &cabq->axq_q, bf_list) {
ath_printtxbuf(sc, bf, cabq->axq_qnum, i, 0);
i++;
}
#endif
/*
* We now need to push this set of frames onto the tail
* of the FIFO queue. We don't adjust the aggregate
* count, only the queue depth counter(s).
* We also need to blank the link pointer now.
*/
TAILQ_CONCAT(&cabq->fifo.axq_q, &cabq->axq_q, bf_list);
cabq->axq_link = NULL;
cabq->fifo.axq_depth += cabq->axq_depth;
cabq->axq_depth = 0;
/* Bump FIFO queue */
cabq->axq_fifo_depth++;
/* Push the first entry into the hardware */
ath_hal_puttxbuf(sc->sc_ah, cabq->axq_qnum, bf->bf_daddr);
cabq->axq_flags |= ATH_TXQ_PUTRUNNING;
/* NB: gated by beacon so safe to start here */
ath_hal_txstart(sc->sc_ah, cabq->axq_qnum);
}
static void
ath_beacon_cabq_start_legacy(struct ath_softc *sc)
{
struct ath_buf *bf;
struct ath_txq *cabq = sc->sc_cabq;
ATH_TXQ_LOCK_ASSERT(cabq);
if (TAILQ_EMPTY(&cabq->axq_q))
return;
bf = TAILQ_FIRST(&cabq->axq_q);
/* Push the first entry into the hardware */
ath_hal_puttxbuf(sc->sc_ah, cabq->axq_qnum, bf->bf_daddr);
cabq->axq_flags |= ATH_TXQ_PUTRUNNING;
/* NB: gated by beacon so safe to start here */
ath_hal_txstart(sc->sc_ah, cabq->axq_qnum);
}
/*
* Start CABQ transmission - this assumes that all frames are prepped
* and ready in the CABQ.
*/
void
ath_beacon_cabq_start(struct ath_softc *sc)
{
struct ath_txq *cabq = sc->sc_cabq;
ATH_TXQ_LOCK_ASSERT(cabq);
if (TAILQ_EMPTY(&cabq->axq_q))
return;
if (sc->sc_isedma)
ath_beacon_cabq_start_edma(sc);
else
ath_beacon_cabq_start_legacy(sc);
}
struct ath_buf *
ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
{
struct ath_vap *avp = ATH_VAP(vap);
struct ath_txq *cabq = sc->sc_cabq;
struct ath_buf *bf;
struct mbuf *m;
int nmcastq, error;
KASSERT(vap->iv_state >= IEEE80211_S_RUN,
("not running, state %d", vap->iv_state));
KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
/*
* Update dynamic beacon contents. If this returns
* non-zero then we need to remap the memory because
* the beacon frame changed size (probably because
* of the TIM bitmap).
*/
bf = avp->av_bcbuf;
m = bf->bf_m;
/* XXX lock mcastq? */
nmcastq = avp->av_mcastq.axq_depth;
if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
/* XXX too conservative? */
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
bf->bf_segs, &bf->bf_nseg,
BUS_DMA_NOWAIT);
if (error != 0) {
if_printf(vap->iv_ifp,
"%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
__func__, error);
return NULL;
}
}
if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: cabq did not drain, mcastq %u cabq %u\n",
__func__, nmcastq, cabq->axq_depth);
sc->sc_stats.ast_cabq_busy++;
if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
/*
* CABQ traffic from a previous vap is still pending.
* We must drain the q before this beacon frame goes
* out as otherwise this vap's stations will get cab
* frames from a different vap.
* XXX could be slow causing us to miss DBA
*/
/*
* XXX TODO: this doesn't stop CABQ DMA - it assumes
* that since we're about to transmit a beacon, we've
* already stopped transmitting on the CABQ. But this
* doesn't at all mean that the CABQ DMA QCU will
* accept a new TXDP! So what, should we do a DMA
* stop? What if it fails?
*
* More thought is required here.
*/
/*
* XXX can we even stop TX DMA here? Check what the
* reference driver does for cabq for beacons, given
* that stopping TX requires RX is paused.
*/
ath_tx_draintxq(sc, cabq);
}
}
ath_beacon_setup(sc, bf);
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
/*
* Enable the CAB queue before the beacon queue to
* insure cab frames are triggered by this beacon.
*/
if (avp->av_boff.bo_tim[4] & 1) {
/* NB: only at DTIM */
ATH_TXQ_LOCK(&avp->av_mcastq);
if (nmcastq) {
struct ath_buf *bfm, *bfc_last;
/*
* Move frames from the s/w mcast q to the h/w cab q.
*
* XXX TODO: if we chain together multiple VAPs
* worth of CABQ traffic, should we keep the
* MORE data bit set on the last frame of each
* intermediary VAP (ie, only clear the MORE
* bit of the last frame on the last vap?)
*/
bfm = TAILQ_FIRST(&avp->av_mcastq.axq_q);
ATH_TXQ_LOCK(cabq);
/*
* If there's already a frame on the CABQ, we
* need to link to the end of the last frame.
* We can't use axq_link here because
* EDMA descriptors require some recalculation
* (checksum) to occur.
*/
bfc_last = ATH_TXQ_LAST(cabq, axq_q_s);
if (bfc_last != NULL) {
ath_hal_settxdesclink(sc->sc_ah,
bfc_last->bf_lastds,
bfm->bf_daddr);
}
ath_txqmove(cabq, &avp->av_mcastq);
ATH_TXQ_UNLOCK(cabq);
/*
* XXX not entirely accurate, in case a mcast
* queue frame arrived before we grabbed the TX
* lock.
*/
sc->sc_stats.ast_cabq_xmit += nmcastq;
}
ATH_TXQ_UNLOCK(&avp->av_mcastq);
}
return bf;
}
void
ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
{
struct ath_vap *avp = ATH_VAP(vap);
struct ath_hal *ah = sc->sc_ah;
struct ath_buf *bf;
struct mbuf *m;
int error;
KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
/*
* Update dynamic beacon contents. If this returns
* non-zero then we need to remap the memory because
* the beacon frame changed size (probably because
* of the TIM bitmap).
*/
bf = avp->av_bcbuf;
m = bf->bf_m;
if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
/* XXX too conservative? */
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
bf->bf_segs, &bf->bf_nseg,
BUS_DMA_NOWAIT);
if (error != 0) {
if_printf(vap->iv_ifp,
"%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
__func__, error);
return;
}
}
ath_beacon_setup(sc, bf);
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
/* NB: caller is known to have already stopped tx dma */
ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
ath_hal_txstart(ah, sc->sc_bhalq);
}
/*
* Reclaim beacon resources and return buffer to the pool.
*/
void
ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
{
DPRINTF(sc, ATH_DEBUG_NODE, "%s: free bf=%p, bf_m=%p, bf_node=%p\n",
__func__, bf, bf->bf_m, bf->bf_node);
if (bf->bf_m != NULL) {
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
m_freem(bf->bf_m);
bf->bf_m = NULL;
}
if (bf->bf_node != NULL) {
ieee80211_free_node(bf->bf_node);
bf->bf_node = NULL;
}
TAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
}
/*
* Reclaim beacon resources.
*/
void
ath_beacon_free(struct ath_softc *sc)
{
struct ath_buf *bf;
TAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
DPRINTF(sc, ATH_DEBUG_NODE,
"%s: free bf=%p, bf_m=%p, bf_node=%p\n",
__func__, bf, bf->bf_m, bf->bf_node);
if (bf->bf_m != NULL) {
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
m_freem(bf->bf_m);
bf->bf_m = NULL;
}
if (bf->bf_node != NULL) {
ieee80211_free_node(bf->bf_node);
bf->bf_node = NULL;
}
}
}
/*
* Configure the beacon and sleep timers.
*
* When operating as an AP this resets the TSF and sets
* up the hardware to notify us when we need to issue beacons.
*
* When operating in station mode this sets up the beacon
* timers according to the timestamp of the last received
* beacon and the current TSF, configures PCF and DTIM
* handling, programs the sleep registers so the hardware
* will wakeup in time to receive beacons, and configures
* the beacon miss handling so we'll receive a BMISS
* interrupt when we stop seeing beacons from the AP
* we've associated with.
*/
void
ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
{
#define TSF_TO_TU(_h,_l) \
((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
#define FUDGE 2
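/*
 * Worked example for TSF_TO_TU (assumed values): a TU is 1024 usec,
 * so the low TSF word is shifted right by 10 and the high word
 * contributes its bits shifted left by 32 - 10 == 22. A TSF of
 * 0x100000 (1048576 usec) therefore maps to 1024 TU.
 */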
struct ath_hal *ah = sc->sc_ah;
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni;
u_int32_t nexttbtt, intval, tsftu;
u_int32_t nexttbtt_u8, intval_u8;
u_int64_t tsf, tsf_beacon;
if (vap == NULL)
vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */
/*
* Just ensure that we aren't being called when the last
* VAP is destroyed.
*/
if (vap == NULL) {
device_printf(sc->sc_dev, "%s: called with no VAPs\n",
__func__);
return;
}
ni = ieee80211_ref_node(vap->iv_bss);
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
/* extract tstamp from last beacon and convert to TU */
nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
LE_READ_4(ni->ni_tstamp.data));
tsf_beacon = ((uint64_t) LE_READ_4(ni->ni_tstamp.data + 4)) << 32;
tsf_beacon |= LE_READ_4(ni->ni_tstamp.data);
if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
ic->ic_opmode == IEEE80211_M_MBSS) {
/*
* For multi-bss ap/mesh support, beacons are either staggered
* evenly over N slots or burst together. For the former
* arrange for the SWBA to be delivered for each slot.
* Slots that are not occupied will generate nothing.
*/
/* NB: the beacon interval is kept internally in TU's */
intval = ni->ni_intval & HAL_BEACON_PERIOD;
if (sc->sc_stagbeacons)
intval /= ATH_BCBUF;
} else {
/* NB: the beacon interval is kept internally in TU's */
intval = ni->ni_intval & HAL_BEACON_PERIOD;
}
if (nexttbtt == 0) /* e.g. for ap mode */
nexttbtt = intval;
else if (intval) /* NB: can be 0 for monitor mode */
nexttbtt = roundup(nexttbtt, intval);
DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
__func__, nexttbtt, intval, ni->ni_intval);
if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
HAL_BEACON_STATE bs;
int dtimperiod, dtimcount;
int cfpperiod, cfpcount;
/*
* Setup dtim and cfp parameters according to
* last beacon we received (which may be none).
*/
dtimperiod = ni->ni_dtim_period;
if (dtimperiod <= 0) /* NB: 0 if not known */
dtimperiod = 1;
dtimcount = ni->ni_dtim_count;
if (dtimcount >= dtimperiod) /* NB: sanity check */
dtimcount = 0; /* XXX? */
cfpperiod = 1; /* NB: no PCF support yet */
cfpcount = 0;
/*
* Pull nexttbtt forward to reflect the current
* TSF and calculate dtim+cfp state for the result.
*/
tsf = ath_hal_gettsf64(ah);
tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: beacon tsf=%llu, hw tsf=%llu, nexttbtt=%u, tsftu=%u\n",
__func__,
(unsigned long long) tsf_beacon,
(unsigned long long) tsf,
nexttbtt,
tsftu);
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: beacon tsf=%llu, hw tsf=%llu, tsf delta=%lld\n",
__func__,
(unsigned long long) tsf_beacon,
(unsigned long long) tsf,
(long long) tsf -
(long long) tsf_beacon);
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: nexttbtt=%llu, beacon tsf delta=%lld\n",
__func__,
(unsigned long long) nexttbtt,
(long long) ((long long) nexttbtt * 1024LL) - (long long) tsf_beacon);
/* XXX cfpcount? */
if (nexttbtt > tsftu) {
uint32_t countdiff, oldtbtt, remainder;
oldtbtt = nexttbtt;
remainder = (nexttbtt - tsftu) % intval;
nexttbtt = tsftu + remainder;
countdiff = (oldtbtt - nexttbtt) / intval % dtimperiod;
if (dtimcount > countdiff) {
dtimcount -= countdiff;
} else {
dtimcount += dtimperiod - countdiff;
}
} else { //nexttbtt <= tsftu
uint32_t countdiff, oldtbtt, remainder;
oldtbtt = nexttbtt;
remainder = (tsftu - nexttbtt) % intval;
nexttbtt = tsftu - remainder + intval;
countdiff = (nexttbtt - oldtbtt) / intval % dtimperiod;
if (dtimcount > countdiff) {
dtimcount -= countdiff;
} else {
dtimcount += dtimperiod - countdiff;
}
}
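/*
 * Illustrative trace of the pull-forward above (assumed values):
 * intval == 100 TU, dtimperiod == 3, dtimcount == 0, old
 * nexttbtt == 300, tsftu == 1050. The second branch runs:
 * remainder == (1050 - 300) % 100 == 50, nexttbtt == 1050 - 50 +
 * 100 == 1100, countdiff == (1100 - 300) / 100 % 3 == 2, and
 * dtimcount becomes 0 + 3 - 2 == 1, i.e. the next DTIM falls one
 * beacon after the new nexttbtt.
 */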
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: adj nexttbtt=%llu, rx tsf delta=%lld\n",
__func__,
(unsigned long long) nexttbtt,
(long long) ((long long)nexttbtt * 1024LL) - (long long)tsf);
memset(&bs, 0, sizeof(bs));
bs.bs_intval = intval;
bs.bs_nexttbtt = nexttbtt;
bs.bs_dtimperiod = dtimperiod*intval;
bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
bs.bs_cfpmaxduration = 0;
#if 0
/*
* The 802.11 layer records the offset to the DTIM
* bitmap while receiving beacons; use it here to
* enable h/w detection of our AID being marked in
* the bitmap vector (to indicate frames for us are
* pending at the AP).
* XXX do DTIM handling in s/w to WAR old h/w bugs
* XXX enable based on h/w rev for newer chips
*/
bs.bs_timoffset = ni->ni_timoff;
#endif
/*
* Calculate the number of consecutive beacons to miss
* before taking a BMISS interrupt.
* Note that we clamp the result to at most 10 beacons.
*/
bs.bs_bmissthreshold = vap->iv_bmissthreshold;
if (bs.bs_bmissthreshold > 10)
bs.bs_bmissthreshold = 10;
else if (bs.bs_bmissthreshold <= 0)
bs.bs_bmissthreshold = 1;
/*
* Calculate sleep duration. The configuration is
* given in ms. We insure a multiple of the beacon
* period is used. Also, if the sleep duration is
* greater than the DTIM period then it makes sense
* to make it a multiple of that.
*
* XXX fixed at 100ms
*/
bs.bs_sleepduration =
roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
if (bs.bs_sleepduration > bs.bs_dtimperiod)
bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
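/*
 * Example with assumed values: bs_intval == 100 TU and
 * bs_dtimperiod == 300 TU. IEEE80211_MS_TO_TU(100) is roughly
 * 97 TU, which rounds up to 100 TU (one beacon interval); since
 * that does not exceed the DTIM period, no further rounding is
 * applied.
 */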
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u "
"nextdtim %u bmiss %u sleep %u cfp:period %u "
"maxdur %u next %u timoffset %u\n"
, __func__
, tsf
, tsftu
, bs.bs_intval
, bs.bs_nexttbtt
, bs.bs_dtimperiod
, bs.bs_nextdtim
, bs.bs_bmissthreshold
, bs.bs_sleepduration
, bs.bs_cfpperiod
, bs.bs_cfpmaxduration
, bs.bs_cfpnext
, bs.bs_timoffset
);
ath_hal_intrset(ah, 0);
ath_hal_beacontimers(ah, &bs);
sc->sc_imask |= HAL_INT_BMISS;
ath_hal_intrset(ah, sc->sc_imask);
} else {
ath_hal_intrset(ah, 0);
if (nexttbtt == intval)
intval |= HAL_BEACON_RESET_TSF;
if (ic->ic_opmode == IEEE80211_M_IBSS) {
/*
* In IBSS mode enable the beacon timers but only
* enable SWBA interrupts if we need to manually
* prepare beacon frames. Otherwise we use a
* self-linked tx descriptor and let the hardware
* deal with things.
*/
intval |= HAL_BEACON_ENA;
if (!sc->sc_hasveol)
sc->sc_imask |= HAL_INT_SWBA;
if ((intval & HAL_BEACON_RESET_TSF) == 0) {
/*
* Pull nexttbtt forward to reflect
* the current TSF.
*/
tsf = ath_hal_gettsf64(ah);
tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
do {
nexttbtt += intval;
} while (nexttbtt < tsftu);
}
ath_beaconq_config(sc);
} else if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
ic->ic_opmode == IEEE80211_M_MBSS) {
/*
* In AP/mesh mode we enable the beacon timers
* and SWBA interrupts to prepare beacon frames.
*/
intval |= HAL_BEACON_ENA;
sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */
ath_beaconq_config(sc);
}
/*
* This is a bit dirty: for now the EDMA HAL expects both
* nexttbtt and intval in TU/8 units.
*/
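/*
 * For example (assumed value), nexttbtt == 100 TU becomes 800 in
 * TU/8 units; the HAL_BEACON_ENA / HAL_BEACON_RESET_TSF flag bits
 * are ORed back in afterwards since the shift would otherwise
 * leave them in the wrong bit positions.
 */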
if (sc->sc_isedma) {
nexttbtt_u8 = (nexttbtt << 3);
intval_u8 = (intval << 3);
if (intval & HAL_BEACON_ENA)
intval_u8 |= HAL_BEACON_ENA;
if (intval & HAL_BEACON_RESET_TSF)
intval_u8 |= HAL_BEACON_RESET_TSF;
ath_hal_beaconinit(ah, nexttbtt_u8, intval_u8);
} else
ath_hal_beaconinit(ah, nexttbtt, intval);
sc->sc_bmisscount = 0;
ath_hal_intrset(ah, sc->sc_imask);
/*
* When using a self-linked beacon descriptor in
* ibss mode load it once here.
*/
if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
ath_beacon_start_adhoc(sc, vap);
}
ieee80211_free_node(ni);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
#undef FUDGE
#undef TSF_TO_TU
}
Index: head/sys/dev/ath/if_ath_debug.h
===================================================================
--- head/sys/dev/ath/if_ath_debug.h (revision 287196)
+++ head/sys/dev/ath/if_ath_debug.h (revision 287197)
@@ -1,125 +1,122 @@
/*-
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
* $FreeBSD$
*/
#ifndef __IF_ATH_DEBUG_H__
#define __IF_ATH_DEBUG_H__
#ifdef ATH_DEBUG
enum {
ATH_DEBUG_XMIT = 0x000000001ULL, /* basic xmit operation */
ATH_DEBUG_XMIT_DESC = 0x000000002ULL, /* xmit descriptors */
ATH_DEBUG_RECV = 0x000000004ULL, /* basic recv operation */
ATH_DEBUG_RECV_DESC = 0x000000008ULL, /* recv descriptors */
ATH_DEBUG_RATE = 0x000000010ULL, /* rate control */
ATH_DEBUG_RESET = 0x000000020ULL, /* reset processing */
ATH_DEBUG_MODE = 0x000000040ULL, /* mode init/setup */
ATH_DEBUG_BEACON = 0x000000080ULL, /* beacon handling */
ATH_DEBUG_WATCHDOG = 0x000000100ULL, /* watchdog timeout */
ATH_DEBUG_INTR = 0x000001000ULL, /* ISR */
ATH_DEBUG_TX_PROC = 0x000002000ULL, /* tx ISR proc */
ATH_DEBUG_RX_PROC = 0x000004000ULL, /* rx ISR proc */
ATH_DEBUG_BEACON_PROC = 0x000008000ULL, /* beacon ISR proc */
ATH_DEBUG_CALIBRATE = 0x000010000ULL, /* periodic calibration */
ATH_DEBUG_KEYCACHE = 0x000020000ULL, /* key cache management */
ATH_DEBUG_STATE = 0x000040000ULL, /* 802.11 state transitions */
ATH_DEBUG_NODE = 0x000080000ULL, /* node management */
ATH_DEBUG_LED = 0x000100000ULL, /* led management */
ATH_DEBUG_FF = 0x000200000ULL, /* fast frames */
ATH_DEBUG_DFS = 0x000400000ULL, /* DFS processing */
ATH_DEBUG_TDMA = 0x000800000ULL, /* TDMA processing */
ATH_DEBUG_TDMA_TIMER = 0x001000000ULL, /* TDMA timer processing */
ATH_DEBUG_REGDOMAIN = 0x002000000ULL, /* regulatory processing */
ATH_DEBUG_SW_TX = 0x004000000ULL, /* per-packet software TX */
ATH_DEBUG_SW_TX_BAW = 0x008000000ULL, /* BAW handling */
ATH_DEBUG_SW_TX_CTRL = 0x010000000ULL, /* queue control */
ATH_DEBUG_SW_TX_AGGR = 0x020000000ULL, /* aggregate TX */
ATH_DEBUG_SW_TX_RETRIES = 0x040000000ULL, /* software TX retries */
ATH_DEBUG_FATAL = 0x080000000ULL, /* fatal errors */
ATH_DEBUG_SW_TX_BAR = 0x100000000ULL, /* BAR TX */
ATH_DEBUG_EDMA_RX = 0x200000000ULL, /* RX EDMA state */
ATH_DEBUG_SW_TX_FILT = 0x400000000ULL, /* SW TX FF */
ATH_DEBUG_NODE_PWRSAVE = 0x800000000ULL, /* node powersave */
ATH_DEBUG_DIVERSITY = 0x1000000000ULL, /* Diversity logic */
ATH_DEBUG_PWRSAVE = 0x2000000000ULL,
ATH_DEBUG_ANY = 0xffffffffffffffffULL
};
enum {
ATH_KTR_RXPROC = 0x00000001,
ATH_KTR_TXPROC = 0x00000002,
ATH_KTR_TXCOMP = 0x00000004,
ATH_KTR_SWQ = 0x00000008,
ATH_KTR_INTERRUPTS = 0x00000010,
ATH_KTR_ERROR = 0x00000020,
ATH_KTR_NODE = 0x00000040,
ATH_KTR_TX = 0x00000080,
};
#define ATH_KTR(_sc, _km, _kf, ...) do { \
if (sc->sc_ktrdebug & (_km)) \
CTR##_kf(KTR_DEV, __VA_ARGS__); \
} while (0)
extern uint64_t ath_debug;
-#define IFF_DUMPPKTS(sc, m) \
- ((sc->sc_debug & (m)) || \
- (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
+#define IFF_DUMPPKTS(sc, m) (sc->sc_debug & (m))
#define DPRINTF(sc, m, fmt, ...) do { \
if (sc->sc_debug & (m)) \
device_printf(sc->sc_dev, fmt, __VA_ARGS__); \
} while (0)
#define KEYPRINTF(sc, ix, hk, mac) do { \
if (sc->sc_debug & ATH_DEBUG_KEYCACHE) \
ath_keyprint(sc, __func__, ix, hk, mac); \
} while (0)
extern void ath_printrxbuf(struct ath_softc *, const struct ath_buf *bf,
u_int ix, int);
extern void ath_printtxbuf(struct ath_softc *, const struct ath_buf *bf,
u_int qnum, u_int ix, int done);
extern void ath_printtxstatbuf(struct ath_softc *sc, const struct ath_buf *bf,
const uint32_t *ds, u_int qnum, u_int ix, int done);
#else /* ATH_DEBUG */
#define ATH_KTR(_sc, _km, _kf, ...) do { } while (0)
-#define IFF_DUMPPKTS(sc, m) \
- ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
+#define IFF_DUMPPKTS(sc, m) (0)
#define DPRINTF(sc, m, fmt, ...) do { \
(void) sc; \
} while (0)
#define KEYPRINTF(sc, k, ix, mac) do { \
(void) sc; \
} while (0)
#endif /* ATH_DEBUG */
#endif
Index: head/sys/dev/ath/if_ath_misc.h
===================================================================
--- head/sys/dev/ath/if_ath_misc.h (revision 287196)
+++ head/sys/dev/ath/if_ath_misc.h (revision 287197)
@@ -1,163 +1,163 @@
/*-
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
* $FreeBSD$
*/
#ifndef __IF_ATH_MISC_H__
#define __IF_ATH_MISC_H__
/*
* This is where definitions for "public things" in if_ath.c
* will go for the time being.
*
* Anything in here should eventually be moved out of if_ath.c
* and into something else.
*/
/* unaligned little endian access */
#define LE_READ_2(p) \
((u_int16_t) \
((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8)))
#define LE_READ_4(p) \
((u_int32_t) \
((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \
(((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)))
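/*
 * Example: for u_int8_t p[] = { 0x78, 0x56, 0x34, 0x12 },
 * LE_READ_4(p) assembles 0x12345678 one byte at a time, which is
 * why it is safe on unaligned pointers.
 */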
extern int ath_rxbuf;
extern int ath_txbuf;
extern int ath_txbuf_mgmt;
extern int ath_tx_findrix(const struct ath_softc *sc, uint8_t rate);
extern struct ath_buf * ath_getbuf(struct ath_softc *sc,
ath_buf_type_t btype);
extern struct ath_buf * _ath_getbuf_locked(struct ath_softc *sc,
ath_buf_type_t btype);
extern struct ath_buf * ath_buf_clone(struct ath_softc *sc,
struct ath_buf *bf);
/* XXX change this to NULL the buffer pointer? */
extern void ath_freebuf(struct ath_softc *sc, struct ath_buf *bf);
extern void ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf);
extern void ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf);
-extern int ath_reset(struct ifnet *, ATH_RESET_TYPE);
+extern int ath_reset(struct ath_softc *, ATH_RESET_TYPE);
extern void ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf,
int fail);
extern void ath_tx_update_ratectrl(struct ath_softc *sc,
struct ieee80211_node *ni, struct ath_rc_series *rc,
struct ath_tx_status *ts, int frmlen, int nframes, int nbad);
extern int ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask,
uint32_t *hangs);
extern void ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf,
int status);
extern void ath_txq_freeholdingbuf(struct ath_softc *sc,
struct ath_txq *txq);
extern void ath_txqmove(struct ath_txq *dst, struct ath_txq *src);
extern void ath_mode_init(struct ath_softc *sc);
extern void ath_setdefantenna(struct ath_softc *sc, u_int antenna);
extern void ath_setslottime(struct ath_softc *sc);
extern int ath_descdma_alloc_desc(struct ath_softc *sc,
struct ath_descdma *dd, ath_bufhead *head, const char *name,
int ds_size, int ndesc);
extern int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
ath_bufhead *head, const char *name, int ds_size, int nbuf,
int ndesc);
extern int ath_descdma_setup_rx_edma(struct ath_softc *sc,
struct ath_descdma *dd, ath_bufhead *head, const char *name,
int nbuf, int desclen);
extern void ath_descdma_cleanup(struct ath_softc *sc,
struct ath_descdma *dd, ath_bufhead *head);
extern void ath_legacy_attach_comp_func(struct ath_softc *sc);
extern void ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq);
extern void ath_legacy_tx_drain(struct ath_softc *sc,
ATH_RESET_TYPE reset_type);
extern void ath_tx_process_buf_completion(struct ath_softc *sc,
struct ath_txq *txq, struct ath_tx_status *ts, struct ath_buf *bf);
extern int ath_stoptxdma(struct ath_softc *sc);
extern void ath_tx_update_tim(struct ath_softc *sc,
struct ieee80211_node *ni, int enable);
/*
* This is only here so that the RX proc function can call it.
* It's very likely that the "start TX after RX" call should be
* done via something in if_ath.c, moving "rx tasklet" into
* if_ath.c and do the ath_start() call there. Once that's done,
* we can kill this.
*/
extern void ath_start(struct ifnet *ifp);
extern void ath_start_task(void *arg, int npending);
extern void ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq);
/*
* Power state tracking.
*/
extern void _ath_power_setpower(struct ath_softc *sc, int power_state, const char *file, int line);
extern void _ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line);
extern void _ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line);
extern void _ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line);
#define ath_power_setpower(sc, ps) _ath_power_setpower(sc, ps, __FILE__, __LINE__)
#define ath_power_setselfgen(sc, ps) _ath_power_set_selfgen(sc, ps, __FILE__, __LINE__)
#define ath_power_set_power_state(sc, ps) _ath_power_set_power_state(sc, ps, __FILE__, __LINE__)
#define ath_power_restore_power_state(sc) _ath_power_restore_power_state(sc, __FILE__, __LINE__)
/*
* Kick the frame TX task.
*/
static inline void
ath_tx_kick(struct ath_softc *sc)
{
/* XXX NULL for now */
}
/*
* Kick the software TX queue task.
*/
static inline void
ath_tx_swq_kick(struct ath_softc *sc)
{
taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask);
}
#endif
Index: head/sys/dev/ath/if_ath_rx.c
===================================================================
--- head/sys/dev/ath/if_ath_rx.c (revision 287196)
+++ head/sys/dev/ath/if_ath_rx.c (revision 287197)
@@ -1,1469 +1,1455 @@
/*-
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for the Atheros Wireless LAN controller.
*
* This software is derived from work of Atsushi Onoe; his contribution
* is greatly appreciated.
*/
#include "opt_inet.h"
#include "opt_ath.h"
/*
* This is needed for register operations which are performed
* by the driver - eg, calls to ath_hal_gettsf32().
*
* It's also required for any AH_DEBUG checks in here, eg the
* module dependencies.
*/
#include "opt_ah.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h> /* for mp_ncpus */
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net/bpf.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>
#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>
#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif
#include <dev/ath/if_ath_lna_div.h>
/*
* Calculate the receive filter according to the
* operating mode and state:
*
* o always accept unicast, broadcast, and multicast traffic
* o accept PHY error frames when hardware doesn't have MIB support
* to count and we need them for ANI (sta mode only until recently)
* and we are not scanning (ANI is disabled)
* NB: older hal's add rx filter bits out of sight and we need to
* blindly preserve them
* o probe request frames are accepted only when operating in
* hostap, adhoc, mesh, or monitor modes
* o enable promiscuous mode
* - when in monitor mode
* - if interface marked PROMISC (assumes bridge setting is filtered)
* o accept beacons:
* - when operating in station mode for collecting rssi data when
* the station is otherwise quiet, or
* - when operating in adhoc mode so the 802.11 layer creates
* node table entries for peers,
* - when scanning
* - when doing s/w beacon miss (e.g. for ap+sta)
* - when operating in ap mode in 11g to detect overlapping bss that
* require protection
* - when operating in mesh mode to detect neighbors
* o accept control frames:
* - when in monitor mode
* XXX HT protection for 11n
*/
u_int32_t
ath_calcrxfilter(struct ath_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
u_int32_t rfilt;
rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
if (!sc->sc_needmib && !sc->sc_scanning)
rfilt |= HAL_RX_FILTER_PHYERR;
if (ic->ic_opmode != IEEE80211_M_STA)
rfilt |= HAL_RX_FILTER_PROBEREQ;
/* XXX ic->ic_monvaps != 0? */
- if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
+ if (ic->ic_opmode == IEEE80211_M_MONITOR || ic->ic_promisc > 0)
rfilt |= HAL_RX_FILTER_PROM;
/*
* Only listen to all beacons if we're scanning.
*
* Otherwise we only really need to hear beacons from
* our own BSSID.
*/
if (ic->ic_opmode == IEEE80211_M_STA ||
ic->ic_opmode == IEEE80211_M_IBSS || sc->sc_swbmiss) {
if (sc->sc_do_mybeacon && ! sc->sc_scanning) {
rfilt |= HAL_RX_FILTER_MYBEACON;
} else { /* scanning, non-mybeacon chips */
rfilt |= HAL_RX_FILTER_BEACON;
}
}
/*
* NB: We don't recalculate the rx filter when
* ic_protmode changes; otherwise we could do
* this only when ic_protmode != NONE.
*/
if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
rfilt |= HAL_RX_FILTER_BEACON;
/*
* Enable hardware PS-POLL RX only for hostap mode;
* STA mode sends PS-POLL frames but never
* receives them.
*/
if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL,
0, NULL) == HAL_OK &&
ic->ic_opmode == IEEE80211_M_HOSTAP)
rfilt |= HAL_RX_FILTER_PSPOLL;
if (sc->sc_nmeshvaps) {
rfilt |= HAL_RX_FILTER_BEACON;
if (sc->sc_hasbmatch)
rfilt |= HAL_RX_FILTER_BSSID;
else
rfilt |= HAL_RX_FILTER_PROM;
}
if (ic->ic_opmode == IEEE80211_M_MONITOR)
rfilt |= HAL_RX_FILTER_CONTROL;
/*
* Enable RX of compressed BAR frames only when doing
* 802.11n. Required for A-MPDU.
*/
if (IEEE80211_IS_CHAN_HT(ic->ic_curchan))
rfilt |= HAL_RX_FILTER_COMPBAR;
/*
* Enable radar PHY errors if requested by the
* DFS module.
*/
if (sc->sc_dodfs)
rfilt |= HAL_RX_FILTER_PHYRADAR;
/*
* Enable spectral PHY errors if requested by the
* spectral module.
*/
if (sc->sc_dospectral)
rfilt |= HAL_RX_FILTER_PHYRADAR;
- DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
- __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
+ DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s\n",
+ __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode]);
return rfilt;
}
static int
ath_legacy_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
struct ath_hal *ah = sc->sc_ah;
int error;
struct mbuf *m;
struct ath_desc *ds;
/* XXX TODO: ATH_RX_LOCK_ASSERT(sc); */
m = bf->bf_m;
if (m == NULL) {
/*
* NB: by assigning a page to the rx dma buffer we
* implicitly satisfy the Atheros requirement that
* this buffer be cache-line-aligned and sized to be a
* multiple of the cache line size. Not doing this
* causes weird stuff to happen (for the 5210 at least).
*/
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
DPRINTF(sc, ATH_DEBUG_ANY,
"%s: no mbuf/cluster\n", __func__);
sc->sc_stats.ast_rx_nombuf++;
return ENOMEM;
}
m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
bf->bf_dmamap, m,
bf->bf_segs, &bf->bf_nseg,
BUS_DMA_NOWAIT);
if (error != 0) {
DPRINTF(sc, ATH_DEBUG_ANY,
"%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
__func__, error);
sc->sc_stats.ast_rx_busdma++;
m_freem(m);
return error;
}
KASSERT(bf->bf_nseg == 1,
("multi-segment packet; nseg %u", bf->bf_nseg));
bf->bf_m = m;
}
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);
/*
* Setup descriptors. For receive we always terminate
* the descriptor list with a self-linked entry so we'll
* not get overrun under high load (as can happen with a
* 5212 when ANI processing enables PHY error frames).
*
* To insure the last descriptor is self-linked we create
* each descriptor as self-linked and add it to the end. As
* each additional descriptor is added the previous self-linked
* entry is ``fixed'' naturally. This should be safe even
* if DMA is happening. When processing RX interrupts we
* never remove/process the last, self-linked, entry on the
* descriptor list. This insures the hardware always has
* someplace to write a new frame.
*/
/*
* 11N: we can no longer afford to self link the last descriptor.
* MAC acknowledges BA status as long as it copies frames to host
* buffer (or rx fifo). This can incorrectly acknowledge packets
* to a sender if last desc is self-linked.
*/
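/*
 * Sketch of the two descriptor-list shapes (A, B, C are rx
 * descriptors):
 *   self-linked:  A -> B -> C -> C -> ...  (C->ds_link == C's bf_daddr)
 *   terminated:   A -> B -> C -> 0
 */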
ds = bf->bf_desc;
if (sc->sc_rxslink)
ds->ds_link = bf->bf_daddr; /* link to self */
else
ds->ds_link = 0; /* terminate the list */
ds->ds_data = bf->bf_segs[0].ds_addr;
ath_hal_setuprxdesc(ah, ds
, m->m_len /* buffer size */
, 0
);
if (sc->sc_rxlink != NULL)
*sc->sc_rxlink = bf->bf_daddr;
sc->sc_rxlink = &ds->ds_link;
return 0;
}
/*
* Intercept management frames to collect beacon rssi data
* and to do ibss merges.
*/
void
ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ath_softc *sc = vap->iv_ic->ic_softc;
uint64_t tsf_beacon_old, tsf_beacon;
uint64_t nexttbtt;
int64_t tsf_delta;
int32_t tsf_delta_bmiss;
int32_t tsf_remainder;
uint64_t tsf_beacon_target;
int tsf_intval;
tsf_beacon_old = ((uint64_t) LE_READ_4(ni->ni_tstamp.data + 4)) << 32;
tsf_beacon_old |= LE_READ_4(ni->ni_tstamp.data);
#define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10)
tsf_intval = 1;
if (ni->ni_intval > 0) {
tsf_intval = TU_TO_TSF(ni->ni_intval);
}
#undef TU_TO_TSF
/*
* Call up first so subsequent work can use information
* potentially stored in the node (e.g. for ibss merge).
*/
ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rxs, rssi, nf);
switch (subtype) {
case IEEE80211_FC0_SUBTYPE_BEACON:
/* update rssi statistics for use by the hal */
/* XXX unlocked check against vap->iv_bss? */
ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
tsf_beacon = ((uint64_t) LE_READ_4(ni->ni_tstamp.data + 4)) << 32;
tsf_beacon |= LE_READ_4(ni->ni_tstamp.data);
nexttbtt = ath_hal_getnexttbtt(sc->sc_ah);
/*
* Let's calculate the delta and remainder, so we can see
* if the beacon timer from the AP is varying by more than
* a few TU. (Which would be a huge, huge problem.)
*/
tsf_delta = (long long) tsf_beacon - (long long) tsf_beacon_old;
tsf_delta_bmiss = tsf_delta / tsf_intval;
/*
* If our delta is greater than half the beacon interval,
* let's round the bmiss value up to the next beacon
* interval. Ie, we're running really, really early
* on the next beacon.
*/
if (tsf_delta % tsf_intval > (tsf_intval / 2))
tsf_delta_bmiss ++;
tsf_beacon_target = tsf_beacon_old +
(((unsigned long long) tsf_delta_bmiss) * (long long) tsf_intval);
/*
* The remainder using '%' is between 0 .. intval-1.
* If we're actually running too fast, then the remainder
* will be some large number just under intval-1.
* So we need to look at whether we're running
* before or after the target beacon interval
* and, if so, modify how we do the remainder
* calculation.
*/
if (tsf_beacon < tsf_beacon_target) {
tsf_remainder =
-(tsf_intval - ((tsf_beacon - tsf_beacon_old) % tsf_intval));
} else {
tsf_remainder = (tsf_beacon - tsf_beacon_old) % tsf_intval;
}
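/*
 * Illustrative trace (assumed values): tsf_intval == 102400 usec
 * (100 TU), tsf_beacon_old == 0 and tsf_beacon == 204000. Then
 * tsf_delta == 204000, tsf_delta_bmiss rounds up from 1 to 2
 * because the leftover 101600 exceeds half an interval, the
 * target is 204800, and tsf_remainder == -800, i.e. this beacon
 * arrived 800 usec early.
 */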
DPRINTF(sc, ATH_DEBUG_BEACON, "%s: old_tsf=%llu, new_tsf=%llu, target_tsf=%llu, delta=%lld, bmiss=%d, remainder=%d\n",
__func__,
(unsigned long long) tsf_beacon_old,
(unsigned long long) tsf_beacon,
(unsigned long long) tsf_beacon_target,
(long long) tsf_delta,
tsf_delta_bmiss,
tsf_remainder);
DPRINTF(sc, ATH_DEBUG_BEACON, "%s: tsf=%llu, nexttbtt=%llu, delta=%d\n",
__func__,
(unsigned long long) tsf_beacon,
(unsigned long long) nexttbtt,
(int32_t) tsf_beacon - (int32_t) nexttbtt + tsf_intval);
if (sc->sc_syncbeacon &&
ni == vap->iv_bss &&
(vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP)) {
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: syncbeacon=1; syncing\n",
__func__);
/*
* Resync beacon timers using the tsf of the beacon
* frame we just received.
*/
ath_beacon_config(sc, vap);
sc->sc_syncbeacon = 0;
}
/* fall thru... */
case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
if (vap->iv_opmode == IEEE80211_M_IBSS &&
vap->iv_state == IEEE80211_S_RUN) {
uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
uint64_t tsf = ath_extend_tsf(sc, rstamp,
ath_hal_gettsf64(sc->sc_ah));
/*
* Handle ibss merge as needed; check the tsf on the
* frame before attempting the merge. The 802.11 spec
* says the station should change its bssid to match
* the oldest station with the same ssid, where oldest
* is determined by the tsf. Note that hardware
* reconfiguration happens through callback to
* ath_newstate as the state machine will go from
* RUN -> RUN when this happens.
*/
if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
DPRINTF(sc, ATH_DEBUG_STATE,
"ibss merge, rstamp %u tsf %ju "
"tstamp %ju\n", rstamp, (uintmax_t)tsf,
(uintmax_t)ni->ni_tstamp.tsf);
(void) ieee80211_ibss_merge(ni);
}
}
break;
}
}
#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
static void
-ath_rx_tap_vendor(struct ifnet *ifp, struct mbuf *m,
+ath_rx_tap_vendor(struct ath_softc *sc, struct mbuf *m,
const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
- struct ath_softc *sc = ifp->if_softc;
/* Fill in the extension bitmap */
sc->sc_rx_th.wr_ext_bitmap = htole32(1 << ATH_RADIOTAP_VENDOR_HEADER);
/* Fill in the vendor header */
sc->sc_rx_th.wr_vh.vh_oui[0] = 0x7f;
sc->sc_rx_th.wr_vh.vh_oui[1] = 0x03;
sc->sc_rx_th.wr_vh.vh_oui[2] = 0x00;
/* XXX what should this be? */
sc->sc_rx_th.wr_vh.vh_sub_ns = 0;
sc->sc_rx_th.wr_vh.vh_skip_len =
htole16(sizeof(struct ath_radiotap_vendor_hdr));
/* General version info */
sc->sc_rx_th.wr_v.vh_version = 1;
sc->sc_rx_th.wr_v.vh_rx_chainmask = sc->sc_rxchainmask;
/* rssi */
sc->sc_rx_th.wr_v.rssi_ctl[0] = rs->rs_rssi_ctl[0];
sc->sc_rx_th.wr_v.rssi_ctl[1] = rs->rs_rssi_ctl[1];
sc->sc_rx_th.wr_v.rssi_ctl[2] = rs->rs_rssi_ctl[2];
sc->sc_rx_th.wr_v.rssi_ext[0] = rs->rs_rssi_ext[0];
sc->sc_rx_th.wr_v.rssi_ext[1] = rs->rs_rssi_ext[1];
sc->sc_rx_th.wr_v.rssi_ext[2] = rs->rs_rssi_ext[2];
/* evm */
sc->sc_rx_th.wr_v.evm[0] = rs->rs_evm0;
sc->sc_rx_th.wr_v.evm[1] = rs->rs_evm1;
sc->sc_rx_th.wr_v.evm[2] = rs->rs_evm2;
/* These are only populated from the AR9300 or later */
sc->sc_rx_th.wr_v.evm[3] = rs->rs_evm3;
sc->sc_rx_th.wr_v.evm[4] = rs->rs_evm4;
/* direction */
sc->sc_rx_th.wr_v.vh_flags = ATH_VENDOR_PKT_RX;
/* RX rate */
sc->sc_rx_th.wr_v.vh_rx_hwrate = rs->rs_rate;
/* RX flags */
sc->sc_rx_th.wr_v.vh_rs_flags = rs->rs_flags;
if (rs->rs_isaggr)
sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_ISAGGR;
if (rs->rs_moreaggr)
sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_MOREAGGR;
/* phyerr info */
if (rs->rs_status & HAL_RXERR_PHY) {
sc->sc_rx_th.wr_v.vh_phyerr_code = rs->rs_phyerr;
sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_RXPHYERR;
} else {
sc->sc_rx_th.wr_v.vh_phyerr_code = 0xff;
}
sc->sc_rx_th.wr_v.vh_rs_status = rs->rs_status;
sc->sc_rx_th.wr_v.vh_rssi = rs->rs_rssi;
}
#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
static void
-ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
+ath_rx_tap(struct ath_softc *sc, struct mbuf *m,
const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
#define CHAN_HT20 htole32(IEEE80211_CHAN_HT20)
#define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U)
#define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D)
#define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
- struct ath_softc *sc = ifp->if_softc;
const HAL_RATE_TABLE *rt;
uint8_t rix;
rt = sc->sc_currates;
KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
rix = rt->rateCodeToIndex[rs->rs_rate];
sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
#ifdef AH_SUPPORT_AR5416
sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
if (rs->rs_status & HAL_RXERR_PHY) {
/*
* PHY error - make sure the channel flags
* reflect the actual channel configuration,
* not the received frame.
*/
if (IEEE80211_IS_CHAN_HT40U(sc->sc_curchan))
sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
else if (IEEE80211_IS_CHAN_HT40D(sc->sc_curchan))
sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
else if (IEEE80211_IS_CHAN_HT20(sc->sc_curchan))
sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
} else if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
if ((rs->rs_flags & HAL_RX_2040) == 0)
sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
else
sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
if ((rs->rs_flags & HAL_RX_GI) == 0)
sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
}
#endif
sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf));
if (rs->rs_status & HAL_RXERR_CRC)
sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
/* XXX propagate other error flags from descriptor */
sc->sc_rx_th.wr_antnoise = nf;
sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi;
sc->sc_rx_th.wr_antenna = rs->rs_antenna;
#undef CHAN_HT
#undef CHAN_HT20
#undef CHAN_HT40U
#undef CHAN_HT40D
}
static void
ath_handle_micerror(struct ieee80211com *ic,
struct ieee80211_frame *wh, int keyix)
{
struct ieee80211_node *ni;
/* XXX recheck MIC to deal w/ chips that lie */
/* XXX discard MIC errors on !data frames */
ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
if (ni != NULL) {
ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
ieee80211_free_node(ni);
}
}
/*
* Process a single packet.
*
* The mbuf must already be synced, unmapped and removed from bf->bf_m
* by this stage.
*
* The mbuf must be consumed by this routine - either passed up the
* net80211 stack, put on the holding queue, or freed.
*/
int
ath_rx_pkt(struct ath_softc *sc, struct ath_rx_status *rs, HAL_STATUS status,
uint64_t tsf, int nf, HAL_RX_QUEUE qtype, struct ath_buf *bf,
struct mbuf *m)
{
uint64_t rstamp;
int len, type;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni;
int is_good = 0;
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
/*
* Calculate the correct 64 bit TSF given
* the TSF64 register value and rs_tstamp.
*/
rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf);
/* These aren't specifically errors */
#ifdef AH_SUPPORT_AR5416
if (rs->rs_flags & HAL_RX_GI)
sc->sc_stats.ast_rx_halfgi++;
if (rs->rs_flags & HAL_RX_2040)
sc->sc_stats.ast_rx_2040++;
if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE)
sc->sc_stats.ast_rx_pre_crc_err++;
if (rs->rs_flags & HAL_RX_DELIM_CRC_POST)
sc->sc_stats.ast_rx_post_crc_err++;
if (rs->rs_flags & HAL_RX_DECRYPT_BUSY)
sc->sc_stats.ast_rx_decrypt_busy_err++;
if (rs->rs_flags & HAL_RX_HI_RX_CHAIN)
sc->sc_stats.ast_rx_hi_rx_chain++;
if (rs->rs_flags & HAL_RX_STBC)
sc->sc_stats.ast_rx_stbc++;
#endif /* AH_SUPPORT_AR5416 */
if (rs->rs_status != 0) {
if (rs->rs_status & HAL_RXERR_CRC)
sc->sc_stats.ast_rx_crcerr++;
if (rs->rs_status & HAL_RXERR_FIFO)
sc->sc_stats.ast_rx_fifoerr++;
if (rs->rs_status & HAL_RXERR_PHY) {
sc->sc_stats.ast_rx_phyerr++;
/* Process DFS radar events */
if ((rs->rs_phyerr == HAL_PHYERR_RADAR) ||
(rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) {
/* Now pass it to the radar processing code */
ath_dfs_process_phy_err(sc, m, rstamp, rs);
}
/* Be suitably paranoid about receiving phy errors out of the stats array bounds */
if (rs->rs_phyerr < 64)
sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++;
goto rx_error; /* NB: don't count in ierrors */
}
if (rs->rs_status & HAL_RXERR_DECRYPT) {
/*
* Decrypt error. If the error occurred
* because there was no hardware key, then
* let the frame through so the upper layers
* can process it. This is necessary for 5210
* parts which have no way to setup a ``clear''
* key cache entry.
*
* XXX do key cache faulting
*/
if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
goto rx_accept;
sc->sc_stats.ast_rx_badcrypt++;
}
/*
* Similar as above - if the failure was a keymiss
* just punt it up to the upper layers for now.
*/
if (rs->rs_status & HAL_RXERR_KEYMISS) {
sc->sc_stats.ast_rx_keymiss++;
goto rx_accept;
}
if (rs->rs_status & HAL_RXERR_MIC) {
sc->sc_stats.ast_rx_badmic++;
/*
* Do minimal work required to hand off
* the 802.11 header for notification.
*/
/* XXX frag's and qos frames */
len = rs->rs_datalen;
if (len >= sizeof (struct ieee80211_frame)) {
ath_handle_micerror(ic,
mtod(m, struct ieee80211_frame *),
sc->sc_splitmic ?
rs->rs_keyix-32 : rs->rs_keyix);
}
}
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
rx_error:
/*
* Cleanup any pending partial frame.
*/
if (re->m_rxpending != NULL) {
m_freem(re->m_rxpending);
re->m_rxpending = NULL;
}
/*
* When a tap is present pass error frames
* that have been requested. By default we
* pass decrypt+mic errors but others may be
* interesting (e.g. crc).
*/
if (ieee80211_radiotap_active(ic) &&
(rs->rs_status & sc->sc_monpass)) {
/* NB: bpf needs the mbuf length setup */
len = rs->rs_datalen;
m->m_pkthdr.len = m->m_len = len;
- ath_rx_tap(ifp, m, rs, rstamp, nf);
+ ath_rx_tap(sc, m, rs, rstamp, nf);
#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
- ath_rx_tap_vendor(ifp, m, rs, rstamp, nf);
+ ath_rx_tap_vendor(sc, m, rs, rstamp, nf);
#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
ieee80211_radiotap_rx_all(ic, m);
}
/* XXX pass MIC errors up for s/w recalculation */
m_freem(m); m = NULL;
goto rx_next;
}
rx_accept:
len = rs->rs_datalen;
m->m_len = len;
if (rs->rs_more) {
/*
* Frame spans multiple descriptors; save
* it for the next completed descriptor; it
* will be used to construct a jumbogram.
*/
if (re->m_rxpending != NULL) {
/* NB: max frame size is currently 2 clusters */
sc->sc_stats.ast_rx_toobig++;
m_freem(re->m_rxpending);
}
- m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = len;
re->m_rxpending = m;
m = NULL;
goto rx_next;
} else if (re->m_rxpending != NULL) {
/*
* This is the second part of a jumbogram,
* chain it to the first mbuf, adjust the
* frame length, and clear the rxpending state.
*/
re->m_rxpending->m_next = m;
re->m_rxpending->m_pkthdr.len += len;
m = re->m_rxpending;
re->m_rxpending = NULL;
} else {
/*
- * Normal single-descriptor receive; setup
- * the rcvif and packet length.
+ * Normal single-descriptor receive; setup packet length.
*/
- m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = len;
}
/*
* Validate rs->rs_antenna.
*
* Some users w/ AR9285 NICs have reported crashes
* here because the rs_antenna field is bogusly large.
* Let's enforce the maximum antenna limit of 8
* (and it shouldn't be hard coded, but that's a
* separate problem) and if there's an issue, print
* out an error and adjust rs_antenna to something
* sensible.
*
* This code should be removed once the actual
* root cause of the issue has been identified.
* For example, it may be that the rs_antenna
* field is only valid for the last frame of
* an aggregate and it just happens that it is
* "mostly" right. (This is a general statement -
* the majority of the statistics are only valid
* for the last frame in an aggregate.)
*/
if (rs->rs_antenna > 7) {
device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n",
__func__, rs->rs_antenna);
#ifdef ATH_DEBUG
ath_printrxbuf(sc, bf, 0, status == HAL_OK);
#endif /* ATH_DEBUG */
rs->rs_antenna = 0; /* XXX better than nothing */
}
/*
* If this is an AR9285/AR9485, then the receive and LNA
* configuration is stored in RSSI[2] / EXTRSSI[2].
* We can extract this out to build a much better
* receive antenna profile.
*
* Yes, this just blurts over the above RX antenna field
* for now. It's fine, the AR9285 doesn't really use
* that.
*
* Later on we should store away the fine grained LNA
* information and keep separate counters just for
* that. It'll help when debugging the AR9285/AR9485
* combined diversity code.
*/
if (sc->sc_rx_lnamixer) {
rs->rs_antenna = 0;
/* Bits 0:1 - the LNA configuration used */
rs->rs_antenna |=
((rs->rs_rssi_ctl[2] & HAL_RX_LNA_CFG_USED)
>> HAL_RX_LNA_CFG_USED_S);
/* Bit 2 - the external RX antenna switch */
if (rs->rs_rssi_ctl[2] & HAL_RX_LNA_EXTCFG)
rs->rs_antenna |= 0x4;
}
- if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;
/*
* Populate the rx status block. When there are bpf
* listeners we do the additional work to provide
* complete status. Otherwise we fill in only the
* material required by ieee80211_input. Note that
* noise setting is filled in above.
*/
if (ieee80211_radiotap_active(ic)) {
- ath_rx_tap(ifp, m, rs, rstamp, nf);
+ ath_rx_tap(sc, m, rs, rstamp, nf);
#ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT
- ath_rx_tap_vendor(ifp, m, rs, rstamp, nf);
+ ath_rx_tap_vendor(sc, m, rs, rstamp, nf);
#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
}
/*
* From this point on we assume the frame is at least
* as large as ieee80211_frame_min; verify that.
*/
if (len < IEEE80211_MIN_LEN) {
if (!ieee80211_radiotap_active(ic)) {
DPRINTF(sc, ATH_DEBUG_RECV,
"%s: short packet %d\n", __func__, len);
sc->sc_stats.ast_rx_tooshort++;
} else {
/* NB: in particular this captures ack's */
ieee80211_radiotap_rx_all(ic, m);
}
m_freem(m); m = NULL;
goto rx_next;
}
if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
const HAL_RATE_TABLE *rt = sc->sc_currates;
uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];
ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
}
m_adj(m, -IEEE80211_CRC_LEN);
/*
* Locate the node for sender, track state, and then
* pass the (referenced) node up to the 802.11 layer
* for its use.
*/
ni = ieee80211_find_rxnode_withkey(ic,
mtod(m, const struct ieee80211_frame_min *),
rs->rs_keyix == HAL_RXKEYIX_INVALID ?
IEEE80211_KEYIX_NONE : rs->rs_keyix);
sc->sc_lastrs = rs;
#ifdef AH_SUPPORT_AR5416
if (rs->rs_isaggr)
sc->sc_stats.ast_rx_agg++;
#endif /* AH_SUPPORT_AR5416 */
if (ni != NULL) {
/*
* Only punt packets for ampdu reorder processing for
* 11n nodes; net80211 enforces that M_AMPDU is only
* set for 11n nodes.
*/
if (ni->ni_flags & IEEE80211_NODE_HT)
m->m_flags |= M_AMPDU;
/*
* Sending station is known, dispatch directly.
*/
type = ieee80211_input(ni, m, rs->rs_rssi, nf);
ieee80211_free_node(ni);
m = NULL;
/*
* Arrange to update the last rx timestamp only for
* frames from our ap when operating in station mode.
* This assumes the rx key is always setup when
* associated.
*/
if (ic->ic_opmode == IEEE80211_M_STA &&
rs->rs_keyix != HAL_RXKEYIX_INVALID)
is_good = 1;
} else {
type = ieee80211_input_all(ic, m, rs->rs_rssi, nf);
m = NULL;
}
/*
* At this point we have passed the frame up the stack; thus
* the mbuf is no longer ours.
*/
/*
* Track rx rssi and do any rx antenna management.
*/
ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
if (sc->sc_diversity) {
/*
* When using fast diversity, change the default rx
* antenna if diversity chooses the other antenna 3
* times in a row.
*/
if (sc->sc_defant != rs->rs_antenna) {
if (++sc->sc_rxotherant >= 3)
ath_setdefantenna(sc, rs->rs_antenna);
} else
sc->sc_rxotherant = 0;
}
/* Handle slow diversity if enabled */
if (sc->sc_dolnadiv) {
ath_lna_rx_comb_scan(sc, rs, ticks, hz);
}
if (sc->sc_softled) {
/*
* Blink for any data frame. Otherwise do a
* heartbeat-style blink when idle. The latter
* is mainly for station mode where we depend on
* periodic beacon frames to trigger the poll event.
*/
if (type == IEEE80211_FC0_TYPE_DATA) {
const HAL_RATE_TABLE *rt = sc->sc_currates;
ath_led_event(sc,
rt->rateCodeToIndex[rs->rs_rate]);
} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
ath_led_event(sc, 0);
}
rx_next:
/*
* Debugging - complain if we didn't NULL the mbuf pointer
* here.
*/
if (m != NULL) {
device_printf(sc->sc_dev,
"%s: mbuf %p should've been freed!\n",
__func__,
m);
}
return (is_good);
}
#define ATH_RX_MAX 128
/*
* XXX TODO: break out the "get buffers" from "call ath_rx_pkt()" like
* the EDMA code does.
*
* XXX TODO: then, do all of the RX list management stuff inside
* ATH_RX_LOCK() so we don't end up potentially racing. The EDMA
* code is doing it right.
*/
static void
ath_rx_proc(struct ath_softc *sc, int resched)
{
#define PA2DESC(_sc, _pa) \
((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
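/*
 * NB: a minimal sketch of what PA2DESC() does (explanatory only, using
 * only the names from the macro above): the RX descriptors live in one
 * contiguous DMA allocation, so a physical address _pa maps back to its
 * virtual descriptor via the shared offset, roughly:
 *
 *   offset = _pa - sc->sc_rxdma.dd_desc_paddr;
 *   desc   = (struct ath_desc *)((caddr_t)sc->sc_rxdma.dd_desc + offset);
 *
 * This is how the virtual address of the "next" descriptor is recovered
 * from a descriptor's physical ds_link further down.
 */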
struct ath_buf *bf;
- struct ifnet *ifp = sc->sc_ifp;
struct ath_hal *ah = sc->sc_ah;
#ifdef IEEE80211_SUPPORT_SUPERG
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
#endif
struct ath_desc *ds;
struct ath_rx_status *rs;
struct mbuf *m;
int ngood;
HAL_STATUS status;
int16_t nf;
u_int64_t tsf;
int npkts = 0;
int kickpcu = 0;
int ret;
/* XXX we must not hold the ATH_LOCK here */
ATH_UNLOCK_ASSERT(sc);
ATH_PCU_UNLOCK_ASSERT(sc);
ATH_PCU_LOCK(sc);
sc->sc_rxproc_cnt++;
kickpcu = sc->sc_kickpcu;
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__);
ngood = 0;
nf = ath_hal_getchannoise(ah, sc->sc_curchan);
sc->sc_stats.ast_rx_noise = nf;
tsf = ath_hal_gettsf64(ah);
do {
/*
* Don't process too many packets at a time; give the
* TX thread time to also run - otherwise the TX
* latency can jump by quite a bit, causing throughput
* degradation.
*/
if (!kickpcu && npkts >= ATH_RX_MAX)
break;
bf = TAILQ_FIRST(&sc->sc_rxbuf);
if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */
device_printf(sc->sc_dev, "%s: no buffer!\n", __func__);
break;
} else if (bf == NULL) {
/*
* End of List:
* this can happen for non-self-linked RX chains
*/
sc->sc_stats.ast_rx_hitqueueend++;
break;
}
m = bf->bf_m;
if (m == NULL) { /* NB: shouldn't happen */
/*
* If mbuf allocation failed previously there
* will be no mbuf; try again to re-populate it.
*/
/* XXX make debug msg */
device_printf(sc->sc_dev, "%s: no mbuf!\n", __func__);
TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
goto rx_proc_next;
}
ds = bf->bf_desc;
if (ds->ds_link == bf->bf_daddr) {
/* NB: never process the self-linked entry at the end */
sc->sc_stats.ast_rx_hitqueueend++;
break;
}
/* XXX sync descriptor memory */
/*
* Must provide the virtual address of the current
* descriptor, the physical address, and the virtual
* address of the next descriptor in the h/w chain.
* This allows the HAL to look ahead to see if the
* hardware is done with a descriptor by checking the
* done bit in the following descriptor and the address
* of the current descriptor the DMA engine is working
* on. All this is necessary because of our use of
* a self-linked list to avoid rx overruns.
*/
rs = &bf->bf_status.ds_rxstat;
status = ath_hal_rxprocdesc(ah, ds,
bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
ath_printrxbuf(sc, bf, 0, status == HAL_OK);
#endif
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
if (status == HAL_EINPROGRESS)
break;
TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
npkts++;
/*
* Process a single frame.
*/
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
bf->bf_m = NULL;
if (ath_rx_pkt(sc, rs, status, tsf, nf, HAL_RX_QUEUE_HP, bf, m))
ngood++;
rx_proc_next:
/*
* If there's a holding buffer, insert that onto
* the RX list; the hardware is definitely not pointing
* to it now.
*/
ret = 0;
if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf != NULL) {
TAILQ_INSERT_TAIL(&sc->sc_rxbuf,
sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf,
bf_list);
ret = ath_rxbuf_init(sc,
sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf);
}
/*
* Next, throw our buffer into the holding entry. The hardware
* may use the descriptor to read the link pointer before
* DMAing the next descriptor in to write out a packet.
*/
sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = bf;
} while (ret == 0);
/* rx signal state monitoring */
ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
if (ngood)
sc->sc_lastrx = tsf;
ATH_KTR(sc, ATH_KTR_RXPROC, 2, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood);
/* Queue DFS tasklet if needed */
if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan))
taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);
/*
* Now that all the RX frames that needed handling
* have been handled, kick the PCU if there's
* been an RXEOL condition.
*/
if (resched && kickpcu) {
ATH_PCU_LOCK(sc);
ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_rx_proc: kickpcu");
device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n",
__func__, npkts);
/*
* Go through the process of fully tearing down
* the RX buffers and reinitialising them.
*
* There's a hardware bug that causes the RX FIFO
* to get confused under certain conditions and
* constantly write over the same frame, leading
* the RX driver code here to get heavily confused.
*/
/*
* XXX Has RX DMA stopped enough here to just call
* ath_startrecv()?
* XXX Do we need to use the holding buffer to restart
* RX DMA by appending entries to the final
* descriptor? Quite likely.
*/
#if 1
ath_startrecv(sc);
#else
/*
* Disabled for now - it'd be nice to be able to do
* this in order to limit the amount of CPU time spent
* reinitialising the RX side (and thus minimise RX
* drops); however, there's a hardware issue that
* causes things to get too far out of whack.
*/
/*
* XXX can we hold the PCU lock here?
* Are there any net80211 buffer calls involved?
*/
bf = TAILQ_FIRST(&sc->sc_rxbuf);
ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP);
ath_hal_rxena(ah); /* enable recv descriptors */
ath_mode_init(sc); /* set filters, etc. */
ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */
#endif
ath_hal_intrset(ah, sc->sc_imask);
sc->sc_kickpcu = 0;
ATH_PCU_UNLOCK(sc);
}
- /* XXX check this inside of IF_LOCK? */
- if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
+ if (resched)
ieee80211_ff_age_all(ic, 100);
#endif
- if (!IFQ_IS_EMPTY(&ifp->if_snd))
- ath_tx_kick(sc);
- }
-#undef PA2DESC
/*
* Put the hardware to sleep again if we're done with it.
*/
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
/*
* If we hit the maximum number of frames in this round,
* reschedule for another immediate pass. This gives
* the TX and TX completion routines time to run, which
* will reduce latency.
*/
if (npkts >= ATH_RX_MAX)
sc->sc_rx.recv_sched(sc, resched);
ATH_PCU_LOCK(sc);
sc->sc_rxproc_cnt--;
ATH_PCU_UNLOCK(sc);
}
-
+#undef PA2DESC
#undef ATH_RX_MAX
/*
* Only run the RX proc if it's not already running.
* Since this may get run as part of the reset/flush path,
* the task can't clash with an existing, running tasklet.
*/
static void
ath_legacy_rx_tasklet(void *arg, int npending)
{
struct ath_softc *sc = arg;
ATH_KTR(sc, ATH_KTR_RXPROC, 1, "ath_rx_proc: pending=%d", npending);
DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
ATH_PCU_LOCK(sc);
if (sc->sc_inreset_cnt > 0) {
device_printf(sc->sc_dev,
"%s: sc_inreset_cnt > 0; skipping\n", __func__);
ATH_PCU_UNLOCK(sc);
return;
}
ATH_PCU_UNLOCK(sc);
ath_rx_proc(sc, 1);
}
static void
ath_legacy_flushrecv(struct ath_softc *sc)
{
ath_rx_proc(sc, 0);
}
static void
ath_legacy_flush_rxpending(struct ath_softc *sc)
{
/* XXX ATH_RX_LOCK_ASSERT(sc); */
if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending != NULL) {
m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
}
if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending != NULL) {
m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
}
}
static int
ath_legacy_flush_rxholdbf(struct ath_softc *sc)
{
struct ath_buf *bf;
/* XXX ATH_RX_LOCK_ASSERT(sc); */
/*
* If there are RX holding buffers, free them here and return
* them to the list.
*
* XXX should just verify that bf->bf_m is NULL, as it must
* be at this point!
*/
bf = sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf;
if (bf != NULL) {
if (bf->bf_m != NULL)
m_freem(bf->bf_m);
bf->bf_m = NULL;
TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
(void) ath_rxbuf_init(sc, bf);
}
sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = NULL;
bf = sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf;
if (bf != NULL) {
if (bf->bf_m != NULL)
m_freem(bf->bf_m);
bf->bf_m = NULL;
TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
(void) ath_rxbuf_init(sc, bf);
}
sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf = NULL;
return (0);
}
/*
* Disable the receive h/w in preparation for a reset.
*/
static void
ath_legacy_stoprecv(struct ath_softc *sc, int dodelay)
{
#define PA2DESC(_sc, _pa) \
((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
struct ath_hal *ah = sc->sc_ah;
ATH_RX_LOCK(sc);
ath_hal_stoppcurecv(ah); /* disable PCU */
ath_hal_setrxfilter(ah, 0); /* clear recv filter */
ath_hal_stopdmarecv(ah); /* disable DMA engine */
/*
* TODO: see if this particular DELAY() is required; it may be
* masking some missing FIFO flush or DMA sync.
*/
#if 0
if (dodelay)
#endif
DELAY(3000); /* 3ms is long enough for 1 frame */
#ifdef ATH_DEBUG
if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
struct ath_buf *bf;
u_int ix;
device_printf(sc->sc_dev,
"%s: rx queue %p, link %p\n",
__func__,
(caddr_t)(uintptr_t) ath_hal_getrxbuf(ah, HAL_RX_QUEUE_HP),
sc->sc_rxlink);
ix = 0;
TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
struct ath_desc *ds = bf->bf_desc;
struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
ath_printrxbuf(sc, bf, ix, status == HAL_OK);
ix++;
}
}
#endif
(void) ath_legacy_flush_rxpending(sc);
(void) ath_legacy_flush_rxholdbf(sc);
sc->sc_rxlink = NULL; /* just in case */
ATH_RX_UNLOCK(sc);
#undef PA2DESC
}
/*
* XXX TODO: something was calling startrecv without calling
* stoprecv. Let's figure out what/why. It was showing up
* as an mbuf leak (rxpending) and an ath_buf leak (holdbf).
*/
/*
* Enable the receive h/w following a reset.
*/
static int
ath_legacy_startrecv(struct ath_softc *sc)
{
struct ath_hal *ah = sc->sc_ah;
struct ath_buf *bf;
ATH_RX_LOCK(sc);
/*
* XXX should verify these are already all NULL!
*/
sc->sc_rxlink = NULL;
(void) ath_legacy_flush_rxpending(sc);
(void) ath_legacy_flush_rxholdbf(sc);
/*
* Re-chain all of the buffers in the RX buffer list.
*/
TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
int error = ath_rxbuf_init(sc, bf);
if (error != 0) {
DPRINTF(sc, ATH_DEBUG_RECV,
"%s: ath_rxbuf_init failed %d\n",
__func__, error);
return error;
}
}
bf = TAILQ_FIRST(&sc->sc_rxbuf);
ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP);
ath_hal_rxena(ah); /* enable recv descriptors */
ath_mode_init(sc); /* set filters, etc. */
ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */
ATH_RX_UNLOCK(sc);
return 0;
}
static int
ath_legacy_dma_rxsetup(struct ath_softc *sc)
{
int error;
error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
"rx", sizeof(struct ath_desc), ath_rxbuf, 1);
if (error != 0)
return (error);
return (0);
}
static int
ath_legacy_dma_rxteardown(struct ath_softc *sc)
{
if (sc->sc_rxdma.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
return (0);
}
static void
ath_legacy_recv_sched(struct ath_softc *sc, int dosched)
{
taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}
static void
ath_legacy_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE q,
int dosched)
{
taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}
void
ath_recv_setup_legacy(struct ath_softc *sc)
{
/* Sensible legacy defaults */
/*
* XXX this should be changed to properly support the
* exact RX descriptor size for each HAL.
*/
sc->sc_rx_statuslen = sizeof(struct ath_desc);
sc->sc_rx.recv_start = ath_legacy_startrecv;
sc->sc_rx.recv_stop = ath_legacy_stoprecv;
sc->sc_rx.recv_flush = ath_legacy_flushrecv;
sc->sc_rx.recv_tasklet = ath_legacy_rx_tasklet;
sc->sc_rx.recv_rxbuf_init = ath_legacy_rxbuf_init;
sc->sc_rx.recv_setup = ath_legacy_dma_rxsetup;
sc->sc_rx.recv_teardown = ath_legacy_dma_rxteardown;
sc->sc_rx.recv_sched = ath_legacy_recv_sched;
sc->sc_rx.recv_sched_queue = ath_legacy_recv_sched_queue;
}
Index: head/sys/dev/ath/if_ath_rx_edma.c
===================================================================
--- head/sys/dev/ath/if_ath_rx_edma.c (revision 287196)
+++ head/sys/dev/ath/if_ath_rx_edma.c (revision 287197)
@@ -1,1013 +1,1007 @@
/*-
* Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for the Atheros Wireless LAN controller.
*
* This software is derived from work of Atsushi Onoe; his contribution
* is greatly appreciated.
*/
#include "opt_inet.h"
#include "opt_ath.h"
/*
* This is needed for register operations which are performed
* by the driver - eg, calls to ath_hal_gettsf32().
*
* It's also required for any AH_DEBUG checks in here, eg the
* module dependencies.
*/
#include "opt_ah.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h> /* for mp_ncpus */
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net/bpf.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>
#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>
#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
#include <dev/ath/if_ath_rx_edma.h>
#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif
/*
* some general macros
*/
#define INCR(_l, _sz) (_l) ++; (_l) &= ((_sz) - 1)
#define DECR(_l, _sz) (_l) --; (_l) &= ((_sz) - 1)
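/*
 * NB: INCR()/DECR() wrap the index by masking with (_sz - 1), which is
 * only correct when _sz is a power of two.  A minimal, purely
 * illustrative example:
 *
 *   int idx = 7;
 *   INCR(idx, 8);   (idx becomes 8, then 8 & (8 - 1) == 0, so it wraps to 0)
 */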
MALLOC_DECLARE(M_ATHDEV);
/*
* XXX TODO:
*
* + Make sure the FIFO is correctly flushed and reinitialised
* through a reset;
* + Verify multi-descriptor frames work!
* + There's a "memory use after free" which needs to be tracked down
* and fixed ASAP. I've seen this in the legacy path too, so it
* may be a generic RX path issue.
*/
/*
* XXX shuffle the function orders so these pre-declarations aren't
* required!
*/
static int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
int nbufs);
static int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static void ath_edma_recv_proc_queue(struct ath_softc *sc,
HAL_RX_QUEUE qtype, int dosched);
static int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
HAL_RX_QUEUE qtype, int dosched);
static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
struct ath_hal *ah = sc->sc_ah;
ATH_RX_LOCK(sc);
ath_hal_stoppcurecv(ah);
ath_hal_setrxfilter(ah, 0);
/*
*
*/
if (ath_hal_stopdmarecv(ah) == AH_TRUE)
sc->sc_rx_stopped = 1;
/*
* Give the various bus FIFOs (not EDMA descriptor FIFO)
* time to finish flushing out data.
*/
DELAY(3000);
/* Flush RX pending for each queue */
/* XXX should generic-ify this */
if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
}
if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
}
ATH_RX_UNLOCK(sc);
}
/*
* Re-initialise the FIFO given the current buffer contents.
* Specifically, walk from head -> tail, pushing the FIFO contents
* back into the FIFO.
*/
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
struct ath_buf *bf;
int i, j;
ATH_RX_LOCK_ASSERT(sc);
i = re->m_fifo_head;
for (j = 0; j < re->m_fifo_depth; j++) {
bf = re->m_fifo[i];
DPRINTF(sc, ATH_DEBUG_EDMA_RX,
"%s: Q%d: pos=%i, addr=0x%jx\n",
__func__,
qtype,
i,
(uintmax_t)bf->bf_daddr);
ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
INCR(i, re->m_fifolen);
}
/* Ensure this worked out right */
if (i != re->m_fifo_tail) {
device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
__func__,
i,
re->m_fifo_tail);
}
}
/*
* Start receive.
*/
static int
ath_edma_startrecv(struct ath_softc *sc)
{
struct ath_hal *ah = sc->sc_ah;
ATH_RX_LOCK(sc);
/*
* Sanity check - are we being called whilst RX
* isn't stopped? If so, we may end up pushing
* too many entries into the RX FIFO and
* badness occurs.
*/
/* Enable RX FIFO */
ath_hal_rxena(ah);
/*
* In theory the hardware has been initialised, right?
*/
if (sc->sc_rx_resetted == 1) {
DPRINTF(sc, ATH_DEBUG_EDMA_RX,
"%s: Re-initing HP FIFO\n", __func__);
ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
DPRINTF(sc, ATH_DEBUG_EDMA_RX,
"%s: Re-initing LP FIFO\n", __func__);
ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
sc->sc_rx_resetted = 0;
} else {
device_printf(sc->sc_dev,
"%s: called without resetting chip?\n",
__func__);
}
/* Add up to m_fifolen entries in each queue */
/*
* These must occur after the above write so the FIFO buffers
* are pushed/tracked in the same order as the hardware will
* process them.
*
* XXX TODO: is this really necessary? We should've stopped
* the hardware already and reinitialised it, so it's a no-op.
*/
ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);
ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);
ath_mode_init(sc);
ath_hal_startpcurecv(ah);
/*
* We're now doing RX DMA!
*/
sc->sc_rx_stopped = 0;
ATH_RX_UNLOCK(sc);
return (0);
}
static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
int dosched)
{
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ath_edma_recv_proc_queue(sc, qtype, dosched);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}
static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}
static void
ath_edma_recv_flush(struct ath_softc *sc)
{
DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);
ATH_PCU_LOCK(sc);
sc->sc_rxproc_cnt++;
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
/*
* Flush any active frames from FIFO -> deferred list
*/
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);
/*
* Process what's in the deferred queue
*/
/*
* XXX: If we read the tsf/channoise here and then pass it in,
* we could restore the power state before processing
* the deferred queue.
*/
ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
ATH_PCU_LOCK(sc);
sc->sc_rxproc_cnt--;
ATH_PCU_UNLOCK(sc);
}
/*
* Process frames from the current queue into the deferred queue.
*/
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
int dosched)
{
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
struct ath_rx_status *rs;
struct ath_desc *ds;
struct ath_buf *bf;
struct mbuf *m;
struct ath_hal *ah = sc->sc_ah;
uint64_t tsf;
uint16_t nf;
int npkts = 0;
tsf = ath_hal_gettsf64(ah);
nf = ath_hal_getchannoise(ah, sc->sc_curchan);
sc->sc_stats.ast_rx_noise = nf;
ATH_RX_LOCK(sc);
#if 1
if (sc->sc_rx_resetted == 1) {
/*
* XXX We shouldn't ever be scheduled if
* receive has been stopped - so complain
* loudly!
*/
device_printf(sc->sc_dev,
"%s: sc_rx_resetted=1! Bad!\n",
__func__);
ATH_RX_UNLOCK(sc);
return;
}
#endif
do {
bf = re->m_fifo[re->m_fifo_head];
/* This shouldn't occur! */
if (bf == NULL) {
device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
__func__,
qtype);
break;
}
m = bf->bf_m;
ds = bf->bf_desc;
/*
* Sync descriptor memory - this also syncs the buffer for us.
* EDMA descriptors are in cached memory.
*/
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
rs = &bf->bf_status.ds_rxstat;
bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
NULL, rs);
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
if (bf->bf_rxstatus == HAL_EINPROGRESS)
break;
/*
* Completed descriptor.
*/
DPRINTF(sc, ATH_DEBUG_EDMA_RX,
"%s: Q%d: completed!\n", __func__, qtype);
npkts++;
/*
* We've been synced already, so unmap.
*/
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
/*
* Remove the FIFO entry and place it on the completion
* queue.
*/
re->m_fifo[re->m_fifo_head] = NULL;
TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);
/* Bump the descriptor FIFO stats */
INCR(re->m_fifo_head, re->m_fifolen);
re->m_fifo_depth--;
/* XXX check it doesn't fall below 0 */
} while (re->m_fifo_depth > 0);
/* Append some more fresh frames to the FIFO */
if (dosched)
ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);
ATH_RX_UNLOCK(sc);
/* rx signal state monitoring */
ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
"ath edma rx proc: npkts=%d\n",
npkts);
return;
}
/*
* Flush the deferred queue.
*
* This destructively flushes the deferred queue - it doesn't
* call the wireless stack on each mbuf.
*/
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
struct ath_buf *bf;
ATH_RX_LOCK_ASSERT(sc);
/* Free in one set, inside the lock */
while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) {
bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
/* Free the buffer/mbuf */
ath_edma_rxbuf_free(sc, bf);
}
while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) {
bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
/* Free the buffer/mbuf */
ath_edma_rxbuf_free(sc, bf);
}
}
static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
int dosched)
{
int ngood = 0;
uint64_t tsf;
struct ath_buf *bf, *next;
struct ath_rx_status *rs;
int16_t nf;
ath_bufhead rxlist;
struct mbuf *m;
TAILQ_INIT(&rxlist);
nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
/*
* XXX TODO: the NF/TSF should be stamped on the bufs themselves,
* otherwise we may end up adding in the wrong values if this
* is delayed too far..
*/
tsf = ath_hal_gettsf64(sc->sc_ah);
/* Copy the list over */
ATH_RX_LOCK(sc);
TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
ATH_RX_UNLOCK(sc);
/* Handle the completed descriptors */
/*
* XXX is this SAFE call needed? The ath_buf entries
* aren't modified by ath_rx_pkt, right?
*/
TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
/*
* Skip the RX descriptor status - start at the data offset
*/
m_adj(bf->bf_m, sc->sc_rx_statuslen);
/* Handle the frame */
rs = &bf->bf_status.ds_rxstat;
m = bf->bf_m;
bf->bf_m = NULL;
if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
ngood++;
}
if (ngood) {
sc->sc_lastrx = tsf;
}
ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
"ath edma rx deferred proc: ngood=%d\n",
ngood);
/* Free in one set, inside the lock */
ATH_RX_LOCK(sc);
while (! TAILQ_EMPTY(&rxlist)) {
bf = TAILQ_FIRST(&rxlist);
TAILQ_REMOVE(&rxlist, bf, bf_list);
/* Free the buffer/mbuf */
ath_edma_rxbuf_free(sc, bf);
}
ATH_RX_UNLOCK(sc);
return (ngood);
}
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
struct ath_softc *sc = (struct ath_softc *) arg;
- struct ifnet *ifp = sc->sc_ifp;
#ifdef IEEE80211_SUPPORT_SUPERG
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
#endif
DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
__func__,
npending);
ATH_PCU_LOCK(sc);
if (sc->sc_inreset_cnt > 0) {
device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
__func__);
ATH_PCU_UNLOCK(sc);
return;
}
sc->sc_rxproc_cnt++;
ATH_PCU_UNLOCK(sc);
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);
ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);
/*
* XXX: If we read the tsf/channoise here and then pass it in,
* we could restore the power state before processing
* the deferred queue.
*/
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
- /* XXX inside IF_LOCK ? */
- if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
- ieee80211_ff_age_all(ic, 100);
+ ieee80211_ff_age_all(ic, 100);
#endif
- if (! IFQ_IS_EMPTY(&ifp->if_snd))
- ath_tx_kick(sc);
- }
if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);
ATH_PCU_LOCK(sc);
sc->sc_rxproc_cnt--;
ATH_PCU_UNLOCK(sc);
}
/*
* Allocate an RX mbuf for the given ath_buf and initialise
* it for EDMA.
*
* + Allocate a 4KB mbuf;
* + Setup the DMA map for the given buffer;
* + Return that.
*/
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
struct mbuf *m;
int error;
int len;
ATH_RX_LOCK_ASSERT(sc);
m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
if (! m)
return (ENOBUFS); /* XXX ?*/
/* XXX warn/enforce alignment */
len = m->m_ext.ext_size;
#if 0
device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
__func__,
m,
len,
mtod(m, char *));
#endif
m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
/*
* Populate ath_buf fields.
*/
bf->bf_desc = mtod(m, struct ath_desc *);
bf->bf_lastds = bf->bf_desc; /* XXX only really for TX? */
bf->bf_m = m;
/*
* Zero the descriptor and ensure it makes it out to the
* bounce buffer if one is required.
*
* XXX PREWRITE will copy the whole buffer; we only needed it
* to sync the first 32 DWORDS. Oh well.
*/
memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);
/*
* Create DMA mapping.
*/
error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->sc_dev, "%s: failed; error=%d\n",
__func__,
error);
m_freem(m);
return (error);
}
/*
* Set daddr to the physical mapping page.
*/
bf->bf_daddr = bf->bf_segs[0].ds_addr;
/*
* Prepare for the upcoming read.
*
* We need to both sync some data into the buffer (the zero'ed
* descriptor payload) and also prepare for the read that's going
* to occur.
*/
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/* Finish! */
return (0);
}
/*
* Allocate a RX buffer.
*/
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
struct ath_buf *bf;
int error;
ATH_RX_LOCK_ASSERT(sc);
/* Allocate buffer */
bf = TAILQ_FIRST(&sc->sc_rxbuf);
/* XXX shouldn't happen upon startup? */
if (bf == NULL) {
device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
__func__);
return (NULL);
}
/* Remove it from the free list */
TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
/* Assign RX mbuf to it */
error = ath_edma_rxbuf_init(sc, bf);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: bf=%p, rxbuf alloc failed! error=%d\n",
__func__,
bf,
error);
TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
return (NULL);
}
return (bf);
}
static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{
ATH_RX_LOCK_ASSERT(sc);
/*
* Only unload the frame if we haven't consumed
* the mbuf via ath_rx_pkt().
*/
if (bf->bf_m) {
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
m_freem(bf->bf_m);
bf->bf_m = NULL;
}
/* XXX lock? */
TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}
/*
* Allocate up to 'n' entries and push them onto the hardware FIFO.
*
* Return how many entries were successfully pushed onto the
* FIFO.
*/
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
struct ath_buf *bf;
int i;
ATH_RX_LOCK_ASSERT(sc);
/*
* Allocate buffers until the FIFO is full or nbufs is reached.
*/
for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
/* Ensure the FIFO slot is already blank; complain loudly if not! */
if (re->m_fifo[re->m_fifo_tail] != NULL) {
device_printf(sc->sc_dev,
"%s: Q%d: fifo[%d] != NULL (%p)\n",
__func__,
qtype,
re->m_fifo_tail,
re->m_fifo[re->m_fifo_tail]);
/* Free the slot */
ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
re->m_fifo_depth--;
/* XXX check it's not < 0 */
re->m_fifo[re->m_fifo_tail] = NULL;
}
bf = ath_edma_rxbuf_alloc(sc);
/* XXX should ensure the FIFO is not NULL? */
if (bf == NULL) {
device_printf(sc->sc_dev,
"%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
__func__,
qtype,
i,
nbufs);
break;
}
re->m_fifo[re->m_fifo_tail] = bf;
/* Write to the RX FIFO */
DPRINTF(sc, ATH_DEBUG_EDMA_RX,
"%s: Q%d: putrxbuf=%p (0x%jx)\n",
__func__,
qtype,
bf->bf_desc,
(uintmax_t) bf->bf_daddr);
ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
re->m_fifo_depth++;
INCR(re->m_fifo_tail, re->m_fifolen);
}
/*
* Return how many were allocated.
*/
DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
__func__,
qtype,
nbufs,
i);
return (i);
}
static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
int i;
ATH_RX_LOCK_ASSERT(sc);
for (i = 0; i < re->m_fifolen; i++) {
if (re->m_fifo[i] != NULL) {
#ifdef ATH_DEBUG
struct ath_buf *bf = re->m_fifo[i];
if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
ath_edma_rxbuf_free(sc, re->m_fifo[i]);
re->m_fifo[i] = NULL;
re->m_fifo_depth--;
}
}
if (re->m_rxpending != NULL) {
m_freem(re->m_rxpending);
re->m_rxpending = NULL;
}
re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;
return (0);
}
/*
* Setup the initial RX FIFO structure.
*/
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
ATH_RX_LOCK_ASSERT(sc);
if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
__func__,
qtype);
return (-EINVAL);
}
if (bootverbose)
device_printf(sc->sc_dev,
"%s: type=%d, FIFO depth = %d entries\n",
__func__,
qtype,
re->m_fifolen);
/* Allocate ath_buf FIFO array, pre-zero'ed */
re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
M_ATHDEV,
M_NOWAIT | M_ZERO);
if (re->m_fifo == NULL) {
device_printf(sc->sc_dev, "%s: malloc failed\n",
__func__);
return (-ENOMEM);
}
/*
* Set initial "empty" state.
*/
re->m_rxpending = NULL;
re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;
return (0);
}
static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
__func__,
qtype);
free(re->m_fifo, M_ATHDEV);
return (0);
}
static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
int error;
/*
* Create RX DMA tag and buffers.
*/
error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
"rx", ath_rxbuf, sc->sc_rx_statuslen);
if (error != 0)
return error;
ATH_RX_LOCK(sc);
(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
ATH_RX_UNLOCK(sc);
return (0);
}
static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{
ATH_RX_LOCK(sc);
ath_edma_flush_deferred_queue(sc);
ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);
ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
ATH_RX_UNLOCK(sc);
/* Free RX ath_buf */
/* Free RX DMA tag */
if (sc->sc_rxdma.dd_desc_len != 0)
ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
return (0);
}
void
ath_recv_setup_edma(struct ath_softc *sc)
{
/* Set buffer size to 4k */
sc->sc_edma_bufsize = 4096;
/* Fetch EDMA field and buffer sizes */
(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);
/* Configure the hardware with the RX buffer size */
(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
sc->sc_rx_statuslen);
if (bootverbose) {
device_printf(sc->sc_dev, "RX status length: %d\n",
sc->sc_rx_statuslen);
device_printf(sc->sc_dev, "RX buffer size: %d\n",
sc->sc_edma_bufsize);
}
sc->sc_rx.recv_stop = ath_edma_stoprecv;
sc->sc_rx.recv_start = ath_edma_startrecv;
sc->sc_rx.recv_flush = ath_edma_recv_flush;
sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;
sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;
sc->sc_rx.recv_sched = ath_edma_recv_sched;
sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}
Index: head/sys/dev/ath/if_ath_sysctl.c
===================================================================
--- head/sys/dev/ath/if_ath_sysctl.c (revision 287196)
+++ head/sys/dev/ath/if_ath_sysctl.c (revision 287197)
@@ -1,1350 +1,1346 @@
/*-
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for the Atheros Wireless LAN controller.
*
* This software is derived from work of Atsushi Onoe; his contribution
* is greatly appreciated.
*/
#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net/bpf.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>
#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif
static int
ath_sysctl_slottime(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
u_int slottime;
int error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
slottime = ath_hal_getslottime(sc->sc_ah);
ATH_UNLOCK(sc);
error = sysctl_handle_int(oidp, &slottime, 0, req);
if (error || !req->newptr)
goto finish;
error = !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0;
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return error;
}
static int
ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
u_int acktimeout;
int error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
acktimeout = ath_hal_getacktimeout(sc->sc_ah);
ATH_UNLOCK(sc);
error = sysctl_handle_int(oidp, &acktimeout, 0, req);
if (error || !req->newptr)
goto finish;
error = !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? EINVAL : 0;
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
static int
ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
u_int ctstimeout;
int error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ctstimeout = ath_hal_getctstimeout(sc->sc_ah);
ATH_UNLOCK(sc);
error = sysctl_handle_int(oidp, &ctstimeout, 0, req);
if (error || !req->newptr)
goto finish;
error = !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ? EINVAL : 0;
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
static int
ath_sysctl_softled(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
int softled = sc->sc_softled;
int error;
error = sysctl_handle_int(oidp, &softled, 0, req);
if (error || !req->newptr)
return error;
softled = (softled != 0);
if (softled != sc->sc_softled) {
if (softled) {
/* NB: handle any sc_ledpin change */
ath_led_config(sc);
}
sc->sc_softled = softled;
}
return 0;
}
static int
ath_sysctl_ledpin(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
int ledpin = sc->sc_ledpin;
int error;
error = sysctl_handle_int(oidp, &ledpin, 0, req);
if (error || !req->newptr)
return error;
if (ledpin != sc->sc_ledpin) {
sc->sc_ledpin = ledpin;
if (sc->sc_softled) {
ath_led_config(sc);
}
}
return 0;
}
static int
ath_sysctl_hardled(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
int hardled = sc->sc_hardled;
int error;
error = sysctl_handle_int(oidp, &hardled, 0, req);
if (error || !req->newptr)
return error;
hardled = (hardled != 0);
if (hardled != sc->sc_hardled) {
if (hardled) {
/* NB: handle any sc_ledpin change */
ath_led_config(sc);
}
sc->sc_hardled = hardled;
}
return 0;
}
static int
ath_sysctl_txantenna(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
u_int txantenna;
int error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
txantenna = ath_hal_getantennaswitch(sc->sc_ah);
error = sysctl_handle_int(oidp, &txantenna, 0, req);
if (!error && req->newptr) {
/* XXX assumes 2 antenna ports */
if (txantenna < HAL_ANT_VARIABLE || txantenna > HAL_ANT_FIXED_B) {
error = EINVAL;
goto finish;
}
ath_hal_setantennaswitch(sc->sc_ah, txantenna);
/*
* NB: with the switch locked this isn't meaningful,
* but set it anyway so things like radiotap get
* consistent info in their data.
*/
sc->sc_txantenna = txantenna;
}
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
static int
ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
u_int defantenna;
int error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
defantenna = ath_hal_getdefantenna(sc->sc_ah);
ATH_UNLOCK(sc);
error = sysctl_handle_int(oidp, &defantenna, 0, req);
if (!error && req->newptr)
ath_hal_setdefantenna(sc->sc_ah, defantenna);
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
static int
ath_sysctl_diversity(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
u_int diversity;
int error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
diversity = ath_hal_getdiversity(sc->sc_ah);
error = sysctl_handle_int(oidp, &diversity, 0, req);
if (error || !req->newptr)
goto finish;
if (!ath_hal_setdiversity(sc->sc_ah, diversity)) {
error = EINVAL;
goto finish;
}
sc->sc_diversity = diversity;
error = 0;
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
static int
ath_sysctl_diag(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
u_int32_t diag;
int error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
if (!ath_hal_getdiag(sc->sc_ah, &diag)) {
error = EINVAL;
goto finish;
}
error = sysctl_handle_int(oidp, &diag, 0, req);
if (error || !req->newptr)
goto finish;
error = !ath_hal_setdiag(sc->sc_ah, diag) ? EINVAL : 0;
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
static int
ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
- struct ifnet *ifp = sc->sc_ifp;
u_int32_t scale;
int error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
(void) ath_hal_gettpscale(sc->sc_ah, &scale);
error = sysctl_handle_int(oidp, &scale, 0, req);
if (error || !req->newptr)
goto finish;
error = !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL :
- (ifp->if_drv_flags & IFF_DRV_RUNNING) ?
- ath_reset(ifp, ATH_RESET_NOLOSS) : 0;
+ (sc->sc_running) ? ath_reset(sc, ATH_RESET_NOLOSS) : 0;
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
static int
ath_sysctl_tpc(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
u_int tpc;
int error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
tpc = ath_hal_gettpc(sc->sc_ah);
error = sysctl_handle_int(oidp, &tpc, 0, req);
if (error || !req->newptr)
goto finish;
error = !ath_hal_settpc(sc->sc_ah, tpc) ? EINVAL : 0;
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
static int
ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
- struct ifnet *ifp = sc->sc_ifp;
struct ath_hal *ah = sc->sc_ah;
u_int rfkill;
int error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
rfkill = ath_hal_getrfkill(ah);
error = sysctl_handle_int(oidp, &rfkill, 0, req);
if (error || !req->newptr)
goto finish;
if (rfkill == ath_hal_getrfkill(ah)) { /* unchanged */
error = 0;
goto finish;
}
if (!ath_hal_setrfkill(ah, rfkill)) {
error = EINVAL;
goto finish;
}
- error = (ifp->if_drv_flags & IFF_DRV_RUNNING) ?
- ath_reset(ifp, ATH_RESET_FULL) : 0;
+ error = sc->sc_running ? ath_reset(sc, ATH_RESET_FULL) : 0;
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
static int
ath_sysctl_txagg(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
int i, t, param = 0;
int error;
struct ath_buf *bf;
error = sysctl_handle_int(oidp, &param, 0, req);
if (error || !req->newptr)
return error;
if (param != 1)
return 0;
printf("no tx bufs (empty list): %d\n", sc->sc_stats.ast_tx_getnobuf);
printf("no tx bufs (was busy): %d\n", sc->sc_stats.ast_tx_getbusybuf);
printf("aggr single packet: %d\n",
sc->sc_aggr_stats.aggr_single_pkt);
printf("aggr single packet w/ BAW closed: %d\n",
sc->sc_aggr_stats.aggr_baw_closed_single_pkt);
printf("aggr non-baw packet: %d\n",
sc->sc_aggr_stats.aggr_nonbaw_pkt);
printf("aggr aggregate packet: %d\n",
sc->sc_aggr_stats.aggr_aggr_pkt);
printf("aggr single packet low hwq: %d\n",
sc->sc_aggr_stats.aggr_low_hwq_single_pkt);
printf("aggr single packet RTS aggr limited: %d\n",
sc->sc_aggr_stats.aggr_rts_aggr_limited);
printf("aggr sched, no work: %d\n",
sc->sc_aggr_stats.aggr_sched_nopkt);
for (i = 0; i < 64; i++) {
printf("%2d: %10d ", i, sc->sc_aggr_stats.aggr_pkts[i]);
if (i % 4 == 3)
printf("\n");
}
printf("\n");
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i)) {
printf("HW TXQ %d: axq_depth=%d, axq_aggr_depth=%d, "
"axq_fifo_depth=%d, holdingbf=%p\n",
i,
sc->sc_txq[i].axq_depth,
sc->sc_txq[i].axq_aggr_depth,
sc->sc_txq[i].axq_fifo_depth,
sc->sc_txq[i].axq_holdingbf);
}
}
i = t = 0;
ATH_TXBUF_LOCK(sc);
TAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list) {
if (bf->bf_flags & ATH_BUF_BUSY) {
printf("Busy: %d\n", t);
i++;
}
t++;
}
ATH_TXBUF_UNLOCK(sc);
printf("Total TX buffers: %d; Total TX buffers busy: %d (%d)\n",
t, i, sc->sc_txbuf_cnt);
i = t = 0;
ATH_TXBUF_LOCK(sc);
TAILQ_FOREACH(bf, &sc->sc_txbuf_mgmt, bf_list) {
if (bf->bf_flags & ATH_BUF_BUSY) {
printf("Busy: %d\n", t);
i++;
}
t++;
}
ATH_TXBUF_UNLOCK(sc);
printf("Total mgmt TX buffers: %d; Total mgmt TX buffers busy: %d\n",
t, i);
ATH_RX_LOCK(sc);
for (i = 0; i < 2; i++) {
printf("%d: fifolen: %d/%d; head=%d; tail=%d; m_pending=%p, m_holdbf=%p\n",
i,
sc->sc_rxedma[i].m_fifo_depth,
sc->sc_rxedma[i].m_fifolen,
sc->sc_rxedma[i].m_fifo_head,
sc->sc_rxedma[i].m_fifo_tail,
sc->sc_rxedma[i].m_rxpending,
sc->sc_rxedma[i].m_holdbf);
}
i = 0;
TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
i++;
}
printf("Total RX buffers in free list: %d buffers\n",
i);
ATH_RX_UNLOCK(sc);
return 0;
}
static int
ath_sysctl_rfsilent(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
u_int rfsilent;
int error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
(void) ath_hal_getrfsilent(sc->sc_ah, &rfsilent);
error = sysctl_handle_int(oidp, &rfsilent, 0, req);
if (error || !req->newptr)
goto finish;
if (!ath_hal_setrfsilent(sc->sc_ah, rfsilent)) {
error = EINVAL;
goto finish;
}
/*
* Earlier chips (< AR5212) have up to 8 GPIO
* pins exposed.
*
* AR5416 and later chips have many more GPIO
* pins (up to 16) so the mask is expanded to
* four bits.
*/
sc->sc_rfsilentpin = rfsilent & 0x3c;
sc->sc_rfsilentpol = (rfsilent & 0x2) != 0;
error = 0;
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
static int
ath_sysctl_tpack(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
u_int32_t tpack;
int error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
(void) ath_hal_gettpack(sc->sc_ah, &tpack);
error = sysctl_handle_int(oidp, &tpack, 0, req);
if (error || !req->newptr)
goto finish;
error = !ath_hal_settpack(sc->sc_ah, tpack) ? EINVAL : 0;
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
static int
ath_sysctl_tpcts(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
u_int32_t tpcts;
int error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
(void) ath_hal_gettpcts(sc->sc_ah, &tpcts);
error = sysctl_handle_int(oidp, &tpcts, 0, req);
if (error || !req->newptr)
goto finish;
error = !ath_hal_settpcts(sc->sc_ah, tpcts) ? EINVAL : 0;
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
static int
ath_sysctl_intmit(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
int intmit, error;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
intmit = ath_hal_getintmit(sc->sc_ah);
error = sysctl_handle_int(oidp, &intmit, 0, req);
if (error || !req->newptr)
goto finish;
/* reusing error; 1 here means "good"; 0 means "fail" */
error = ath_hal_setintmit(sc->sc_ah, intmit);
if (! error) {
error = EINVAL;
goto finish;
}
/*
* Reset the hardware here - disabling ANI in the HAL
* doesn't reset ANI related registers, so it'll leave
* things in an inconsistent state.
*/
- if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
- ath_reset(sc->sc_ifp, ATH_RESET_NOLOSS);
+ if (sc->sc_running)
+ ath_reset(sc, ATH_RESET_NOLOSS);
error = 0;
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
#ifdef IEEE80211_SUPPORT_TDMA
static int
ath_sysctl_setcca(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
int setcca, error;
setcca = sc->sc_setcca;
error = sysctl_handle_int(oidp, &setcca, 0, req);
if (error || !req->newptr)
return error;
sc->sc_setcca = (setcca != 0);
return 0;
}
#endif /* IEEE80211_SUPPORT_TDMA */
static int
ath_sysctl_forcebstuck(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
int val = 0;
int error;
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
return error;
if (val == 0)
return 0;
taskqueue_enqueue_fast(sc->sc_tq, &sc->sc_bstucktask);
val = 0;
return 0;
}
static int
ath_sysctl_hangcheck(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
int val = 0;
int error;
uint32_t mask = 0xffffffff;
uint32_t *sp;
uint32_t rsize;
struct ath_hal *ah = sc->sc_ah;
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
return error;
if (val == 0)
return 0;
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
/* Do a hang check */
if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS,
&mask, sizeof(mask),
(void *) &sp, &rsize)) {
error = 0;
goto finish;
}
device_printf(sc->sc_dev, "%s: sp=0x%08x\n", __func__, *sp);
val = 0;
error = 0;
finish:
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return (error);
}
#ifdef ATH_DEBUG_ALQ
static int
ath_sysctl_alq_log(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
int error, enable;
enable = (sc->sc_alq.sc_alq_isactive);
error = sysctl_handle_int(oidp, &enable, 0, req);
if (error || !req->newptr)
return (error);
else if (enable)
error = if_ath_alq_start(&sc->sc_alq);
else
error = if_ath_alq_stop(&sc->sc_alq);
return (error);
}
/*
* Attach the ALQ debugging if required.
*/
static void
ath_sysctl_alq_attach(struct ath_softc *sc)
{
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "alq", CTLFLAG_RD,
NULL, "Atheros ALQ logging parameters");
child = SYSCTL_CHILDREN(tree);
SYSCTL_ADD_STRING(ctx, child, OID_AUTO, "filename",
CTLFLAG_RW, sc->sc_alq.sc_alq_filename, 0, "ALQ filename");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"enable", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_alq_log, "I", "");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"debugmask", CTLFLAG_RW, &sc->sc_alq.sc_alq_debug, 0,
"ALQ debug mask");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"numlost", CTLFLAG_RW, &sc->sc_alq.sc_alq_numlost, 0,
"number lost");
}
#endif /* ATH_DEBUG_ALQ */
void
ath_sysctlattach(struct ath_softc *sc)
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
struct ath_hal *ah = sc->sc_ah;
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"countrycode", CTLFLAG_RD, &sc->sc_eecc, 0,
"EEPROM country code");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"regdomain", CTLFLAG_RD, &sc->sc_eerd, 0,
"EEPROM regdomain code");
#ifdef ATH_DEBUG
SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"debug", CTLFLAG_RW, &sc->sc_debug,
"control debugging printfs");
#endif
#ifdef ATH_DEBUG_ALQ
SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"ktrdebug", CTLFLAG_RW, &sc->sc_ktrdebug,
"control debugging KTR");
#endif /* ATH_DEBUG_ALQ */
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_slottime, "I", "802.11 slot time (us)");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_softled, "I", "enable/disable software LED support");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"ledpin", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_ledpin, "I", "GPIO pin connected to LED");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"ledon", CTLFLAG_RW, &sc->sc_ledon, 0,
"setting to turn LED on");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0,
"idle time for inactivity LED (ticks)");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"hardled", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_hardled, "I", "enable/disable hardware LED support");
/* XXX Laziness - configure pins, then flip hardled off/on */
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"led_net_pin", CTLFLAG_RW, &sc->sc_led_net_pin, 0,
"MAC Network LED pin, or -1 to disable");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"led_pwr_pin", CTLFLAG_RW, &sc->sc_led_pwr_pin, 0,
"MAC Power LED pin, or -1 to disable");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"txantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_txantenna, "I", "antenna switch");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_rxantenna, "I", "default/rx antenna");
if (ath_hal_hasdiversity(ah))
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_diversity, "I", "antenna diversity");
sc->sc_txintrperiod = ATH_TXINTR_PERIOD;
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0,
"tx descriptor batching");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_diag, "I", "h/w diagnostic control");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"tpscale", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_tpscale, "I", "tx power scaling");
if (ath_hal_hastpc(ah)) {
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_tpc, "I", "enable/disable per-packet TPC");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"tpack", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_tpack, "I", "tx power for ack frames");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"tpcts", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_tpcts, "I", "tx power for cts frames");
}
if (ath_hal_hasrfsilent(ah)) {
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"rfsilent", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_rfsilent, "I", "h/w RF silent config");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"rfkill", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_rfkill, "I", "enable/disable RF kill switch");
}
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"txagg", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_txagg, "I", "");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"forcebstuck", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_forcebstuck, "I", "");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"hangcheck", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_hangcheck, "I", "");
if (ath_hal_hasintmit(ah)) {
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"intmit", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_intmit, "I", "interference mitigation");
}
sc->sc_monpass = HAL_RXERR_DECRYPT | HAL_RXERR_MIC;
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"monpass", CTLFLAG_RW, &sc->sc_monpass, 0,
"mask of error frames to pass when monitoring");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"hwq_limit_nonaggr", CTLFLAG_RW, &sc->sc_hwq_limit_nonaggr, 0,
"Hardware non-AMPDU queue depth before software-queuing TX frames");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"hwq_limit_aggr", CTLFLAG_RW, &sc->sc_hwq_limit_aggr, 0,
"Hardware AMPDU queue depth before software-queuing TX frames");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"tid_hwq_lo", CTLFLAG_RW, &sc->sc_tid_hwq_lo, 0,
"");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"tid_hwq_hi", CTLFLAG_RW, &sc->sc_tid_hwq_hi, 0,
"");
/* Aggregate length twiddles */
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"aggr_limit", CTLFLAG_RW, &sc->sc_aggr_limit, 0,
"Maximum A-MPDU size, or 0 for 'default'");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"rts_aggr_limit", CTLFLAG_RW, &sc->sc_rts_aggr_limit, 0,
"Maximum A-MPDU size for RTS-protected frames, or '0' "
"for default");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"delim_min_pad", CTLFLAG_RW, &sc->sc_delim_min_pad, 0,
"Enforce a minimum number of delimiters per A-MPDU "
" sub-frame");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"txq_data_minfree", CTLFLAG_RW, &sc->sc_txq_data_minfree,
0, "Minimum free buffers before adding a data frame"
" to the TX queue");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"txq_mcastq_maxdepth", CTLFLAG_RW,
&sc->sc_txq_mcastq_maxdepth, 0,
"Maximum buffer depth for multicast/broadcast frames");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"txq_node_maxdepth", CTLFLAG_RW,
&sc->sc_txq_node_maxdepth, 0,
"Maximum buffer depth for a single node");
#if 0
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"cabq_enable", CTLFLAG_RW,
&sc->sc_cabq_enable, 0,
"Whether to transmit on the CABQ or not");
#endif
#ifdef IEEE80211_SUPPORT_TDMA
if (ath_hal_macversion(ah) > 0x78) {
sc->sc_tdmadbaprep = 2;
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"dbaprep", CTLFLAG_RW, &sc->sc_tdmadbaprep, 0,
"TDMA DBA preparation time");
sc->sc_tdmaswbaprep = 10;
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"swbaprep", CTLFLAG_RW, &sc->sc_tdmaswbaprep, 0,
"TDMA SWBA preparation time");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"guardtime", CTLFLAG_RW, &sc->sc_tdmaguard, 0,
"TDMA slot guard time");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"superframe", CTLFLAG_RD, &sc->sc_tdmabintval, 0,
"TDMA calculated super frame");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"setcca", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_setcca, "I", "enable CCA control");
}
#endif
#ifdef ATH_DEBUG_ALQ
ath_sysctl_alq_attach(sc);
#endif
}
static int
ath_sysctl_clearstats(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
int val = 0;
int error;
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
return error;
if (val == 0)
return 0; /* Not clearing the stats is still valid */
memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
memset(&sc->sc_aggr_stats, 0, sizeof(sc->sc_aggr_stats));
memset(&sc->sc_intr_stats, 0, sizeof(sc->sc_intr_stats));
val = 0;
return 0;
}
static void
ath_sysctl_stats_attach_rxphyerr(struct ath_softc *sc, struct sysctl_oid_list *parent)
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
int i;
char sn[8];
tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx_phy_err", CTLFLAG_RD, NULL, "Per-code RX PHY Errors");
child = SYSCTL_CHILDREN(tree);
for (i = 0; i < 64; i++) {
snprintf(sn, sizeof(sn), "%d", i);
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, sn, CTLFLAG_RD, &sc->sc_stats.ast_rx_phy[i], 0, "");
}
}
static void
ath_sysctl_stats_attach_intr(struct ath_softc *sc,
struct sysctl_oid_list *parent)
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
int i;
char sn[8];
tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "sync_intr",
CTLFLAG_RD, NULL, "Sync interrupt statistics");
child = SYSCTL_CHILDREN(tree);
for (i = 0; i < 32; i++) {
snprintf(sn, sizeof(sn), "%d", i);
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, sn, CTLFLAG_RD,
&sc->sc_intr_stats.sync_intr[i], 0, "");
}
}
void
ath_sysctl_stats_attach(struct ath_softc *sc)
{
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
/* Create "clear" node */
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"clear_stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_clearstats, "I", "clear stats");
/* Create stats node */
tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
NULL, "Statistics");
child = SYSCTL_CHILDREN(tree);
/* This was generated from if_athioctl.h */
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_watchdog", CTLFLAG_RD,
&sc->sc_stats.ast_watchdog, 0, "device reset by watchdog");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_hardware", CTLFLAG_RD,
&sc->sc_stats.ast_hardware, 0, "fatal hardware error interrupts");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_bmiss", CTLFLAG_RD,
&sc->sc_stats.ast_bmiss, 0, "beacon miss interrupts");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_bmiss_phantom", CTLFLAG_RD,
&sc->sc_stats.ast_bmiss_phantom, 0, "phantom beacon miss interrupts");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_bstuck", CTLFLAG_RD,
&sc->sc_stats.ast_bstuck, 0, "beacon stuck interrupts");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rxorn", CTLFLAG_RD,
&sc->sc_stats.ast_rxorn, 0, "rx overrun interrupts");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rxeol", CTLFLAG_RD,
&sc->sc_stats.ast_rxeol, 0, "rx eol interrupts");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_txurn", CTLFLAG_RD,
&sc->sc_stats.ast_txurn, 0, "tx underrun interrupts");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_mib", CTLFLAG_RD,
&sc->sc_stats.ast_mib, 0, "mib interrupts");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_intrcoal", CTLFLAG_RD,
&sc->sc_stats.ast_intrcoal, 0, "interrupts coalesced");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_packets", CTLFLAG_RD,
&sc->sc_stats.ast_tx_packets, 0, "packets sent on the interface");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_mgmt", CTLFLAG_RD,
&sc->sc_stats.ast_tx_mgmt, 0, "management frames transmitted");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_discard", CTLFLAG_RD,
&sc->sc_stats.ast_tx_discard, 0, "frames discarded prior to assoc");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_qstop", CTLFLAG_RD,
&sc->sc_stats.ast_tx_qstop, 0, "output stopped 'cuz no buffer");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_encap", CTLFLAG_RD,
&sc->sc_stats.ast_tx_encap, 0, "tx encapsulation failed");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_nonode", CTLFLAG_RD,
&sc->sc_stats.ast_tx_nonode, 0, "tx failed 'cuz no node");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_nombuf", CTLFLAG_RD,
&sc->sc_stats.ast_tx_nombuf, 0, "tx failed 'cuz no mbuf");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_nomcl", CTLFLAG_RD,
&sc->sc_stats.ast_tx_nomcl, 0, "tx failed 'cuz no cluster");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_linear", CTLFLAG_RD,
&sc->sc_stats.ast_tx_linear, 0, "tx linearized to cluster");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_nodata", CTLFLAG_RD,
&sc->sc_stats.ast_tx_nodata, 0, "tx discarded empty frame");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_busdma", CTLFLAG_RD,
&sc->sc_stats.ast_tx_busdma, 0, "tx failed for dma resrcs");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_xretries", CTLFLAG_RD,
&sc->sc_stats.ast_tx_xretries, 0, "tx failed 'cuz too many retries");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_fifoerr", CTLFLAG_RD,
&sc->sc_stats.ast_tx_fifoerr, 0, "tx failed 'cuz FIFO underrun");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_filtered", CTLFLAG_RD,
&sc->sc_stats.ast_tx_filtered, 0, "tx failed 'cuz xmit filtered");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_shortretry", CTLFLAG_RD,
&sc->sc_stats.ast_tx_shortretry, 0, "tx on-chip retries (short)");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_longretry", CTLFLAG_RD,
&sc->sc_stats.ast_tx_longretry, 0, "tx on-chip retries (long)");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_badrate", CTLFLAG_RD,
&sc->sc_stats.ast_tx_badrate, 0, "tx failed 'cuz bogus xmit rate");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_noack", CTLFLAG_RD,
&sc->sc_stats.ast_tx_noack, 0, "tx frames with no ack marked");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_rts", CTLFLAG_RD,
&sc->sc_stats.ast_tx_rts, 0, "tx frames with rts enabled");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_cts", CTLFLAG_RD,
&sc->sc_stats.ast_tx_cts, 0, "tx frames with cts enabled");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_shortpre", CTLFLAG_RD,
&sc->sc_stats.ast_tx_shortpre, 0, "tx frames with short preamble");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_altrate", CTLFLAG_RD,
&sc->sc_stats.ast_tx_altrate, 0, "tx frames with alternate rate");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_protect", CTLFLAG_RD,
&sc->sc_stats.ast_tx_protect, 0, "tx frames with protection");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_ctsburst", CTLFLAG_RD,
&sc->sc_stats.ast_tx_ctsburst, 0, "tx frames with cts and bursting");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_ctsext", CTLFLAG_RD,
&sc->sc_stats.ast_tx_ctsext, 0, "tx frames with cts extension");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_nombuf", CTLFLAG_RD,
&sc->sc_stats.ast_rx_nombuf, 0, "rx setup failed 'cuz no mbuf");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_busdma", CTLFLAG_RD,
&sc->sc_stats.ast_rx_busdma, 0, "rx setup failed for dma resrcs");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_orn", CTLFLAG_RD,
&sc->sc_stats.ast_rx_orn, 0, "rx failed 'cuz of desc overrun");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_crcerr", CTLFLAG_RD,
&sc->sc_stats.ast_rx_crcerr, 0, "rx failed 'cuz of bad CRC");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_fifoerr", CTLFLAG_RD,
&sc->sc_stats.ast_rx_fifoerr, 0, "rx failed 'cuz of FIFO overrun");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_badcrypt", CTLFLAG_RD,
&sc->sc_stats.ast_rx_badcrypt, 0, "rx failed 'cuz decryption");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_badmic", CTLFLAG_RD,
&sc->sc_stats.ast_rx_badmic, 0, "rx failed 'cuz MIC failure");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_phyerr", CTLFLAG_RD,
&sc->sc_stats.ast_rx_phyerr, 0, "rx failed 'cuz of PHY err");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_tooshort", CTLFLAG_RD,
&sc->sc_stats.ast_rx_tooshort, 0, "rx discarded 'cuz frame too short");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_toobig", CTLFLAG_RD,
&sc->sc_stats.ast_rx_toobig, 0, "rx discarded 'cuz frame too large");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_packets", CTLFLAG_RD,
&sc->sc_stats.ast_rx_packets, 0, "packets received on the interface");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_mgt", CTLFLAG_RD,
&sc->sc_stats.ast_rx_mgt, 0, "management frames received");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_ctl", CTLFLAG_RD,
&sc->sc_stats.ast_rx_ctl, 0, "rx discarded 'cuz ctl frame");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_be_xmit", CTLFLAG_RD,
&sc->sc_stats.ast_be_xmit, 0, "beacons transmitted");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_be_nombuf", CTLFLAG_RD,
&sc->sc_stats.ast_be_nombuf, 0, "beacon setup failed 'cuz no mbuf");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_per_cal", CTLFLAG_RD,
&sc->sc_stats.ast_per_cal, 0, "periodic calibration calls");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_per_calfail", CTLFLAG_RD,
&sc->sc_stats.ast_per_calfail, 0, "periodic calibration failed");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_per_rfgain", CTLFLAG_RD,
&sc->sc_stats.ast_per_rfgain, 0, "periodic calibration rfgain reset");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rate_calls", CTLFLAG_RD,
&sc->sc_stats.ast_rate_calls, 0, "rate control checks");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rate_raise", CTLFLAG_RD,
&sc->sc_stats.ast_rate_raise, 0, "rate control raised xmit rate");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rate_drop", CTLFLAG_RD,
&sc->sc_stats.ast_rate_drop, 0, "rate control dropped xmit rate");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ant_defswitch", CTLFLAG_RD,
&sc->sc_stats.ast_ant_defswitch, 0, "rx/default antenna switches");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ant_txswitch", CTLFLAG_RD,
&sc->sc_stats.ast_ant_txswitch, 0, "tx antenna switches");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_cabq_xmit", CTLFLAG_RD,
&sc->sc_stats.ast_cabq_xmit, 0, "cabq frames transmitted");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_cabq_busy", CTLFLAG_RD,
&sc->sc_stats.ast_cabq_busy, 0, "cabq found busy");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_raw", CTLFLAG_RD,
&sc->sc_stats.ast_tx_raw, 0, "tx frames through raw api");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ff_txok", CTLFLAG_RD,
&sc->sc_stats.ast_ff_txok, 0, "fast frames tx'd successfully");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ff_txerr", CTLFLAG_RD,
&sc->sc_stats.ast_ff_txerr, 0, "fast frames tx'd w/ error");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ff_rx", CTLFLAG_RD,
&sc->sc_stats.ast_ff_rx, 0, "fast frames rx'd");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ff_flush", CTLFLAG_RD,
&sc->sc_stats.ast_ff_flush, 0, "fast frames flushed from staging q");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_qfull", CTLFLAG_RD,
&sc->sc_stats.ast_tx_qfull, 0, "tx dropped 'cuz of queue limit");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_nobuf", CTLFLAG_RD,
&sc->sc_stats.ast_tx_nobuf, 0, "tx dropped 'cuz no ath buffer");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tdma_update", CTLFLAG_RD,
&sc->sc_stats.ast_tdma_update, 0, "TDMA slot timing updates");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tdma_timers", CTLFLAG_RD,
&sc->sc_stats.ast_tdma_timers, 0, "TDMA slot update set beacon timers");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tdma_tsf", CTLFLAG_RD,
&sc->sc_stats.ast_tdma_tsf, 0, "TDMA slot update set TSF");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tdma_ack", CTLFLAG_RD,
&sc->sc_stats.ast_tdma_ack, 0, "TDMA tx failed 'cuz ACK required");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_raw_fail", CTLFLAG_RD,
&sc->sc_stats.ast_tx_raw_fail, 0, "raw tx failed 'cuz h/w down");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_nofrag", CTLFLAG_RD,
&sc->sc_stats.ast_tx_nofrag, 0, "tx dropped 'cuz no ath frag buffer");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_be_missed", CTLFLAG_RD,
&sc->sc_stats.ast_be_missed, 0, "number of -missed- beacons");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_ani_cal", CTLFLAG_RD,
&sc->sc_stats.ast_ani_cal, 0, "number of ANI polls");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_agg", CTLFLAG_RD,
&sc->sc_stats.ast_rx_agg, 0, "number of aggregate frames received");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_halfgi", CTLFLAG_RD,
&sc->sc_stats.ast_rx_halfgi, 0, "number of frames received with half-GI");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_2040", CTLFLAG_RD,
&sc->sc_stats.ast_rx_2040, 0, "number of HT/40 frames received");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_pre_crc_err", CTLFLAG_RD,
&sc->sc_stats.ast_rx_pre_crc_err, 0, "number of delimiter-CRC errors detected");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_post_crc_err", CTLFLAG_RD,
&sc->sc_stats.ast_rx_post_crc_err, 0, "number of post-delimiter CRC errors detected");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_decrypt_busy_err", CTLFLAG_RD,
&sc->sc_stats.ast_rx_decrypt_busy_err, 0, "number of frames received w/ busy decrypt engine");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_hi_rx_chain", CTLFLAG_RD,
&sc->sc_stats.ast_rx_hi_rx_chain, 0, "");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_htprotect", CTLFLAG_RD,
&sc->sc_stats.ast_tx_htprotect, 0, "HT tx frames with protection");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_hitqueueend", CTLFLAG_RD,
&sc->sc_stats.ast_rx_hitqueueend, 0, "RX hit queue end");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_timeout", CTLFLAG_RD,
&sc->sc_stats.ast_tx_timeout, 0, "TX Global Timeout");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_cst", CTLFLAG_RD,
&sc->sc_stats.ast_tx_cst, 0, "TX Carrier Sense Timeout");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_xtxop", CTLFLAG_RD,
&sc->sc_stats.ast_tx_xtxop, 0, "TX exceeded TXOP");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_timerexpired", CTLFLAG_RD,
&sc->sc_stats.ast_tx_timerexpired, 0, "TX exceeded TX_TIMER register");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_desccfgerr", CTLFLAG_RD,
&sc->sc_stats.ast_tx_desccfgerr, 0, "TX Descriptor Cfg Error");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_swretries", CTLFLAG_RD,
&sc->sc_stats.ast_tx_swretries, 0, "TX software retry count");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_swretrymax", CTLFLAG_RD,
&sc->sc_stats.ast_tx_swretrymax, 0, "TX software retry max reached");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_data_underrun", CTLFLAG_RD,
&sc->sc_stats.ast_tx_data_underrun, 0, "");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_delim_underrun", CTLFLAG_RD,
&sc->sc_stats.ast_tx_delim_underrun, 0, "");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_aggr_failall", CTLFLAG_RD,
&sc->sc_stats.ast_tx_aggr_failall, 0,
"Number of aggregate TX failures (whole frame)");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_aggr_ok", CTLFLAG_RD,
&sc->sc_stats.ast_tx_aggr_ok, 0,
"Number of aggregate TX OK completions (subframe)");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_aggr_fail", CTLFLAG_RD,
&sc->sc_stats.ast_tx_aggr_fail, 0,
"Number of aggregate TX failures (subframe)");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_intr", CTLFLAG_RD,
&sc->sc_stats.ast_rx_intr, 0, "RX interrupts");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_intr", CTLFLAG_RD,
&sc->sc_stats.ast_tx_intr, 0, "TX interrupts");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_mcastq_overflow",
CTLFLAG_RD, &sc->sc_stats.ast_tx_mcastq_overflow, 0,
"Number of multicast frames exceeding maximum mcast queue depth");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_keymiss", CTLFLAG_RD,
&sc->sc_stats.ast_rx_keymiss, 0, "");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_tx_swfiltered", CTLFLAG_RD,
&sc->sc_stats.ast_tx_swfiltered, 0, "");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "ast_rx_stbc",
CTLFLAG_RD, &sc->sc_stats.ast_rx_stbc, 0,
"Number of STBC frames received");
/* Attach the RX phy error array */
ath_sysctl_stats_attach_rxphyerr(sc, child);
/* Attach the interrupt statistics array */
ath_sysctl_stats_attach_intr(sc, child);
}
/*
* This doesn't necessarily belong here (because it's HAL related, not
* driver related).
*/
void
ath_sysctl_hal_attach(struct ath_softc *sc)
{
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hal", CTLFLAG_RD,
NULL, "Atheros HAL parameters");
child = SYSCTL_CHILDREN(tree);
sc->sc_ah->ah_config.ah_debug = 0;
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug", CTLFLAG_RW,
&sc->sc_ah->ah_config.ah_debug, 0, "Atheros HAL debugging printfs");
sc->sc_ah->ah_config.ah_ar5416_biasadj = 0;
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ar5416_biasadj", CTLFLAG_RW,
&sc->sc_ah->ah_config.ah_ar5416_biasadj, 0,
"Enable 2GHz AR5416 direction sensitivity bias adjust");
sc->sc_ah->ah_config.ah_dma_beacon_response_time = 2;
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "dma_brt", CTLFLAG_RW,
&sc->sc_ah->ah_config.ah_dma_beacon_response_time, 0,
"Atheros HAL DMA beacon response time");
sc->sc_ah->ah_config.ah_sw_beacon_response_time = 10;
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "sw_brt", CTLFLAG_RW,
&sc->sc_ah->ah_config.ah_sw_beacon_response_time, 0,
"Atheros HAL software beacon response time");
sc->sc_ah->ah_config.ah_additional_swba_backoff = 0;
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "swba_backoff", CTLFLAG_RW,
&sc->sc_ah->ah_config.ah_additional_swba_backoff, 0,
"Atheros HAL additional SWBA backoff time");
sc->sc_ah->ah_config.ah_force_full_reset = 0;
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "force_full_reset", CTLFLAG_RW,
&sc->sc_ah->ah_config.ah_force_full_reset, 0,
"Force full chip reset rather than a warm reset");
/*
* This is initialised by the driver.
*/
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "serialise_reg_war", CTLFLAG_RW,
&sc->sc_ah->ah_config.ah_serialise_reg_war, 0,
"Force register access serialisation");
}
Index: head/sys/dev/ath/if_ath_tdma.c
===================================================================
--- head/sys/dev/ath/if_ath_tdma.c (revision 287196)
+++ head/sys/dev/ath/if_ath_tdma.c (revision 287197)
@@ -1,687 +1,686 @@
/*-
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for the Atheros Wireless LAN controller.
*
* This software is derived from work of Atsushi Onoe; his contribution
* is greatly appreciated.
*/
#include "opt_inet.h"
#include "opt_ath.h"
/*
* This is needed for register operations which are performed
* by the driver - eg, calls to ath_hal_gettsf32().
*
* It's also required for any AH_DEBUG checks in here, eg the
* module dependencies.
*/
#include "opt_ah.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h> /* for mp_ncpus */
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net/bpf.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>
#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>
#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/ath/if_ath_tdma.h>
static void ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
u_int32_t bintval);
static void ath_tdma_bintvalsetup(struct ath_softc *sc,
const struct ieee80211_tdma_state *tdma);
#endif /* IEEE80211_SUPPORT_TDMA */
#ifdef IEEE80211_SUPPORT_TDMA
static void
ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval)
{
struct ath_hal *ah = sc->sc_ah;
HAL_BEACON_TIMERS bt;
bt.bt_intval = bintval | HAL_BEACON_ENA;
bt.bt_nexttbtt = nexttbtt;
bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep;
bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep;
bt.bt_nextatim = nexttbtt+1;
/* Enables TBTT, DBA, SWBA timers by default */
bt.bt_flags = 0;
#if 0
DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
"%s: intval=%d (0x%08x) nexttbtt=%u (0x%08x), nextdba=%u (0x%08x), nextswba=%u (0x%08x),nextatim=%u (0x%08x)\n",
__func__,
bt.bt_intval,
bt.bt_intval,
bt.bt_nexttbtt,
bt.bt_nexttbtt,
bt.bt_nextdba,
bt.bt_nextdba,
bt.bt_nextswba,
bt.bt_nextswba,
bt.bt_nextatim,
bt.bt_nextatim);
#endif
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_TDMA_TIMER_SET)) {
struct if_ath_alq_tdma_timer_set t;
t.bt_intval = htobe32(bt.bt_intval);
t.bt_nexttbtt = htobe32(bt.bt_nexttbtt);
t.bt_nextdba = htobe32(bt.bt_nextdba);
t.bt_nextswba = htobe32(bt.bt_nextswba);
t.bt_nextatim = htobe32(bt.bt_nextatim);
t.bt_flags = htobe32(bt.bt_flags);
t.sc_tdmadbaprep = htobe32(sc->sc_tdmadbaprep);
t.sc_tdmaswbaprep = htobe32(sc->sc_tdmaswbaprep);
if_ath_alq_post(&sc->sc_alq, ATH_ALQ_TDMA_TIMER_SET,
sizeof(t), (char *) &t);
}
#endif
DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
"%s: nexttbtt=%u (0x%08x), nexttbtt tsf=%lld (0x%08llx)\n",
__func__,
bt.bt_nexttbtt,
bt.bt_nexttbtt,
(long long) ( ((u_int64_t) (bt.bt_nexttbtt)) << 10),
(long long) ( ((u_int64_t) (bt.bt_nexttbtt)) << 10));
ath_hal_beaconsettimers(ah, &bt);
}
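/*
 * Numeric sketch of the timer programming above (illustrative only,
 * not driver code; the helper name is made up).  It assumes, consistent
 * with the << 3 shifts above, that the DBA/SWBA fields are expressed in
 * 1/8 TU units while nexttbtt is in TU: with nexttbtt = 100 TU and
 * sc_tdmadbaprep = 2, bt_nextdba is (100 << 3) - 2 = 798, i.e. the DMA
 * beacon alert fires 2/8 TU ahead of the target beacon time.
 */
#if 0
static uint32_t
tdma_nextdba_sketch(uint32_t nexttbtt_tu, uint32_t dbaprep)
{
	/* mirrors bt_nextdba above: scale TU to 1/8 TU, back off by prep */
	return ((nexttbtt_tu << 3) - dbaprep);
}
#endif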
/*
* Calculate the beacon interval. This is periodic in the
* superframe for the bss. We assume each station is configured
* identically wrt transmit rate so the guard time we calculate
* above will be the same on all stations. Note we need to
* factor in the xmit time because the hardware will schedule
* a frame for transmit if the start of the frame is within
* the burst time. When we get hardware that properly kills
* frames in the PCU we can reduce/eliminate the guard time.
*
* Roundup to 1024 is so we have 1 TU buffer in the guard time
* to deal with the granularity of the nexttbtt timer. 11n MACs
* with 1us timer granularity should allow us to reduce/eliminate
* this.
*/
static void
ath_tdma_bintvalsetup(struct ath_softc *sc,
const struct ieee80211_tdma_state *tdma)
{
/* copy from vap state (XXX check all vaps have same value?) */
sc->sc_tdmaslotlen = tdma->tdma_slotlen;
sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) *
tdma->tdma_slotcnt, 1024);
sc->sc_tdmabintval >>= 10; /* TSF -> TU */
if (sc->sc_tdmabintval & 1)
sc->sc_tdmabintval++;
if (tdma->tdma_slot == 0) {
/*
* Only slot 0 beacons; other slots respond.
*/
sc->sc_imask |= HAL_INT_SWBA;
sc->sc_tdmaswba = 0; /* beacon immediately */
} else {
/* XXX all vaps must be slot 0 or slot !0 */
sc->sc_imask &= ~HAL_INT_SWBA;
}
}
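/*
 * Worked example of the interval calculation above (a sketch with
 * made-up numbers; the helper name is hypothetical): slotlen = 10000 us,
 * guard = 0, slotcnt = 2 gives roundup(20000, 1024) = 20480 TSF,
 * >> 10 = 20 TU, already even so no further adjustment; the superframe
 * is 20 TU long.
 */
#if 0
static u_int32_t
tdma_bintval_sketch(u_int32_t slotlen, u_int32_t guard, u_int32_t slotcnt)
{
	u_int32_t bintval;

	bintval = roundup((slotlen + guard) * slotcnt, 1024);
	bintval >>= 10;			/* TSF (us) -> TU */
	if (bintval & 1)
		bintval++;		/* keep it even, as above */
	return (bintval);
}
#endif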
/*
* Max 802.11 overhead. This assumes no 4-address frames and
* the encapsulation done by ieee80211_encap (llc). We also
* include potential crypto overhead.
*/
#define IEEE80211_MAXOVERHEAD \
(sizeof(struct ieee80211_qosframe) \
+ sizeof(struct llc) \
+ IEEE80211_ADDR_LEN \
+ IEEE80211_WEP_IVLEN \
+ IEEE80211_WEP_KIDLEN \
+ IEEE80211_WEP_CRCLEN \
+ IEEE80211_WEP_MICLEN \
+ IEEE80211_CRC_LEN)
/*
* Setup initially for tdma operation. Start the beacon
* timers and enable SWBA if we are slot 0. Otherwise
* we wait for slot 0 to arrive so we can sync up before
* starting to transmit.
*/
void
ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap)
{
struct ath_hal *ah = sc->sc_ah;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
const struct ieee80211_txparam *tp;
const struct ieee80211_tdma_state *tdma = NULL;
int rix;
if (vap == NULL) {
vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */
if (vap == NULL) {
device_printf(sc->sc_dev, "%s: no vaps?\n", __func__);
return;
}
}
/* XXX should take a locked ref to iv_bss */
tp = vap->iv_bss->ni_txparms;
/*
* Calculate the guard time for each slot. This is the
* time to send a maximal-size frame according to the
* fixed/lowest transmit rate. Note that the interface
* mtu does not include the 802.11 overhead so we must
* tack that on (ath_hal_computetxtime includes the
* preamble and plcp in its calculation).
*/
tdma = vap->iv_tdma;
if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
rix = ath_tx_findrix(sc, tp->ucastrate);
else
rix = ath_tx_findrix(sc, tp->mcastrate);
/*
* If the chip supports enforcing TxOP on transmission,
* we can just delete the guard window. It isn't at all required.
*/
if (sc->sc_hasenforcetxop) {
sc->sc_tdmaguard = 0;
} else {
/* XXX short preamble assumed */
/* XXX non-11n rate assumed */
sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates,
- ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE);
+ vap->iv_ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE);
}
ath_hal_intrset(ah, 0);
ath_beaconq_config(sc); /* setup h/w beacon q */
if (sc->sc_setcca)
ath_hal_setcca(ah, AH_FALSE); /* disable CCA */
ath_tdma_bintvalsetup(sc, tdma); /* calculate beacon interval */
ath_tdma_settimers(sc, sc->sc_tdmabintval,
sc->sc_tdmabintval | HAL_BEACON_RESET_TSF);
sc->sc_syncbeacon = 0;
sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER;
sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER;
ath_hal_intrset(ah, sc->sc_imask);
DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u "
"bsched %u guard %uus bintval %u TU dba prep %u\n", __func__,
tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt,
tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval,
sc->sc_tdmadbaprep);
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_TDMA_TIMER_CONFIG)) {
struct if_ath_alq_tdma_timer_config t;
t.tdma_slot = htobe32(tdma->tdma_slot);
t.tdma_slotlen = htobe32(tdma->tdma_slotlen);
t.tdma_slotcnt = htobe32(tdma->tdma_slotcnt);
t.tdma_bintval = htobe32(tdma->tdma_bintval);
t.tdma_guard = htobe32(sc->sc_tdmaguard);
t.tdma_scbintval = htobe32(sc->sc_tdmabintval);
t.tdma_dbaprep = htobe32(sc->sc_tdmadbaprep);
if_ath_alq_post(&sc->sc_alq, ATH_ALQ_TDMA_TIMER_CONFIG,
sizeof(t), (char *) &t);
}
#endif /* ATH_DEBUG_ALQ */
}
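/*
 * Rough guard-time arithmetic for the non-TxOP case above (an
 * illustrative sketch with assumed numbers and a hypothetical helper;
 * it ignores the preamble/PLCP time that ath_hal_computetxtime() adds):
 * a 1500 byte MTU plus roughly 90 bytes of 802.11/LLC/crypto overhead
 * is about 1590 bytes, and at a fixed 1 Mb/s rate that is
 * 1590 * 8 = 12720 us of payload airtime per slot of guard.
 */
#if 0
static u_int
tdma_guard_airtime_sketch(u_int bytes, u_int rate_kbps)
{
	/* payload airtime in microseconds, excluding preamble/PLCP */
	return ((bytes * 8 * 1000) / rate_kbps);
}
#endif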
/*
* Update tdma operation. Called from the 802.11 layer
* when a beacon is received from the TDMA station operating
* in the slot immediately preceding us in the bss. Use
* the rx timestamp for the beacon frame to update our
* beacon timers so we follow their schedule. Note that
* by using the rx timestamp we implicitly include the
* propagation delay in our schedule.
*
* XXX TODO: since the changes for the AR5416 and later chips
* involved changing the TSF/TU calculations, we need to make
* sure that various calculations wrap consistently.
*
* A lot of the problems stemmed from the calculations wrapping
* at 65,535 TU. Since a lot of the math is still being done in
* TU, please audit it to ensure that when the TU values programmed
* into the timers wrap at (2^31)-1 TSF, all the various terms
* wrap consistently.
*/
void
ath_tdma_update(struct ieee80211_node *ni,
const struct ieee80211_tdma_param *tdma, int changed)
{
#define TSF_TO_TU(_h,_l) \
((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
#define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10)
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct ath_softc *sc = ic->ic_softc;
struct ath_hal *ah = sc->sc_ah;
const HAL_RATE_TABLE *rt = sc->sc_currates;
u_int64_t tsf, rstamp, nextslot, nexttbtt, nexttbtt_full;
u_int32_t txtime, nextslottu;
int32_t tudelta, tsfdelta;
const struct ath_rx_status *rs;
int rix;
sc->sc_stats.ast_tdma_update++;
/*
* Check for and adopt configuration changes.
*/
if (changed != 0) {
const struct ieee80211_tdma_state *ts = vap->iv_tdma;
ath_tdma_bintvalsetup(sc, ts);
if (changed & TDMA_UPDATE_SLOTLEN)
ath_wme_update(ic);
DPRINTF(sc, ATH_DEBUG_TDMA,
"%s: adopt slot %u slotcnt %u slotlen %u us "
"bintval %u TU\n", __func__,
ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen,
sc->sc_tdmabintval);
/* XXX right? */
ath_hal_intrset(ah, sc->sc_imask);
/* NB: beacon timers programmed below */
}
/* extend rx timestamp to 64 bits */
rs = sc->sc_lastrs;
tsf = ath_hal_gettsf64(ah);
rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf);
/*
* The rx timestamp is set by the hardware on completing
* reception (at the point where the rx descriptor is DMA'd
* to the host). To find the start of our next slot we
* must adjust this time by the time required to send
* the packet just received.
*/
rix = rt->rateCodeToIndex[rs->rs_rate];
/*
* To calculate the packet duration for legacy rates, we
* only need the rix and preamble.
*
* For 11n non-aggregate frames, we also need the channel
* width and short/long guard interval.
*
* For 11n aggregate frames, the required hacks are a little
* more subtle. You need to figure out the frame duration
* for each frame, including the delimiters. However, when
* a frame isn't received successfully, we won't hear it
* (unless you enable reception of CRC errored frames), so
* your duration calculation is going to be off.
*
* However, we can assume that the beacon frames won't be
* transmitted as aggregate frames, so we should be okay.
* Just add a check to ensure that we aren't handed something
* bad.
*
* For ath_hal_pkt_txtime() - for 11n rates, shortPreamble is
* actually short guard interval. For legacy rates,
* it's short preamble.
*/
txtime = ath_hal_pkt_txtime(ah, rt, rs->rs_datalen,
rix,
!! (rs->rs_flags & HAL_RX_2040),
(rix & 0x80) ?
(! (rs->rs_flags & HAL_RX_GI)) : rt->info[rix].shortPreamble);
/* NB: << 9 is to cvt to TU and /2 */
nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9);
/*
* For 802.11n chips: nextslottu needs to be the full TSF space,
* not just 0..65535 TU.
*/
nextslottu = TSF_TO_TU(nextslot>>32, nextslot);
/*
* Retrieve the hardware NextTBTT in usecs
* and calculate the difference between what the
* other station thinks and what we have programmed. This
* lets us figure how to adjust our timers to match. The
* adjustments are done by pulling the TSF forward and possibly
* rewriting the beacon timers.
*/
/*
 * The logic here assumes the nexttbtt counter is in TSF
 * but the pre-11n NICs are in TU. The HAL shifts them
 * to TSF but there are two important differences:
 *
 * + The TU->TSF values have 0's for the low 9 bits, and
 * + The counter wraps at TU_TO_TSF(HAL_BEACON_PERIOD + 1) for
 * the pre-11n NICs, but not for the 11n NICs.
 *
 * So for now, just wrap the nexttbtt value we read so it
 * matches the pre-11n behaviour; otherwise, once nexttbtt
 * exceeds this value, tsfdelta ends up becoming very negative
 * and all of the adjustments get very messed up.
 */
/*
* We need to track the full nexttbtt rather than having it
* truncated at HAL_BEACON_PERIOD, as programming the
* nexttbtt (and related) registers for the 11n chips is
* actually going to take the full 32 bit space, rather than
* just 0..65535 TU.
*/
nexttbtt_full = ath_hal_getnexttbtt(ah);
nexttbtt = nexttbtt_full % (TU_TO_TSF(HAL_BEACON_PERIOD + 1));
tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD + 1)) - nexttbtt);
DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
"rs->rstamp %llu rstamp %llu tsf %llu txtime %d, nextslot %llu, "
"nextslottu %d, nextslottume %d\n",
(unsigned long long) rs->rs_tstamp,
(unsigned long long) rstamp,
(unsigned long long) tsf, txtime,
(unsigned long long) nextslot,
nextslottu, TSF_TO_TU(nextslot >> 32, nextslot));
DPRINTF(sc, ATH_DEBUG_TDMA,
" beacon tstamp: %llu (0x%016llx)\n",
(unsigned long long) le64toh(ni->ni_tstamp.tsf),
(unsigned long long) le64toh(ni->ni_tstamp.tsf));
DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
"nexttbtt %llu (0x%08llx) tsfdelta %d avg +%d/-%d\n",
(unsigned long long) nexttbtt,
(long long) nexttbtt,
tsfdelta,
TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam));
if (tsfdelta < 0) {
TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta);
tsfdelta = -tsfdelta % 1024;
nextslottu++;
} else if (tsfdelta > 0) {
TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta);
TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
tsfdelta = 1024 - (tsfdelta % 1024);
nextslottu++;
} else {
TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
}
tudelta = nextslottu - TSF_TO_TU(nexttbtt_full >> 32, nexttbtt_full);
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_TDMA_BEACON_STATE)) {
struct if_ath_alq_tdma_beacon_state t;
t.rx_tsf = htobe64(rstamp);
t.beacon_tsf = htobe64(le64toh(ni->ni_tstamp.tsf));
t.tsf64 = htobe64(tsf);
t.nextslot_tsf = htobe64(nextslot);
t.nextslot_tu = htobe32(nextslottu);
t.txtime = htobe32(txtime);
if_ath_alq_post(&sc->sc_alq, ATH_ALQ_TDMA_BEACON_STATE,
sizeof(t), (char *) &t);
}
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_TDMA_SLOT_CALC)) {
struct if_ath_alq_tdma_slot_calc t;
t.nexttbtt = htobe64(nexttbtt_full);
t.next_slot = htobe64(nextslot);
t.tsfdelta = htobe32(tsfdelta);
t.avg_plus = htobe32(TDMA_AVG(sc->sc_avgtsfdeltap));
t.avg_minus = htobe32(TDMA_AVG(sc->sc_avgtsfdeltam));
if_ath_alq_post(&sc->sc_alq, ATH_ALQ_TDMA_SLOT_CALC,
sizeof(t), (char *) &t);
}
#endif
/*
* Copy sender's timestamp into tdma ie so they can
* calculate roundtrip time. We submit a beacon frame
* below after any timer adjustment. The frame goes out
* at the next TBTT so the sender can calculate the
* roundtrip by inspecting the tdma ie in our beacon frame.
*
* NB: This tstamp is subtly preserved when
* IEEE80211_BEACON_TDMA is marked (e.g. when the
* slot position changes) because ieee80211_add_tdma
* skips over the data.
*/
memcpy(ATH_VAP(vap)->av_boff.bo_tdma +
__offsetof(struct ieee80211_tdma_param, tdma_tstamp),
&ni->ni_tstamp.data, 8);
#if 0
DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
"tsf %llu nextslot %llu (%d, %d) nextslottu %u nexttbtt %llu (%d)\n",
(unsigned long long) tsf, (unsigned long long) nextslot,
(int)(nextslot - tsf), tsfdelta, nextslottu, nexttbtt, tudelta);
#endif
/*
* Adjust the beacon timers only when pulling them forward
* or when going back by less than the beacon interval.
* Negative jumps larger than the beacon interval seem to
* cause the timers to stop and generally cause instability.
* This basically filters out jumps due to missed beacons.
*/
if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) {
DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
"%s: calling ath_tdma_settimers; nextslottu=%d, bintval=%d\n",
__func__,
nextslottu,
sc->sc_tdmabintval);
ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval);
sc->sc_stats.ast_tdma_timers++;
}
if (tsfdelta > 0) {
uint64_t tsf;
/* XXX should just teach ath_hal_adjusttsf() to do this */
tsf = ath_hal_gettsf64(ah);
ath_hal_settsf64(ah, tsf + tsfdelta);
DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
"%s: calling ath_hal_adjusttsf: TSF=%llu, tsfdelta=%d\n",
__func__,
(unsigned long long) tsf,
tsfdelta);
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq,
ATH_ALQ_TDMA_TSF_ADJUST)) {
struct if_ath_alq_tdma_tsf_adjust t;
t.tsfdelta = htobe32(tsfdelta);
t.tsf64_old = htobe64(tsf);
t.tsf64_new = htobe64(tsf + tsfdelta);
if_ath_alq_post(&sc->sc_alq, ATH_ALQ_TDMA_TSF_ADJUST,
sizeof(t), (char *) &t);
}
#endif /* ATH_DEBUG_ALQ */
sc->sc_stats.ast_tdma_tsf++;
}
ath_tdma_beacon_send(sc, vap); /* prepare response */
#undef TU_TO_TSF
#undef TSF_TO_TU
}
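/*
 * Worked example of the TU/TSF conversions used in ath_tdma_update()
 * (illustrative only; the helper below is hypothetical and not driver
 * code): one TU is 1024 us, so TSF_TO_TU() simply drops the low 10 bits
 * of the 64 bit TSF; e.g. tsf = 0x100000400 (2^32 + 1024 us) maps to
 * (1 << 22) | 1 = 4194305 TU.  The "<< 9" used for nextslot is
 * bintval * 1024 / 2, i.e. half the superframe expressed in TSF units:
 * with a 20 TU superframe that is 20 << 9 = 10240 us.
 */
#if 0
static uint32_t
tdma_tsf_to_tu_sketch(uint64_t tsf)
{
	/* same 32 bit value TSF_TO_TU() assembles from the two halves */
	return ((uint32_t)(tsf >> 10));
}
#endif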
/*
* Transmit a beacon frame at SWBA. Dynamic updates
* to the frame contents are done as needed.
*/
void
ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap)
{
struct ath_hal *ah = sc->sc_ah;
struct ath_buf *bf;
int otherant;
/*
* Check if the previous beacon has gone out. If
* not, don't try to post another; skip this period
* and wait for the next. Missed beacons indicate
* a problem and should not occur. If we miss too
* many consecutive beacons reset the device.
*/
if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
sc->sc_bmisscount++;
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: missed %u consecutive beacons\n",
__func__, sc->sc_bmisscount);
if (sc->sc_bmisscount >= ath_bstuck_threshold)
taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
return;
}
if (sc->sc_bmisscount != 0) {
DPRINTF(sc, ATH_DEBUG_BEACON,
"%s: resume beacon xmit after %u misses\n",
__func__, sc->sc_bmisscount);
sc->sc_bmisscount = 0;
}
/*
* Check recent per-antenna transmit statistics and flip
* the default antenna if noticeably more frames went out
* on the non-default antenna.
* XXX assumes 2 antennae
*/
if (!sc->sc_diversity) {
otherant = sc->sc_defant & 1 ? 2 : 1;
if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
ath_setdefantenna(sc, otherant);
sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
}
bf = ath_beacon_generate(sc, vap);
/* XXX We don't do cabq traffic, but just for completeness .. */
ATH_TXQ_LOCK(sc->sc_cabq);
ath_beacon_cabq_start(sc);
ATH_TXQ_UNLOCK(sc->sc_cabq);
if (bf != NULL) {
/*
* Stop any current dma and put the new frame on the queue.
* This should never fail since we check above that no frames
* are still pending on the queue.
*/
if ((! sc->sc_isedma) &&
(! ath_hal_stoptxdma(ah, sc->sc_bhalq))) {
DPRINTF(sc, ATH_DEBUG_ANY,
"%s: beacon queue %u did not stop?\n",
__func__, sc->sc_bhalq);
/* NB: the HAL still stops DMA, so proceed */
}
ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
ath_hal_txstart(ah, sc->sc_bhalq);
sc->sc_stats.ast_be_xmit++; /* XXX per-vap? */
/*
* Record local TSF for our last send for use
* in arbitrating slot collisions.
*/
/* XXX should take a locked ref to iv_bss */
vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah);
}
}
#endif /* IEEE80211_SUPPORT_TDMA */
Index: head/sys/dev/ath/if_ath_tx.c
===================================================================
--- head/sys/dev/ath/if_ath_tx.c (revision 287196)
+++ head/sys/dev/ath/if_ath_tx.c (revision 287197)
@@ -1,6230 +1,6223 @@
/*-
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for the Atheros Wireless LAN controller.
*
* This software is derived from work of Atsushi Onoe; his contribution
* is greatly appreciated.
*/
#include "opt_inet.h"
#include "opt_ath.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/ktr.h>
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net80211/ieee80211_ht.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>
#include <dev/ath/if_ath_debug.h>
#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_tx_ht.h>
#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif
/*
* How many retries to perform in software
*/
#define SWMAX_RETRIES 10
/*
* What queue to throw the non-QoS TID traffic into
*/
#define ATH_NONQOS_TID_AC WME_AC_VO
#if 0
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
#endif
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid, struct ath_buf *bf);
#ifdef ATH_DEBUG_ALQ
void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
struct ath_buf *bf;
int i, n;
const char *ds;
/* XXX we should skip out early if debugging isn't enabled! */
bf = bf_first;
while (bf != NULL) {
/* XXX should ensure bf_nseg > 0! */
if (bf->bf_nseg == 0)
break;
n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
for (i = 0, ds = (const char *) bf->bf_desc;
i < n;
i++, ds += sc->sc_tx_desclen) {
if_ath_alq_post(&sc->sc_alq,
ATH_ALQ_EDMA_TXDESC,
sc->sc_tx_desclen,
ds);
}
bf = bf->bf_next;
}
}
#endif /* ATH_DEBUG_ALQ */
/*
* Whether to use the 11n rate scenario functions or not
*/
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
return ((sc->sc_ah->ah_magic == 0x20065416) ||
(sc->sc_ah->ah_magic == 0x19741014));
}
/*
* Obtain the current TID from the given frame.
*
* Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
* This has implications for which AC/priority the packet is placed
* in.
*/
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
const struct ieee80211_frame *wh;
int pri = M_WME_GETAC(m0);
wh = mtod(m0, const struct ieee80211_frame *);
if (! IEEE80211_QOS_HAS_SEQ(wh))
return IEEE80211_NONQOS_TID;
else
return WME_AC_TO_TID(pri);
}
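/*
 * Example of the TID selection above (a sketch, not driver code; the
 * helper name is made up): a QoS data frame queued with a given AC is
 * assigned WME_AC_TO_TID(pri), while a frame without a QoS/sequence
 * control field always lands in IEEE80211_NONQOS_TID (TID 16),
 * regardless of the AC the sender asked for.
 */
#if 0
static int
tid_for_frame_sketch(int has_qos, int pri)
{
	/* mirrors ath_tx_gettid(): QoS frames keep their AC-derived TID */
	return (has_qos ? WME_AC_TO_TID(pri) : IEEE80211_NONQOS_TID);
}
#endif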
static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
struct ieee80211_frame *wh;
wh = mtod(bf->bf_m, struct ieee80211_frame *);
/* Only update/resync if needed */
if (bf->bf_state.bfs_isretried == 0) {
wh->i_fc[1] |= IEEE80211_FC1_RETRY;
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
BUS_DMASYNC_PREWRITE);
}
bf->bf_state.bfs_isretried = 1;
bf->bf_state.bfs_retries ++;
}
/*
* Determine what the correct AC queue for the given frame
* should be.
*
* This code assumes that the TIDs map consistently to
* the underlying hardware (or software) ath_txq.
* Since the sender may try to set an AC which is
* arbitrary, non-QoS TIDs may end up being put on
* completely different ACs. There's no way to put a
* TID into multiple ath_txq's for scheduling, so
* for now we override the AC/TXQ selection and set
* non-QOS TID frames into the BE queue.
*
* This may be completely incorrect - specifically,
* some management frames may end up out of order
* compared to the QoS traffic they're controlling.
* I'll look into this later.
*/
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
const struct ieee80211_frame *wh;
int pri = M_WME_GETAC(m0);
wh = mtod(m0, const struct ieee80211_frame *);
if (IEEE80211_QOS_HAS_SEQ(wh))
return pri;
return ATH_NONQOS_TID_AC;
}
void
ath_txfrag_cleanup(struct ath_softc *sc,
ath_bufhead *frags, struct ieee80211_node *ni)
{
struct ath_buf *bf, *next;
ATH_TXBUF_LOCK_ASSERT(sc);
TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
/* NB: bf assumed clean */
TAILQ_REMOVE(frags, bf, bf_list);
ath_returnbuf_head(sc, bf);
ieee80211_node_decref(ni);
}
}
/*
* Setup xmit of a fragmented frame. Allocate a buffer
* for each frag and bump the node reference count to
* reflect the held reference to be setup by ath_tx_start.
*/
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
struct mbuf *m0, struct ieee80211_node *ni)
{
struct mbuf *m;
struct ath_buf *bf;
ATH_TXBUF_LOCK(sc);
for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
/* XXX non-management? */
bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
if (bf == NULL) { /* out of buffers, cleanup */
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
__func__);
ath_txfrag_cleanup(sc, frags, ni);
break;
}
ieee80211_node_incref(ni);
TAILQ_INSERT_TAIL(frags, bf, bf_list);
}
ATH_TXBUF_UNLOCK(sc);
return !TAILQ_EMPTY(frags);
}
/*
* Reclaim mbuf resources. For fragmented frames we
* need to claim each frag chained with m_nextpkt.
*/
void
ath_freetx(struct mbuf *m)
{
struct mbuf *next;
do {
next = m->m_nextpkt;
m->m_nextpkt = NULL;
m_freem(m);
} while ((m = next) != NULL);
}
static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
struct mbuf *m;
int error;
/*
* Load the DMA map so any coalescing is done. This
* also calculates the number of descriptors we need.
*/
error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
bf->bf_segs, &bf->bf_nseg,
BUS_DMA_NOWAIT);
if (error == EFBIG) {
/* XXX packet requires too many descriptors */
bf->bf_nseg = ATH_MAX_SCATTER + 1;
} else if (error != 0) {
sc->sc_stats.ast_tx_busdma++;
ath_freetx(m0);
return error;
}
/*
* Discard null packets and check for packets that
* require too many TX descriptors. We try to convert
* the latter to a cluster.
*/
if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */
sc->sc_stats.ast_tx_linear++;
m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
if (m == NULL) {
ath_freetx(m0);
sc->sc_stats.ast_tx_nombuf++;
return ENOMEM;
}
m0 = m;
error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
bf->bf_segs, &bf->bf_nseg,
BUS_DMA_NOWAIT);
if (error != 0) {
sc->sc_stats.ast_tx_busdma++;
ath_freetx(m0);
return error;
}
KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
("too many segments after defrag; nseg %u", bf->bf_nseg));
} else if (bf->bf_nseg == 0) { /* null packet, discard */
sc->sc_stats.ast_tx_nodata++;
ath_freetx(m0);
return EIO;
}
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
__func__, m0, m0->m_pkthdr.len);
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
bf->bf_m = m0;
return 0;
}
/*
* Chain together segments+descriptors for a frame - 11n or otherwise.
*
* For aggregates, this is called on each frame in the aggregate.
*/
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
struct ath_buf *bf, int is_aggr, int is_first_subframe,
int is_last_subframe)
{
struct ath_hal *ah = sc->sc_ah;
char *ds;
int i, bp, dsp;
HAL_DMA_ADDR bufAddrList[4];
uint32_t segLenList[4];
int numTxMaps = 1;
int isFirstDesc = 1;
/*
* XXX There's txdma and txdma_mgmt; the descriptor
* sizes must match.
*/
struct ath_descdma *dd = &sc->sc_txdma;
/*
* Fill in the remainder of the descriptor info.
*/
/*
* We need the number of TX data pointers in each descriptor.
* EDMA and later chips support 4 TX buffers per descriptor;
* previous chips just support one.
*/
numTxMaps = sc->sc_tx_nmaps;
/*
* For EDMA and later chips ensure the TX map is fully populated
* before advancing to the next descriptor.
*/
ds = (char *) bf->bf_desc;
bp = dsp = 0;
bzero(bufAddrList, sizeof(bufAddrList));
bzero(segLenList, sizeof(segLenList));
for (i = 0; i < bf->bf_nseg; i++) {
bufAddrList[bp] = bf->bf_segs[i].ds_addr;
segLenList[bp] = bf->bf_segs[i].ds_len;
bp++;
/*
* Go to the next segment if this isn't the last segment
* and there's space in the current TX map.
*/
if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
continue;
/*
* Last segment or we're out of buffer pointers.
*/
bp = 0;
if (i == bf->bf_nseg - 1)
ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
else
ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
bf->bf_daddr + dd->dd_descsize * (dsp + 1));
/*
* XXX This assumes that bfs_txq is the actual destination
* hardware queue at this point. It may not have been
* assigned, it may actually be pointing to the multicast
* software TXQ id. These must be fixed!
*/
ath_hal_filltxdesc(ah, (struct ath_desc *) ds
, bufAddrList
, segLenList
, bf->bf_descid /* XXX desc id */
, bf->bf_state.bfs_tx_queue
, isFirstDesc /* first segment */
, i == bf->bf_nseg - 1 /* last segment */
, (struct ath_desc *) ds0 /* first descriptor */
);
/*
* Make sure the 11n aggregate fields are cleared.
*
* XXX TODO: this doesn't need to be called for
* aggregate frames; as it'll be called on all
* sub-frames. Since the descriptors are in
* non-cacheable memory, this leads to some
* rather slow writes on MIPS/ARM platforms.
*/
if (ath_tx_is_11n(sc))
ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);
/*
* If 11n is enabled, set it up as if it's an aggregate
* frame.
*/
if (is_last_subframe) {
ath_hal_set11n_aggr_last(sc->sc_ah,
(struct ath_desc *) ds);
} else if (is_aggr) {
/*
* This clears the aggrlen field; so
* the caller needs to call set_aggr_first()!
*
* XXX TODO: don't call this for the first
* descriptor in the first frame in an
* aggregate!
*/
ath_hal_set11n_aggr_middle(sc->sc_ah,
(struct ath_desc *) ds,
bf->bf_state.bfs_ndelim);
}
isFirstDesc = 0;
bf->bf_lastds = (struct ath_desc *) ds;
/*
* Don't forget to skip to the next descriptor.
*/
ds += sc->sc_tx_desclen;
dsp++;
/*
* .. and don't forget to blank these out!
*/
bzero(bufAddrList, sizeof(bufAddrList));
bzero(segLenList, sizeof(segLenList));
}
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}
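/*
 * Worked example of the segment packing above (illustrative only):
 * with bf_nseg = 6 and numTxMaps = 4 (EDMA-style chips) the first
 * descriptor carries segments 0..3 and the second carries segments
 * 4..5; with numTxMaps = 1 (legacy chips) each segment gets its own
 * descriptor and six descriptors end up chained via settxdesclink().
 */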
/*
* Set the rate control fields in the given descriptor based on
* the bf_state fields and node state.
*
* The bfs fields should already be set with the relevant rate
* control information, including whether MRR is to be enabled.
*
* Since the FreeBSD HAL currently sets up the first TX rate
* in ath_hal_setuptxdesc(), this will setup the MRR
* conditionally for the pre-11n chips, and call ath_buf_set_rate
* unconditionally for 11n chips. These require the 11n rate
* scenario to be set if MCS rates are enabled, so it's easier
* to just always call it. The caller can then only set rates 2, 3
* and 4 if multi-rate retry is needed.
*/
static void
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
struct ath_buf *bf)
{
struct ath_rc_series *rc = bf->bf_state.bfs_rc;
/* If mrr is disabled, blank tries 1, 2, 3 */
if (! bf->bf_state.bfs_ismrr)
rc[1].tries = rc[2].tries = rc[3].tries = 0;
#if 0
/*
* If NOACK is set, just set ntries=1.
*/
else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
rc[1].tries = rc[2].tries = rc[3].tries = 0;
rc[0].tries = 1;
}
#endif
/*
* Always call - that way a retried descriptor will
* have the MRR fields overwritten.
*
* XXX TODO: see if this is really needed - setting up
* the first descriptor should set the MRR fields to 0
* for us anyway.
*/
if (ath_tx_is_11n(sc)) {
ath_buf_set_rate(sc, ni, bf);
} else {
ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
, rc[1].ratecode, rc[1].tries
, rc[2].ratecode, rc[2].tries
, rc[3].ratecode, rc[3].tries
);
}
}
/*
* Setup segments+descriptors for an 11n aggregate.
* bf_first is the first buffer in the aggregate.
* The descriptor list must already have been linked together using
* bf->bf_next.
*/
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
struct ath_buf *bf, *bf_prev = NULL;
struct ath_desc *ds0 = bf_first->bf_desc;
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
__func__, bf_first->bf_state.bfs_nframes,
bf_first->bf_state.bfs_al);
bf = bf_first;
if (bf->bf_state.bfs_txrate0 == 0)
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
__func__, bf, 0);
if (bf->bf_state.bfs_rc[0].ratecode == 0)
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
__func__, bf, 0);
/*
* Setup all descriptors of all subframes - this will
* call ath_hal_set11n_aggr_middle() on every frame.
*/
while (bf != NULL) {
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
__func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
SEQNO(bf->bf_state.bfs_seqno));
/*
* Setup the initial fields for the first descriptor - all
* the non-11n specific stuff.
*/
ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
, bf->bf_state.bfs_pktlen /* packet length */
, bf->bf_state.bfs_hdrlen /* header length */
, bf->bf_state.bfs_atype /* Atheros packet type */
, bf->bf_state.bfs_txpower /* txpower */
, bf->bf_state.bfs_txrate0
, bf->bf_state.bfs_try0 /* series 0 rate/tries */
, bf->bf_state.bfs_keyix /* key cache index */
, bf->bf_state.bfs_txantenna /* antenna mode */
, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */
, bf->bf_state.bfs_ctsrate /* rts/cts rate */
, bf->bf_state.bfs_ctsduration /* rts/cts duration */
);
/*
* First descriptor? Setup the rate control and initial
* aggregate header information.
*/
if (bf == bf_first) {
/*
* setup first desc with rate and aggr info
*/
ath_tx_set_ratectrl(sc, bf->bf_node, bf);
}
/*
* Setup the descriptors for a multi-descriptor frame.
* This is both aggregate and non-aggregate aware.
*/
ath_tx_chaindesclist(sc, ds0, bf,
1, /* is_aggr */
!! (bf == bf_first), /* is_first_subframe */
!! (bf->bf_next == NULL) /* is_last_subframe */
);
if (bf == bf_first) {
/*
* Initialise the first 11n aggregate with the
* aggregate length and aggregate enable bits.
*/
ath_hal_set11n_aggr_first(sc->sc_ah,
ds0,
bf->bf_state.bfs_al,
bf->bf_state.bfs_ndelim);
}
/*
* Link the last descriptor of the previous frame
* to the beginning descriptor of this frame.
*/
if (bf_prev != NULL)
ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
bf->bf_daddr);
/* Save a copy so we can link the next descriptor in */
bf_prev = bf;
bf = bf->bf_next;
}
/*
* Set the first descriptor bf_lastds field to point to
* the last descriptor in the last subframe, that's where
* the status update will occur.
*/
bf_first->bf_lastds = bf_prev->bf_lastds;
/*
* And bf_last in the first descriptor points to the end of
* the aggregate list.
*/
bf_first->bf_last = bf_prev;
/*
* For non-AR9300 NICs, which require the rate control
* in the final descriptor - let's set that up now.
*
* This is because the filltxdesc() HAL call doesn't
* populate the last segment with rate control information
* if firstSeg is also true. For non-aggregate frames
* that is fine, as the first frame already has rate control
* info. But if the last frame in an aggregate has one
* descriptor, both firstseg and lastseg will be true and
* the rate info isn't copied.
*
* This is inefficient on MIPS/ARM platforms that have
* non-cacheable memory for TX descriptors, but we'll just
* make do for now.
*
* As to why the rate table is stashed in the last descriptor
* rather than the first descriptor? Because proctxdesc()
* is called on the final descriptor in an MPDU or A-MPDU -
* ie, the one that gets updated by the hardware upon
* completion. That way proctxdesc() doesn't need to know
* about the first _and_ last TX descriptor.
*/
ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}
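/*
 * Illustrative picture of the linkage built above for a three frame
 * aggregate A -> B -> C (chained via bf_next):
 *
 * - A's last descriptor links to B's first, B's last links to C's first;
 * - afterwards A.bf_lastds is pointed at C's final descriptor (where
 * the completion status will be read) and A.bf_last at C itself.
 */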
/*
* Hand-off a frame to the multicast TX queue.
*
* This is a software TXQ which will be appended to the CAB queue
* during the beacon setup code.
*
* XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
* as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
* with the actual hardware txq, or all of this will fall apart.
*
* XXX It may not be a bad idea to just stuff the QCU ID into bf_state
* and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
* correctly.
*/
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf)
{
ATH_TX_LOCK_ASSERT(sc);
KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
("%s: busy status 0x%x", __func__, bf->bf_flags));
/*
* Ensure that the tx queue is the cabq, so things get
* mapped correctly.
*/
if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
__func__, bf, bf->bf_state.bfs_tx_queue,
txq->axq_qnum);
}
ATH_TXQ_LOCK(txq);
if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
struct ieee80211_frame *wh;
/* mark previous frame */
wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
BUS_DMASYNC_PREWRITE);
/* link descriptor */
ath_hal_settxdesclink(sc->sc_ah,
bf_last->bf_lastds,
bf->bf_daddr);
}
ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
ATH_TXQ_UNLOCK(txq);
}
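/*
 * Illustrative picture of the CAB/mcast chaining above: appending a
 * buffer marks the old tail with MORE_DATA and points its final
 * descriptor at the newcomer, so the whole software queue forms a
 * single DMA chain that can later be handed to the CAB queue as one
 * unit:
 *
 * [old tail: MORE_DATA set, last desc link -> new bf_daddr] -> [new tail]
 */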
/*
* Hand-off packet to a hardware queue.
*/
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf)
{
struct ath_hal *ah = sc->sc_ah;
struct ath_buf *bf_first;
/*
* Insert the frame on the outbound list and pass it on
* to the hardware. Multicast frames buffered for power
* save stations and transmitted from the CAB queue are stored
* on a s/w only queue and loaded on to the CAB queue in
* the SWBA handler since frames only go out on DTIM and
* to avoid possible races.
*/
ATH_TX_LOCK_ASSERT(sc);
KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
("%s: busy status 0x%x", __func__, bf->bf_flags));
KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
("ath_tx_handoff_hw called for mcast queue"));
/*
* XXX We should instead just verify that sc_txstart_cnt
* or ath_txproc_cnt > 0. That would mean that
* the reset is going to be waiting for us to complete.
*/
if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) {
device_printf(sc->sc_dev,
"%s: TX dispatch without holding txcount/txstart refcnt!\n",
__func__);
}
/*
* XXX .. this is going to cause the hardware to get upset;
* so we really should find some way to drop or queue
* things.
*/
ATH_TXQ_LOCK(txq);
/*
* XXX TODO: if there's a holdingbf, then
* ATH_TXQ_PUTRUNNING should be clear.
*
* If there is a holdingbf and the list is empty,
* then axq_link should be pointing to the holdingbf.
*
* Otherwise it should point to the last descriptor
* in the last ath_buf.
*
* In any case, we should really ensure that we
* update the previous descriptor link pointer to
* this descriptor, regardless of all of the above state.
*
* For now this is captured by having axq_link point
* to either the holdingbf (if the TXQ list is empty)
* or the end of the list (if the TXQ list isn't empty.)
* I'd rather just kill axq_link here and do it as above.
*/
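/*
 * Illustrative sketch only (not compiled): the first invariant above
 * could be written as an assertion, assuming the holding buffer is
 * kept in txq->axq_holdingbf (that field isn't otherwise referenced
 * in this function).
 */
#if 0
KASSERT(txq->axq_holdingbf == NULL ||
(txq->axq_flags & ATH_TXQ_PUTRUNNING) == 0,
("%s: Q%d: holding buffer present with PUTRUNNING set",
__func__, txq->axq_qnum));
#endif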
/*
* Append the frame to the TX queue.
*/
ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
ATH_KTR(sc, ATH_KTR_TX, 3,
"ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
"depth=%d",
txq->axq_qnum,
bf,
txq->axq_depth);
/*
* If there's a link pointer, update it.
*
* XXX we should replace this with the above logic, just
* to kill axq_link with fire.
*/
if (txq->axq_link != NULL) {
*txq->axq_link = bf->bf_daddr;
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
txq->axq_qnum, txq->axq_link,
(caddr_t)bf->bf_daddr, bf->bf_desc,
txq->axq_depth);
ATH_KTR(sc, ATH_KTR_TX, 5,
"ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
"lastds=%d",
txq->axq_qnum, txq->axq_link,
(caddr_t)bf->bf_daddr, bf->bf_desc,
bf->bf_lastds);
}
/*
* If we've not pushed anything into the hardware yet,
* push the head of the queue into the TxDP.
*
* Once we've started DMA, there's no guarantee that
* updating the TxDP with a new value will actually work.
* So we just don't do that - if we hit the end of the list,
* we keep that buffer around (the "holding buffer") and
* restart DMA by updating the link pointer of _that_
* descriptor.
*/
if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
bf_first = TAILQ_FIRST(&txq->axq_q);
txq->axq_flags |= ATH_TXQ_PUTRUNNING;
ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: TXDP[%u] = %p (%p) depth %d\n",
__func__, txq->axq_qnum,
(caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
txq->axq_depth);
ATH_KTR(sc, ATH_KTR_TX, 5,
"ath_tx_handoff: TXDP[%u] = %p (%p) "
"lastds=%p depth %d",
txq->axq_qnum,
(caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
bf_first->bf_lastds,
txq->axq_depth);
}
/*
* Ensure that the bf TXQ matches this TXQ, so later
* checking and holding buffer manipulation is sane.
*/
if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
__func__, bf, bf->bf_state.bfs_tx_queue,
txq->axq_qnum);
}
/*
* Track aggregate queue depth.
*/
if (bf->bf_state.bfs_aggr)
txq->axq_aggr_depth++;
/*
* Update the link pointer.
*/
ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);
/*
* Start DMA.
*
* If we wrote a TxDP above, DMA will start from here.
*
* If DMA is running, it'll do nothing.
*
* If the DMA engine hit the end of the QCU list (ie LINK=NULL,
* or VEOL) then it stops after the last descriptor it transmitted.
* We then append a new frame by updating the link pointer
* in that descriptor and then kick TxE here; it will re-read
* that last descriptor and find the new descriptor to transmit.
*
* This is why we keep the holding descriptor around.
*/
ath_hal_txstart(ah, txq->axq_qnum);
ATH_TXQ_UNLOCK(txq);
ATH_KTR(sc, ATH_KTR_TX, 1,
"ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
}
/*
* Restart TX DMA for the given TXQ.
*
* This must be called whether the queue is empty or not.
*/
static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_buf *bf, *bf_last;
ATH_TXQ_LOCK_ASSERT(txq);
/* XXX make this ATH_TXQ_FIRST */
bf = TAILQ_FIRST(&txq->axq_q);
bf_last = ATH_TXQ_LAST(txq, axq_q_s);
if (bf == NULL)
return;
DPRINTF(sc, ATH_DEBUG_RESET,
"%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
__func__,
txq->axq_qnum,
bf,
bf_last,
(uint32_t) bf->bf_daddr);
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_RESET)
ath_tx_dump(sc, txq);
#endif
/*
* This is called from a restart, so DMA is known to be
* completely stopped.
*/
KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
("%s: Q%d: called with PUTRUNNING=1\n",
__func__,
txq->axq_qnum));
ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
txq->axq_flags |= ATH_TXQ_PUTRUNNING;
ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
&txq->axq_link);
ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}
/*
* Hand off a packet to the hardware (or mcast queue.)
*
* The relevant hardware txq should be locked.
*/
static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf)
{
ATH_TX_LOCK_ASSERT(sc);
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
ath_tx_alq_post(sc, bf);
#endif
if (txq->axq_qnum == ATH_TXQ_SWQ)
ath_tx_handoff_mcast(sc, txq, bf);
else
ath_tx_handoff_hw(sc, txq, bf);
}
static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
int *keyix)
{
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
__func__,
*hdrlen,
*pktlen,
isfrag,
iswep,
m0);
if (iswep) {
const struct ieee80211_cipher *cip;
struct ieee80211_key *k;
/*
* Construct the 802.11 header+trailer for an encrypted
* frame. The only reason this can fail is because of an
* unknown or unsupported cipher/key type.
*/
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
/*
* This can happen when the key is yanked after the
* frame was queued. Just discard the frame; the
* 802.11 layer counts failures and provides
* debugging/diagnostics.
*/
return (0);
}
/*
* Adjust the packet + header lengths for the crypto
* additions and calculate the h/w key index. When
* a s/w mic is done the frame will have had any mic
* added to it prior to entry so m0->m_pkthdr.len will
* account for it. Otherwise we need to add it to the
* packet length.
*/
cip = k->wk_cipher;
(*hdrlen) += cip->ic_header;
(*pktlen) += cip->ic_header + cip->ic_trailer;
/* NB: frags always have any TKIP MIC done in s/w */
if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
(*pktlen) += cip->ic_miclen;
(*keyix) = k->wk_keyix;
} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
/*
* Use station key cache slot, if assigned.
*/
(*keyix) = ni->ni_ucastkey.wk_keyix;
if ((*keyix) == IEEE80211_KEYIX_NONE)
(*keyix) = HAL_TXKEYIX_INVALID;
} else
(*keyix) = HAL_TXKEYIX_INVALID;
return (1);
}
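/*
 * Summary of the keyix selection above (illustrative): encrypted
 * frames use the key returned by ieee80211_crypto_encap(); otherwise,
 * if the station has a "clear" unicast key slot assigned, that slot
 * is used; failing that, HAL_TXKEYIX_INVALID is passed to indicate
 * that no key cache entry should be used for this frame.
 */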
/*
* Calculate whether interoperability protection is required for
* this frame.
*
* This requires the rate control information be filled in,
* as the protection requirement depends upon the current
* operating mode / PHY.
*/
static void
ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
{
struct ieee80211_frame *wh;
uint8_t rix;
uint16_t flags;
int shortPreamble;
const HAL_RATE_TABLE *rt = sc->sc_currates;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
flags = bf->bf_state.bfs_txflags;
rix = bf->bf_state.bfs_rc[0].rix;
shortPreamble = bf->bf_state.bfs_shpream;
wh = mtod(bf->bf_m, struct ieee80211_frame *);
/*
* If 802.11g protection is enabled, determine whether
* to use RTS/CTS or just CTS. Note that this is only
* done for OFDM unicast frames.
*/
if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
rt->info[rix].phy == IEEE80211_T_OFDM &&
(flags & HAL_TXDESC_NOACK) == 0) {
bf->bf_state.bfs_doprot = 1;
/* XXX fragments must use CCK rates w/ protection */
if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
flags |= HAL_TXDESC_RTSENA;
} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
flags |= HAL_TXDESC_CTSENA;
}
/*
* For frags it would be desirable to use the
* highest CCK rate for RTS/CTS. But stations
* farther away may detect it at a lower CCK rate
* so use the configured protection rate instead
* (for now).
*/
sc->sc_stats.ast_tx_protect++;
}
/*
* If 11n protection is enabled and it's a HT frame,
* enable RTS.
*
* XXX ic_htprotmode or ic_curhtprotmode?
* XXX should ic_htprotmode only matter if ic_curhtprotmode
* XXX indicates it's not a HT pure environment?
*/
if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
rt->info[rix].phy == IEEE80211_T_HT &&
(flags & HAL_TXDESC_NOACK) == 0) {
flags |= HAL_TXDESC_RTSENA;
sc->sc_stats.ast_tx_htprotect++;
}
bf->bf_state.bfs_txflags = flags;
}
/*
* Update the frame duration given the currently selected rate.
*
* This also updates the frame duration value, so it will require
* a DMA flush.
*/
static void
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
{
struct ieee80211_frame *wh;
uint8_t rix;
uint16_t flags;
int shortPreamble;
struct ath_hal *ah = sc->sc_ah;
const HAL_RATE_TABLE *rt = sc->sc_currates;
int isfrag = bf->bf_m->m_flags & M_FRAG;
flags = bf->bf_state.bfs_txflags;
rix = bf->bf_state.bfs_rc[0].rix;
shortPreamble = bf->bf_state.bfs_shpream;
wh = mtod(bf->bf_m, struct ieee80211_frame *);
/*
* Calculate duration. This logically belongs in the 802.11
* layer but it lacks sufficient information to calculate it.
*/
if ((flags & HAL_TXDESC_NOACK) == 0 &&
(wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
u_int16_t dur;
if (shortPreamble)
dur = rt->info[rix].spAckDuration;
else
dur = rt->info[rix].lpAckDuration;
if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
dur += dur; /* additional SIFS+ACK */
/*
* Include the size of next fragment so NAV is
* updated properly. The last fragment uses only
* the ACK duration
*
* XXX TODO: ensure that the rate lookup for each
* fragment is the same as the rate used by the
* first fragment!
*/
dur += ath_hal_computetxtime(ah,
rt,
bf->bf_nextfraglen,
rix, shortPreamble);
}
if (isfrag) {
/*
* Force hardware to use computed duration for next
* fragment by disabling multi-rate retry which updates
* duration based on the multi-rate duration table.
*/
bf->bf_state.bfs_ismrr = 0;
bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
/* XXX update bfs_rc[0].try? */
}
/* Update the duration field itself */
*(u_int16_t *)wh->i_dur = htole16(dur);
}
}
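/*
 * Illustrative example of the fragment case above: for a non-final
 * fragment the duration works out to roughly
 *
 * dur = AckDuration(rix) (SIFS + ACK for this fragment)
 * + AckDuration(rix) (SIFS + ACK for the next exchange)
 * + txtime(bf_nextfraglen at rix) (the next fragment itself)
 *
 * which is what the receiver needs in order to NAV-protect the rest
 * of the fragment burst.
 */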
static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
int cix, int shortPreamble)
{
uint8_t ctsrate;
/*
* CTS transmit rate is derived from the transmit rate
* by looking in the h/w rate table. We must also factor
* in whether or not a short preamble is to be used.
*/
/* NB: cix is set above where RTS/CTS is enabled */
KASSERT(cix != 0xff, ("cix not setup"));
ctsrate = rt->info[cix].rateCode;
/* XXX this should only matter for legacy rates */
if (shortPreamble)
ctsrate |= rt->info[cix].shortPreamble;
return (ctsrate);
}
/*
* Calculate the RTS/CTS duration for legacy frames.
*/
static int
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
int flags)
{
int ctsduration = 0;
/* This mustn't be called for HT modes */
if (rt->info[cix].phy == IEEE80211_T_HT) {
printf("%s: HT rate where it shouldn't be (0x%x)\n",
__func__, rt->info[cix].rateCode);
return (-1);
}
/*
* Compute the transmit duration based on the frame
* size and the size of an ACK frame. We call into the
* HAL to do the computation since it depends on the
* characteristics of the actual PHY being used.
*
* NB: CTS is assumed the same size as an ACK so we can
* use the precalculated ACK durations.
*/
if (shortPreamble) {
if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
ctsduration += rt->info[cix].spAckDuration;
ctsduration += ath_hal_computetxtime(ah,
rt, pktlen, rix, AH_TRUE);
if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
ctsduration += rt->info[rix].spAckDuration;
} else {
if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */
ctsduration += rt->info[cix].lpAckDuration;
ctsduration += ath_hal_computetxtime(ah,
rt, pktlen, rix, AH_FALSE);
if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */
ctsduration += rt->info[rix].lpAckDuration;
}
return (ctsduration);
}
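/*
 * Illustrative summary of the calculation above: with RTS enabled and
 * an ACK expected, at long preamble,
 *
 * ctsduration = lpAckDuration[cix] (SIFS + CTS)
 * + txtime(pktlen at rix) (the data frame itself)
 * + lpAckDuration[rix] (SIFS + ACK)
 *
 * The short preamble case is identical but uses spAckDuration.
 */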
/*
* Update the given ath_buf with updated rts/cts setup and duration
* values.
*
* To support rate lookups for each software retry, the rts/cts rate
* and cts duration must be re-calculated.
*
* This function assumes the RTS/CTS flags have been set as needed;
* mrr has been disabled; and the rate control lookup has been done.
*
* XXX TODO: MRR need only be disabled for the pre-11n NICs.
* XXX The 11n NICs support per-rate RTS/CTS configuration.
*/
static void
ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
{
uint16_t ctsduration = 0;
uint8_t ctsrate = 0;
uint8_t rix = bf->bf_state.bfs_rc[0].rix;
uint8_t cix = 0;
const HAL_RATE_TABLE *rt = sc->sc_currates;
/*
* No RTS/CTS enabled? Don't bother.
*/
if ((bf->bf_state.bfs_txflags &
(HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
/* XXX is this really needed? */
bf->bf_state.bfs_ctsrate = 0;
bf->bf_state.bfs_ctsduration = 0;
return;
}
/*
* If protection is enabled, use the protection rix control
* rate. Otherwise use the rate0 control rate.
*/
if (bf->bf_state.bfs_doprot)
rix = sc->sc_protrix;
else
rix = bf->bf_state.bfs_rc[0].rix;
/*
* If the raw path has hard-coded ctsrate0 to something,
* use it.
*/
if (bf->bf_state.bfs_ctsrate0 != 0)
cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
else
/* Control rate from above */
cix = rt->info[rix].controlRate;
/* Calculate the rtscts rate for the given cix */
ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
bf->bf_state.bfs_shpream);
/* The 11n chipsets do ctsduration calculations for you */
if (! ath_tx_is_11n(sc))
ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
rt, bf->bf_state.bfs_txflags);
/* Squirrel away in ath_buf */
bf->bf_state.bfs_ctsrate = ctsrate;
bf->bf_state.bfs_ctsduration = ctsduration;
/*
* Must disable multi-rate retry when using RTS/CTS.
*/
if (!sc->sc_mrrprot) {
bf->bf_state.bfs_ismrr = 0;
bf->bf_state.bfs_try0 =
bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */
}
}
/*
* Setup the descriptor chain for a normal or fast-frame
* frame.
*
* XXX TODO: extend to include the destination hardware QCU ID.
* Make sure that is correct. Make sure that when being added
* to the mcastq, the CABQ QCUID is set or things will get a bit
* odd.
*/
static void
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
{
struct ath_desc *ds = bf->bf_desc;
struct ath_hal *ah = sc->sc_ah;
if (bf->bf_state.bfs_txrate0 == 0)
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: bf=%p, txrate0=%d\n", __func__, bf, 0);
ath_hal_setuptxdesc(ah, ds
, bf->bf_state.bfs_pktlen /* packet length */
, bf->bf_state.bfs_hdrlen /* header length */
, bf->bf_state.bfs_atype /* Atheros packet type */
, bf->bf_state.bfs_txpower /* txpower */
, bf->bf_state.bfs_txrate0
, bf->bf_state.bfs_try0 /* series 0 rate/tries */
, bf->bf_state.bfs_keyix /* key cache index */
, bf->bf_state.bfs_txantenna /* antenna mode */
, bf->bf_state.bfs_txflags /* flags */
, bf->bf_state.bfs_ctsrate /* rts/cts rate */
, bf->bf_state.bfs_ctsduration /* rts/cts duration */
);
/*
* This will be overridden when the descriptor chain is written.
*/
bf->bf_lastds = ds;
bf->bf_last = bf;
/* Set rate control and descriptor chain for this frame */
ath_tx_set_ratectrl(sc, bf->bf_node, bf);
ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
}
/*
* Do a rate lookup.
*
* This performs a rate lookup for the given ath_buf only if it's required.
* Non-data frames and raw frames don't require it.
*
* This populates the primary and MRR entries; MRR values are
* then disabled later on if something requires it (eg RTS/CTS on
* pre-11n chipsets).
*
* This needs to be done before the RTS/CTS fields are calculated
* as they may depend upon the rate chosen.
*/
static void
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
{
uint8_t rate, rix;
int try0;
if (! bf->bf_state.bfs_doratelookup)
return;
/* Get rid of any previous state */
bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
bf->bf_state.bfs_pktlen, &rix, &try0, &rate);
/* In case MRR is disabled, make sure rc[0] is setup correctly */
bf->bf_state.bfs_rc[0].rix = rix;
bf->bf_state.bfs_rc[0].ratecode = rate;
bf->bf_state.bfs_rc[0].tries = try0;
if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
bf->bf_state.bfs_rc);
ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));
sc->sc_txrix = rix; /* for LED blinking */
sc->sc_lastdatarix = rix; /* for fast frames */
bf->bf_state.bfs_try0 = try0;
bf->bf_state.bfs_txrate0 = rate;
}
/*
* Update the CLRDMASK bit in the ath_buf if it needs to be set.
*/
static void
ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
struct ath_buf *bf)
{
struct ath_node *an = ATH_NODE(bf->bf_node);
ATH_TX_LOCK_ASSERT(sc);
if (an->clrdmask == 1) {
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
an->clrdmask = 0;
}
}
/*
* Return whether this frame should be software queued or
* direct dispatched.
*
* When doing powersave, BAR frames should be queued but other management
* frames should be directly sent.
*
* When not doing powersave, stick BAR frames into the hardware queue
* so it goes out even though the queue is paused.
*
* For now, management frames are also software queued by default.
*/
static int
ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
struct mbuf *m0, int *queue_to_head)
{
struct ieee80211_node *ni = &an->an_node;
struct ieee80211_frame *wh;
uint8_t type, subtype;
wh = mtod(m0, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
(*queue_to_head) = 0;
/* If it's not in powersave - direct-dispatch BAR */
if ((ATH_NODE(ni)->an_is_powersave == 0)
&& type == IEEE80211_FC0_TYPE_CTL &&
subtype == IEEE80211_FC0_SUBTYPE_BAR) {
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: BAR: TX'ing direct\n", __func__);
return (0);
} else if ((ATH_NODE(ni)->an_is_powersave == 1)
&& type == IEEE80211_FC0_TYPE_CTL &&
subtype == IEEE80211_FC0_SUBTYPE_BAR) {
/* BAR TX whilst asleep; queue */
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: swq: TX'ing\n", __func__);
(*queue_to_head) = 1;
return (1);
} else if ((ATH_NODE(ni)->an_is_powersave == 1)
&& (type == IEEE80211_FC0_TYPE_MGT ||
type == IEEE80211_FC0_TYPE_CTL)) {
/*
* Other control/mgmt frame; bypass software queuing
* for now!
*/
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: %6D: Node is asleep; sending mgmt "
"(type=%d, subtype=%d)\n",
__func__, ni->ni_macaddr, ":", type, subtype);
return (0);
} else {
return (1);
}
}
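/*
 * Illustrative caller-side sketch (not compiled): ath_tx_start()
 * further down uses the return value and the queue_to_head hint in
 * exactly this shape.
 */
#if 0
if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0, &queue_to_head))
ath_tx_swq(sc, ni, txq, queue_to_head, bf);
else
ath_tx_xmit_normal(sc, txq, bf);
#endif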
/*
* Transmit the given frame to the hardware.
*
* The frame must already be setup; rate control must already have
* been done.
*
* XXX since the TXQ lock is being held here (and I dislike holding
* it for this long when not doing software aggregation), later on
* break this function into "setup_normal" and "xmit_normal". The
* lock only needs to be held for the ath_tx_handoff call.
*
* XXX we don't update the leak count here - if we're doing
* direct frame dispatch, we need to be able to do it without
* decrementing the leak count (eg multicast queue frames.)
*/
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf)
{
struct ath_node *an = ATH_NODE(bf->bf_node);
struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
ATH_TX_LOCK_ASSERT(sc);
/*
* For now, just enable CLRDMASK. ath_tx_xmit_normal() does
* set a completion handler however it doesn't (yet) properly
* handle the strict ordering requirements needed for normal,
* non-aggregate session frames.
*
* Once this is implemented, only set CLRDMASK like this for
* frames that must go out - eg management/raw frames.
*/
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
/* Setup the descriptor before handoff */
ath_tx_do_ratelookup(sc, bf);
ath_tx_calc_duration(sc, bf);
ath_tx_calc_protection(sc, bf);
ath_tx_set_rtscts(sc, bf);
ath_tx_rate_fill_rcflags(sc, bf);
ath_tx_setds(sc, bf);
/* Track per-TID hardware queue depth correctly */
tid->hwq_depth++;
/* Assign the completion handler */
bf->bf_comp = ath_tx_normal_comp;
/* Hand off to hardware */
ath_tx_handoff(sc, txq, bf);
}
/*
* Do the basic frame setup stuff that's required before the frame
* is added to a software queue.
*
* All frames get mostly the same treatment and it's done once.
* Retransmits fiddle with things like the rate control setup,
* setting the retransmit bit in the packet; doing relevant DMA/bus
* syncing and relinking it (back) into the hardware TX queue.
*
* Note that this may cause the mbuf to be reallocated, so
* m0 may not be valid.
*/
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ath_hal *ah = sc->sc_ah;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
int error, iswep, ismcast, isfrag, ismrr;
int keyix, hdrlen, pktlen, try0 = 0;
u_int8_t rix = 0, txrate = 0;
struct ath_desc *ds;
struct ieee80211_frame *wh;
u_int subtype, flags;
HAL_PKT_TYPE atype;
const HAL_RATE_TABLE *rt;
HAL_BOOL shortPreamble;
struct ath_node *an;
u_int pri;
/*
* To ensure that both sequence numbers and the CCMP PN handling
* is "correct", make sure that the relevant TID queue is locked.
* Otherwise the CCMP PN and seqno may appear out of order, causing
* re-ordered frames to have out of order CCMP PN's, resulting
* in many, many frame drops.
*/
ATH_TX_LOCK_ASSERT(sc);
wh = mtod(m0, struct ieee80211_frame *);
iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
isfrag = m0->m_flags & M_FRAG;
hdrlen = ieee80211_anyhdrsize(wh);
/*
* Packet length must not include any
* pad bytes; deduct them here.
*/
pktlen = m0->m_pkthdr.len - (hdrlen & 3);
/* Handle encryption twiddling if needed */
if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
&pktlen, &keyix)) {
ath_freetx(m0);
return EIO;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
pktlen += IEEE80211_CRC_LEN;
/*
* Load the DMA map so any coalescing is done. This
* also calculates the number of descriptors we need.
*/
error = ath_tx_dmasetup(sc, bf, m0);
if (error != 0)
return error;
KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
bf->bf_node = ni; /* NB: held reference */
m0 = bf->bf_m; /* NB: may have changed */
wh = mtod(m0, struct ieee80211_frame *);
/* setup descriptors */
ds = bf->bf_desc;
rt = sc->sc_currates;
KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
/*
* NB: the 802.11 layer marks whether or not we should
* use short preamble based on the current mode and
* negotiated parameters.
*/
if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
(ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
shortPreamble = AH_TRUE;
sc->sc_stats.ast_tx_shortpre++;
} else {
shortPreamble = AH_FALSE;
}
an = ATH_NODE(ni);
//flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
flags = 0;
ismrr = 0; /* default no multi-rate retry */
pri = M_WME_GETAC(m0); /* honor classification */
/* XXX use txparams instead of fixed values */
/*
* Calculate Atheros packet type from IEEE80211 packet header,
* setup for rate calculations, and select h/w transmit queue.
*/
switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
case IEEE80211_FC0_TYPE_MGT:
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
atype = HAL_PKT_TYPE_BEACON;
else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
atype = HAL_PKT_TYPE_PROBE_RESP;
else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
atype = HAL_PKT_TYPE_ATIM;
else
atype = HAL_PKT_TYPE_NORMAL; /* XXX */
rix = an->an_mgmtrix;
txrate = rt->info[rix].rateCode;
if (shortPreamble)
txrate |= rt->info[rix].shortPreamble;
try0 = ATH_TXMGTTRY;
flags |= HAL_TXDESC_INTREQ; /* force interrupt */
break;
case IEEE80211_FC0_TYPE_CTL:
atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */
rix = an->an_mgmtrix;
txrate = rt->info[rix].rateCode;
if (shortPreamble)
txrate |= rt->info[rix].shortPreamble;
try0 = ATH_TXMGTTRY;
flags |= HAL_TXDESC_INTREQ; /* force interrupt */
break;
case IEEE80211_FC0_TYPE_DATA:
atype = HAL_PKT_TYPE_NORMAL; /* default */
/*
* Data frames: multicast frames go out at a fixed rate,
* EAPOL frames use the mgmt frame rate; otherwise consult
* the rate control module for the rate to use.
*/
if (ismcast) {
rix = an->an_mcastrix;
txrate = rt->info[rix].rateCode;
if (shortPreamble)
txrate |= rt->info[rix].shortPreamble;
try0 = 1;
} else if (m0->m_flags & M_EAPOL) {
/* XXX? maybe always use long preamble? */
rix = an->an_mgmtrix;
txrate = rt->info[rix].rateCode;
if (shortPreamble)
txrate |= rt->info[rix].shortPreamble;
try0 = ATH_TXMAXTRY; /* XXX?too many? */
} else {
/*
* Do rate lookup on each TX, rather than using
* the hard-coded TX information decided here.
*/
ismrr = 1;
bf->bf_state.bfs_doratelookup = 1;
}
if (cap->cap_wmeParams[pri].wmep_noackPolicy)
flags |= HAL_TXDESC_NOACK;
break;
default:
device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
/* XXX statistic */
/* XXX free tx dmamap */
ath_freetx(m0);
return EIO;
}
/*
* There are two known scenarios where the frame AC doesn't match
* what the destination TXQ is.
*
* + non-QoS frames (eg management?) that the net80211 stack has
* assigned a higher AC to, but since it's a non-QoS TID, it's
* being thrown into TID 16. TID 16 gets the AC_BE queue.
* It's quite possible that management frames should just be
* direct dispatched to hardware rather than go via the software
* queue; that should be investigated in the future. There are
* some specific scenarios where this doesn't make sense, mostly
* surrounding ADDBA request/response - hence why that is special
* cased.
*
* + Multicast frames going into the VAP mcast queue. That shows up
* as "TXQ 11".
*
* This driver should eventually support separate TID and TXQ locking,
* allowing for arbitrary AC frames to appear on arbitrary software
* queues, being queued to the "correct" hardware queue when needed.
*/
#if 0
if (txq != sc->sc_ac2q[pri]) {
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
__func__,
txq,
txq->axq_qnum,
pri,
sc->sc_ac2q[pri],
sc->sc_ac2q[pri]->axq_qnum);
}
#endif
/*
* Calculate miscellaneous flags.
*/
if (ismcast) {
flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */
} else if (pktlen > vap->iv_rtsthreshold &&
(ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */
sc->sc_stats.ast_tx_rts++;
}
if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */
sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
DPRINTF(sc, ATH_DEBUG_TDMA,
"%s: discard frame, ACK required w/ TDMA\n", __func__);
sc->sc_stats.ast_tdma_ack++;
/* XXX free tx dmamap */
ath_freetx(m0);
return EIO;
}
#endif
/*
* Determine if a tx interrupt should be generated for
* this descriptor. We take a tx interrupt to reap
* descriptors when the h/w hits an EOL condition or
* when the descriptor is specifically marked to generate
* an interrupt. We periodically mark descriptors in this
* way to ensure timely replenishing of the supply needed
* for sending frames. Deferring interrupts reduces system
* load and potentially allows more concurrent work to be
* done, but if done too aggressively it can cause senders to
* back up.
*
* NB: use >= to deal with sc_txintrperiod changing
* dynamically through sysctl.
*/
if (flags & HAL_TXDESC_INTREQ) {
txq->axq_intrcnt = 0;
} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
flags |= HAL_TXDESC_INTREQ;
txq->axq_intrcnt = 0;
}
/* This point forward is actual TX bits */
/*
* At this point we are committed to sending the frame
* and we don't need to look at m_nextpkt; clear it in
* case this frame is part of a frag chain.
*/
m0->m_nextpkt = NULL;
if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
sc->sc_hwmap[rix].ieeerate, -1);
if (ieee80211_radiotap_active_vap(vap)) {
u_int64_t tsf = ath_hal_gettsf64(ah);
sc->sc_tx_th.wt_tsf = htole64(tsf);
sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
if (iswep)
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
if (isfrag)
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
ieee80211_radiotap_tx(vap, m0);
}
/* Blank the legacy rate array */
bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
/*
* ath_buf_set_rate needs at least one rate/try to setup
* the rate scenario.
*/
bf->bf_state.bfs_rc[0].rix = rix;
bf->bf_state.bfs_rc[0].tries = try0;
bf->bf_state.bfs_rc[0].ratecode = txrate;
/* Store the decided rate index values away */
bf->bf_state.bfs_pktlen = pktlen;
bf->bf_state.bfs_hdrlen = hdrlen;
bf->bf_state.bfs_atype = atype;
bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
bf->bf_state.bfs_txrate0 = txrate;
bf->bf_state.bfs_try0 = try0;
bf->bf_state.bfs_keyix = keyix;
bf->bf_state.bfs_txantenna = sc->sc_txantenna;
bf->bf_state.bfs_txflags = flags;
bf->bf_state.bfs_shpream = shortPreamble;
/* XXX this should be done in ath_tx_setrate() */
bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */
bf->bf_state.bfs_ctsrate = 0; /* calculated later */
bf->bf_state.bfs_ctsduration = 0;
bf->bf_state.bfs_ismrr = ismrr;
return 0;
}
/*
* Queue a frame to the hardware or software queue.
*
* This can be called by the net80211 code.
*
* XXX what about locking? Or, push the seqno assign into the
* XXX aggregate scheduler so its serialised?
*
* XXX When sending management frames via ath_raw_xmit(),
* should CLRDMASK be set unconditionally?
*/
int
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
struct ath_buf *bf, struct mbuf *m0)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ath_vap *avp = ATH_VAP(vap);
int r = 0;
u_int pri;
int tid;
struct ath_txq *txq;
int ismcast;
const struct ieee80211_frame *wh;
int is_ampdu, is_ampdu_tx, is_ampdu_pending;
ieee80211_seq seqno;
uint8_t type, subtype;
int queue_to_head;
ATH_TX_LOCK_ASSERT(sc);
/*
* Determine the target hardware queue.
*
* For multicast frames, the txq gets overridden appropriately
* depending upon the state of PS.
*
* For any other frame, we do a TID/QoS lookup inside the frame
* to see what the TID should be. If it's a non-QoS frame, the
* AC and TID are overridden. The TID/TXQ code assumes the
* TID is on a predictable hardware TXQ, so we don't support
* having a node TID queued to multiple hardware TXQs.
* This may change in the future but would require some locking
* fudgery.
*/
pri = ath_tx_getac(sc, m0);
tid = ath_tx_gettid(sc, m0);
txq = sc->sc_ac2q[pri];
wh = mtod(m0, struct ieee80211_frame *);
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
/*
* Enforce how deep the multicast queue can grow.
*
* XXX duplicated in ath_raw_xmit().
*/
if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
> sc->sc_txq_mcastq_maxdepth) {
sc->sc_stats.ast_tx_mcastq_overflow++;
m_freem(m0);
return (ENOBUFS);
}
}
/*
* Enforce how deep the unicast queue can grow.
*
* If the node is in power save then we don't want
* the software queue to grow too deep, or a node may
* end up consuming all of the ath_buf entries.
*
* For now, only do this for DATA frames.
*
* We will want to cap how many management/control
* frames get punted to the software queue so it doesn't
* fill up. But the correct solution isn't yet obvious.
* In any case, this check should at least let frames pass
* that we are direct-dispatching.
*
* XXX TODO: duplicate this to the raw xmit path!
*/
if (type == IEEE80211_FC0_TYPE_DATA &&
ATH_NODE(ni)->an_is_powersave &&
ATH_NODE(ni)->an_swq_depth >
sc->sc_txq_node_psq_maxdepth) {
sc->sc_stats.ast_tx_node_psq_overflow++;
m_freem(m0);
return (ENOBUFS);
}
/* A-MPDU TX */
is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
is_ampdu = is_ampdu_tx | is_ampdu_pending;
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
__func__, tid, pri, is_ampdu);
/* Set local packet state, used to queue packets to hardware */
bf->bf_state.bfs_tid = tid;
bf->bf_state.bfs_tx_queue = txq->axq_qnum;
bf->bf_state.bfs_pri = pri;
#if 1
/*
* When servicing one or more stations in power-save mode
* or if there is some mcast data waiting on the mcast
* queue (to prevent out of order delivery), multicast frames
* must be buffered until after the beacon.
*
* TODO: we should lock the mcastq before we check the length.
*/
if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
txq = &avp->av_mcastq;
/*
* Mark the frame as eventually belonging on the CAB
* queue, so the descriptor setup functions will
* correctly initialise the descriptor 'qcuId' field.
*/
bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
}
#endif
/* Do the generic frame setup */
/* XXX should just bzero the bf_state? */
bf->bf_state.bfs_dobaw = 0;
/* A-MPDU TX? Manually set sequence number */
/*
* Don't do it whilst pending; the net80211 layer still
* assigns them.
*/
if (is_ampdu_tx) {
/*
* Always call; this function will
* handle making sure that null data frames
* don't get a sequence number from the current
* TID and thus mess with the BAW.
*/
seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);
/*
* Don't add QoS NULL frames to the BAW.
*/
if (IEEE80211_QOS_HAS_SEQ(wh) &&
subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
bf->bf_state.bfs_dobaw = 1;
}
}
/*
* If needed, the sequence number has been assigned.
* Squirrel it away somewhere easy to get to.
*/
bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;
/* Is ampdu pending? fetch the seqno and print it out */
if (is_ampdu_pending)
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: tid %d: ampdu pending, seqno %d\n",
__func__, tid, M_SEQNO_GET(m0));
/* This also sets up the DMA map */
r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
if (r != 0)
goto done;
/* At this point m0 could have changed! */
m0 = bf->bf_m;
#if 1
/*
* If it's a multicast frame, do a direct-dispatch to the
* destination hardware queue. Don't bother software
* queuing it.
*/
/*
* If it's a BAR frame, do a direct dispatch to the
* destination hardware queue. Don't bother software
* queuing it, as the TID will now be paused.
* Sending a BAR frame can occur from the net80211 txa timer
* (ie, retries) or from the ath txtask (completion call.)
* It queues directly to hardware because the TID is paused
* at this point (and won't be unpaused until the BAR has
* either been TXed successfully or max retries has been
* reached.)
*/
/*
* Until things are better debugged - if this node is asleep
* and we're sending it a non-BAR frame, direct dispatch it.
* Why? Because we need to figure out what's actually being
* sent - eg, during reassociation/reauthentication after
* the node (last) disappeared whilst asleep, the driver should
* have unpaused/unsleep'ed the node. So until that is
* sorted out, use this workaround.
*/
if (txq == &avp->av_mcastq) {
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
ath_tx_xmit_normal(sc, txq, bf);
} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
&queue_to_head)) {
ath_tx_swq(sc, ni, txq, queue_to_head, bf);
} else {
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
ath_tx_xmit_normal(sc, txq, bf);
}
#else
/*
* For now, since there's no software queue,
* direct-dispatch to the hardware.
*/
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
/*
* Update the current leak count if
* we're leaking frames; and set the
* MORE flag as appropriate.
*/
ath_tx_leak_count_update(sc, tid, bf);
ath_tx_xmit_normal(sc, txq, bf);
#endif
done:
return 0;
}
static int
ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
struct ath_buf *bf, struct mbuf *m0,
const struct ieee80211_bpf_params *params)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ath_hal *ah = sc->sc_ah;
struct ieee80211vap *vap = ni->ni_vap;
int error, ismcast, ismrr;
int keyix, hdrlen, pktlen, try0, txantenna;
u_int8_t rix, txrate;
struct ieee80211_frame *wh;
u_int flags;
HAL_PKT_TYPE atype;
const HAL_RATE_TABLE *rt;
struct ath_desc *ds;
u_int pri;
int o_tid = -1;
int do_override;
uint8_t type, subtype;
int queue_to_head;
struct ath_node *an = ATH_NODE(ni);
ATH_TX_LOCK_ASSERT(sc);
wh = mtod(m0, struct ieee80211_frame *);
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
hdrlen = ieee80211_anyhdrsize(wh);
/*
* Packet length must not include any
* pad bytes; deduct them here.
*/
/* XXX honor IEEE80211_BPF_DATAPAD */
pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
ATH_KTR(sc, ATH_KTR_TX, 2,
"ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
__func__, ismcast);
pri = params->ibp_pri & 3;
/* Override pri if the frame isn't a QoS one */
if (! IEEE80211_QOS_HAS_SEQ(wh))
pri = ath_tx_getac(sc, m0);
/* XXX If it's an ADDBA, override the correct queue */
do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
/* Map ADDBA to the correct priority */
if (do_override) {
#if 0
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: overriding tid %d pri %d -> %d\n",
__func__, o_tid, pri, TID_TO_WME_AC(o_tid));
#endif
pri = TID_TO_WME_AC(o_tid);
}
/* Handle encryption twiddling if needed */
if (! ath_tx_tag_crypto(sc, ni,
m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
&hdrlen, &pktlen, &keyix)) {
ath_freetx(m0);
return EIO;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
/* Do the generic frame setup */
/* XXX should just bzero the bf_state? */
bf->bf_state.bfs_dobaw = 0;
error = ath_tx_dmasetup(sc, bf, m0);
if (error != 0)
return error;
m0 = bf->bf_m; /* NB: may have changed */
wh = mtod(m0, struct ieee80211_frame *);
KASSERT((ni != NULL), ("%s: ni=NULL!", __func__));
bf->bf_node = ni; /* NB: held reference */
/* Always enable CLRDMASK for raw frames for now.. */
flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
flags |= HAL_TXDESC_INTREQ; /* force interrupt */
if (params->ibp_flags & IEEE80211_BPF_RTS)
flags |= HAL_TXDESC_RTSENA;
else if (params->ibp_flags & IEEE80211_BPF_CTS) {
/* XXX assume 11g/11n protection? */
bf->bf_state.bfs_doprot = 1;
flags |= HAL_TXDESC_CTSENA;
}
/* XXX leave ismcast to injector? */
if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
flags |= HAL_TXDESC_NOACK;
rt = sc->sc_currates;
KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
/* Fetch first rate information */
rix = ath_tx_findrix(sc, params->ibp_rate0);
try0 = params->ibp_try0;
/*
* Override EAPOL rate as appropriate.
*/
if (m0->m_flags & M_EAPOL) {
/* XXX? maybe always use long preamble? */
rix = an->an_mgmtrix;
try0 = ATH_TXMAXTRY; /* XXX?too many? */
}
txrate = rt->info[rix].rateCode;
if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
txrate |= rt->info[rix].shortPreamble;
sc->sc_txrix = rix;
ismrr = (params->ibp_try1 != 0);
txantenna = params->ibp_pri >> 2;
if (txantenna == 0) /* XXX? */
txantenna = sc->sc_txantenna;
/*
* Since ctsrate is fixed, store it away for later
* use when the descriptor fields are being set.
*/
if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
/*
* NB: we mark all packets as type PSPOLL so the h/w won't
* set the sequence number, duration, etc.
*/
atype = HAL_PKT_TYPE_PSPOLL;
if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
sc->sc_hwmap[rix].ieeerate, -1);
if (ieee80211_radiotap_active_vap(vap)) {
u_int64_t tsf = ath_hal_gettsf64(ah);
sc->sc_tx_th.wt_tsf = htole64(tsf);
sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
if (m0->m_flags & M_FRAG)
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
ieee80211_get_node_txpower(ni));
sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
ieee80211_radiotap_tx(vap, m0);
}
/*
* Formulate first tx descriptor with tx controls.
*/
ds = bf->bf_desc;
/* XXX check return value? */
/* Store the decided rate index values away */
bf->bf_state.bfs_pktlen = pktlen;
bf->bf_state.bfs_hdrlen = hdrlen;
bf->bf_state.bfs_atype = atype;
bf->bf_state.bfs_txpower = MIN(params->ibp_power,
ieee80211_get_node_txpower(ni));
bf->bf_state.bfs_txrate0 = txrate;
bf->bf_state.bfs_try0 = try0;
bf->bf_state.bfs_keyix = keyix;
bf->bf_state.bfs_txantenna = txantenna;
bf->bf_state.bfs_txflags = flags;
bf->bf_state.bfs_shpream =
!! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
/* Set local packet state, used to queue packets to hardware */
bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
bf->bf_state.bfs_pri = pri;
/* XXX this should be done in ath_tx_setrate() */
bf->bf_state.bfs_ctsrate = 0;
bf->bf_state.bfs_ctsduration = 0;
bf->bf_state.bfs_ismrr = ismrr;
/* Blank the legacy rate array */
bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
bf->bf_state.bfs_rc[0].rix = rix;
bf->bf_state.bfs_rc[0].tries = try0;
bf->bf_state.bfs_rc[0].ratecode = txrate;
if (ismrr) {
int rix;
rix = ath_tx_findrix(sc, params->ibp_rate1);
bf->bf_state.bfs_rc[1].rix = rix;
bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
rix = ath_tx_findrix(sc, params->ibp_rate2);
bf->bf_state.bfs_rc[2].rix = rix;
bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
rix = ath_tx_findrix(sc, params->ibp_rate3);
bf->bf_state.bfs_rc[3].rix = rix;
bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
}
/*
* All the required rate control decisions have been made;
* fill in the rc flags.
*/
ath_tx_rate_fill_rcflags(sc, bf);
/* NB: no buffered multicast in power save support */
/*
* If we're overriding the ADDBA destination, dump directly
* into the hardware queue, right after any pending
* frames to that node are.
*/
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
__func__, do_override);
#if 1
/*
* Put addba frames in the right place in the right TID/HWQ.
*/
if (do_override) {
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
/*
* XXX if it's addba frames, should we be leaking
* them out via the frame leak method?
* XXX for now let's not risk it; but we may wish
* to investigate this later.
*/
ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
&queue_to_head)) {
/* Queue to software queue */
ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
} else {
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
}
#else
/* Direct-dispatch to the hardware */
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
/*
* Update the current leak count if
* we're leaking frames; and set the
* MORE flag as appropriate.
*/
ath_tx_leak_count_update(sc, tid, bf);
ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
#endif
return 0;
}
/*
* Send a raw frame.
*
* This can be called by net80211.
*/
int
ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct ath_softc *sc = ic->ic_softc;
struct ath_buf *bf;
struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
int error = 0;
ATH_PCU_LOCK(sc);
if (sc->sc_inreset_cnt > 0) {
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: sc_inreset_cnt > 0; bailing\n", __func__);
error = EIO;
ATH_PCU_UNLOCK(sc);
goto badbad;
}
sc->sc_txstart_cnt++;
ATH_PCU_UNLOCK(sc);
/* Wake the hardware up already */
ATH_LOCK(sc);
ath_power_set_power_state(sc, HAL_PM_AWAKE);
ATH_UNLOCK(sc);
ATH_TX_LOCK(sc);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
- DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__,
- (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ?
- "!running" : "invalid");
+ if (!sc->sc_running || sc->sc_invalid) {
+ DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, r/i: %d/%d",
+ __func__, sc->sc_running, sc->sc_invalid);
m_freem(m);
error = ENETDOWN;
goto bad;
}
/*
* Enforce how deep the multicast queue can grow.
*
* XXX duplicated in ath_tx_start().
*/
if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
> sc->sc_txq_mcastq_maxdepth) {
sc->sc_stats.ast_tx_mcastq_overflow++;
error = ENOBUFS;
}
if (error != 0) {
m_freem(m);
goto bad;
}
}
/*
* Grab a TX buffer and associated resources.
*/
bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
if (bf == NULL) {
sc->sc_stats.ast_tx_nobuf++;
m_freem(m);
error = ENOBUFS;
goto bad;
}
ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
m, params, bf);
if (params == NULL) {
/*
* Legacy path; interpret frame contents to decide
* precisely how to send the frame.
*/
if (ath_tx_start(sc, ni, bf, m)) {
error = EIO; /* XXX */
goto bad2;
}
} else {
/*
* Caller supplied explicit parameters to use in
* sending the frame.
*/
if (ath_tx_raw_start(sc, ni, bf, m, params)) {
error = EIO; /* XXX */
goto bad2;
}
}
sc->sc_wd_timer = 5;
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
sc->sc_stats.ast_tx_raw++;
/*
* Update the TIM - if there's anything queued to the
* software queue and power save is enabled, we should
* set the TIM.
*/
ath_tx_update_tim(sc, ni, 1);
ATH_TX_UNLOCK(sc);
ATH_PCU_LOCK(sc);
sc->sc_txstart_cnt--;
ATH_PCU_UNLOCK(sc);
/* Put the hardware back to sleep if required */
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
return 0;
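/*
 * Error paths; each label falls through to the next.  bad2 returns
 * the TX buffer to the free list, bad drops the TX lock, the
 * txstart count and the power state, and badbad counts the failure
 * and releases the node reference.
 */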
bad2:
ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
"bf=%p",
m,
params,
bf);
ATH_TXBUF_LOCK(sc);
ath_returnbuf_head(sc, bf);
ATH_TXBUF_UNLOCK(sc);
bad:
ATH_TX_UNLOCK(sc);
ATH_PCU_LOCK(sc);
sc->sc_txstart_cnt--;
ATH_PCU_UNLOCK(sc);
/* Put the hardware back to sleep if required */
ATH_LOCK(sc);
ath_power_restore_power_state(sc);
ATH_UNLOCK(sc);
badbad:
ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
m, params);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
sc->sc_stats.ast_tx_raw_fail++;
ieee80211_free_node(ni);
return error;
}
/* Some helper functions */
/*
* ADDBA (and potentially others) need to be placed in the same
* hardware queue as the TID/node it's relating to. This is so
* it goes out after any pending non-aggregate frames to the
* same node/TID.
*
* If this isn't done, the ADDBA can go out before the frames
* already queued in hardware. Even though those frames have
* sequence numbers earlier than the ADDBA (and no frames with
* later sequence numbers should be pending), they'll arrive
* after the ADDBA - and the receiving end will simply drop
* them as being outside the BAW.
*
* The frames can't be appended to the TID software queue - it'll
* never be sent out. So these frames have to be directly
* dispatched to the hardware, rather than queued in software.
* So if this function returns true, the TXQ has to be
* overridden and it has to be directly dispatched.
*
* It's a dirty hack, but someone's gotta do it.
*/
/*
* XXX doesn't belong here!
*/
static int
ieee80211_is_action(struct ieee80211_frame *wh)
{
/* Type: Management frame? */
if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
IEEE80211_FC0_TYPE_MGT)
return 0;
/* Subtype: Action frame? */
if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
IEEE80211_FC0_SUBTYPE_ACTION)
return 0;
return 1;
}
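/*
 * MS(): extract a bitfield - mask the value with _f, then shift
 * right by the matching _f_S shift constant.
 */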
#define MS(_v, _f) (((_v) & _f) >> _f##_S)
/*
* Return an alternate TID for ADDBA request frames.
*
* Yes, this likely should be done in the net80211 layer.
*/
static int
ath_tx_action_frame_override_queue(struct ath_softc *sc,
struct ieee80211_node *ni,
struct mbuf *m0, int *tid)
{
struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
struct ieee80211_action_ba_addbarequest *ia;
uint8_t *frm;
uint16_t baparamset;
/* Not action frame? Bail */
if (! ieee80211_is_action(wh))
return 0;
/* XXX Not needed for frames we send? */
#if 0
/* Correct length? */
if (! ieee80211_parse_action(ni, m))
return 0;
#endif
/* Extract out action frame */
frm = (u_int8_t *)&wh[1];
ia = (struct ieee80211_action_ba_addbarequest *) frm;
/* Not ADDBA? Bail */
if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
return 0;
if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
return 0;
/* Extract TID, return it */
baparamset = le16toh(ia->rq_baparamset);
*tid = (int) MS(baparamset, IEEE80211_BAPS_TID);
return 1;
}
#undef MS
/* Per-node software queue operations */
/*
* Add the current packet to the given BAW.
* It is assumed that the current packet
*
* + fits inside the BAW;
* + already has had a sequence number allocated.
*
* Since the BAW status may be modified by both the ath task and
* the net80211/ifnet contexts, the TID must be locked.
*/
void
ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid, struct ath_buf *bf)
{
int index, cindex;
struct ieee80211_tx_ampdu *tap;
ATH_TX_LOCK_ASSERT(sc);
if (bf->bf_state.bfs_isretried)
return;
tap = ath_tx_get_tx_tid(an, tid->tid);
if (! bf->bf_state.bfs_dobaw) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: dobaw=0, seqno=%d, window %d:%d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno),
tap->txa_start, tap->txa_wnd);
}
if (bf->bf_state.bfs_addedbaw)
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: re-added? tid=%d, seqno %d; window %d:%d; "
"baw head=%d tail=%d\n",
__func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
tap->txa_start, tap->txa_wnd, tid->baw_head,
tid->baw_tail);
/*
* Verify that the given sequence number is not outside of the
* BAW. Complain loudly if that's the case.
*/
if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
SEQNO(bf->bf_state.bfs_seqno))) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
"baw head=%d tail=%d\n",
__func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
tap->txa_start, tap->txa_wnd, tid->baw_head,
tid->baw_tail);
}
/*
* ni->ni_txseqs[] is the currently allocated seqno.
* the txa state contains the current baw start.
*/
index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
"baw head=%d tail=%d\n",
__func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
tid->baw_tail);
#if 0
assert(tid->tx_buf[cindex] == NULL);
#endif
if (tid->tx_buf[cindex] != NULL) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: ba packet dup (index=%d, cindex=%d, "
"head=%d, tail=%d)\n",
__func__, index, cindex, tid->baw_head, tid->baw_tail);
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
__func__,
tid->tx_buf[cindex],
SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
bf,
SEQNO(bf->bf_state.bfs_seqno)
);
}
tid->tx_buf[cindex] = bf;
if (index >= ((tid->baw_tail - tid->baw_head) &
(ATH_TID_MAX_BUFS - 1))) {
tid->baw_tail = cindex;
INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
}
}
/*
* Flip the BAW buffer entry over from the existing one to the new one.
*
* When software retransmitting a (sub-)frame, it is entirely possible that
* the frame ath_buf is marked as BUSY and can't be immediately reused.
* In that instance the buffer is cloned and the new buffer is used for
* retransmit. We thus need to update the ath_buf slot in the BAW buf
* tracking array to maintain consistency.
*/
static void
ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
{
int index, cindex;
struct ieee80211_tx_ampdu *tap;
int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
ATH_TX_LOCK_ASSERT(sc);
tap = ath_tx_get_tx_tid(an, tid->tid);
index = ATH_BA_INDEX(tap->txa_start, seqno);
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
/*
* Just warn for now; if it happens then we should find out
* about it. It's highly likely the aggregation session will
* soon hang.
*/
if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: retransmitted buffer"
" has mismatching seqno's, BA session may hang.\n",
__func__);
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: old seqno=%d, new_seqno=%d\n", __func__,
old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
}
if (tid->tx_buf[cindex] != old_bf) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: ath_buf pointer incorrect; "
" has m BA session may hang.\n", __func__);
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
}
tid->tx_buf[cindex] = new_bf;
}
/*
* seq_start - left edge of BAW
* seq_next - current/next sequence number to allocate
*
* Since the BAW status may be modified by both the ath task and
* the net80211/ifnet contexts, the TID must be locked.
*/
static void
ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid, const struct ath_buf *bf)
{
int index, cindex;
struct ieee80211_tx_ampdu *tap;
int seqno = SEQNO(bf->bf_state.bfs_seqno);
ATH_TX_LOCK_ASSERT(sc);
tap = ath_tx_get_tx_tid(an, tid->tid);
index = ATH_BA_INDEX(tap->txa_start, seqno);
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
"baw head=%d, tail=%d\n",
__func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
cindex, tid->baw_head, tid->baw_tail);
/*
* If this occurs then we have a big problem - something else
* has slid tap->txa_start along without updating the BAW
* tracking start/end pointers. Thus the TX BAW state is now
* completely busted.
*
* But for now, since I haven't yet fixed TDMA and buffer cloning,
* it's quite possible that a cloned buffer is making its way
* here and causing it to fire off. Disable TDMA for now.
*/
if (tid->tx_buf[cindex] != bf) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
__func__, bf, SEQNO(bf->bf_state.bfs_seqno),
tid->tx_buf[cindex],
(tid->tx_buf[cindex] != NULL) ?
SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
}
tid->tx_buf[cindex] = NULL;
while (tid->baw_head != tid->baw_tail &&
!tid->tx_buf[tid->baw_head]) {
INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
INCR(tid->baw_head, ATH_TID_MAX_BUFS);
}
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: tid=%d: baw is now %d:%d, baw head=%d\n",
__func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head);
}
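/*
 * Update the leak state for a frame being leaked to a station in
 * power save.
 *
 * If we're leaking frames to this node, update the MORE_DATA bit
 * based on whether the net80211 power save queue or the software
 * queue still have frames pending, re-sync the DMA map and then
 * decrement the outstanding leak count.
 */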
static void
ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
struct ath_buf *bf)
{
struct ieee80211_frame *wh;
ATH_TX_LOCK_ASSERT(sc);
if (tid->an->an_leak_count > 0) {
wh = mtod(bf->bf_m, struct ieee80211_frame *);
/*
* Update MORE based on the software/net80211 queue states.
*/
if ((tid->an->an_stack_psq > 0)
|| (tid->an->an_swq_depth > 0))
wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
else
wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
__func__,
tid->an->an_node.ni_macaddr,
":",
tid->an->an_leak_count,
tid->an->an_stack_psq,
tid->an->an_swq_depth,
!! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
/*
* Re-sync the underlying buffer.
*/
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
BUS_DMASYNC_PREWRITE);
tid->an->an_leak_count --;
}
}
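/*
 * Return 1 if this TID is allowed to transmit or be scheduled -
 * i.e. we're leaking frames to a sleeping station or the TID isn't
 * paused; return 0 if the TID is paused and no leak is in progress.
 */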
static int
ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
{
ATH_TX_LOCK_ASSERT(sc);
if (tid->an->an_leak_count > 0) {
return (1);
}
if (tid->paused)
return (0);
return (1);
}
/*
* Mark the current node/TID as ready to TX.
*
* This is done to make it easy for the software scheduler to
* find which nodes have data to send.
*
* The TXQ lock must be held.
*/
void
ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
{
struct ath_txq *txq = sc->sc_ac2q[tid->ac];
ATH_TX_LOCK_ASSERT(sc);
/*
* If we are leaking out a frame to this destination
* for PS-POLL, ensure that we allow scheduling to
* occur.
*/
if (! ath_tx_tid_can_tx_or_sched(sc, tid))
return; /* paused, can't schedule yet */
if (tid->sched)
return; /* already scheduled */
tid->sched = 1;
#if 0
/*
* If this is a sleeping node we're leaking to, give
* it a higher priority. This is so bad for QoS it hurts.
*/
if (tid->an->an_leak_count) {
TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
} else {
TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
}
#endif
/*
* We can't do the above - it'll confuse the TXQ software
* scheduler which will keep checking the _head_ TID
* in the list to see if it has traffic. If we queue
* a TID to the head of the list and it doesn't transmit,
* we'll check it again.
*
* So, get the rest of this leaking frames support working
* and reliable first and _then_ optimise it so they're
* pushed out in front of any other pending software
* queued nodes.
*/
TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
}
/*
* Mark the current node as no longer needing to be polled for
* TX packets.
*
* The TXQ lock must be held.
*/
static void
ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
{
struct ath_txq *txq = sc->sc_ac2q[tid->ac];
ATH_TX_LOCK_ASSERT(sc);
if (tid->sched == 0)
return;
tid->sched = 0;
TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
}
/*
* Assign a sequence number manually to the given frame.
*
* This should only be called for A-MPDU TX frames.
*/
static ieee80211_seq
ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
struct ath_buf *bf, struct mbuf *m0)
{
struct ieee80211_frame *wh;
int tid, pri;
ieee80211_seq seqno;
uint8_t subtype;
/* TID lookup */
wh = mtod(m0, struct ieee80211_frame *);
pri = M_WME_GETAC(m0); /* honor classification */
tid = WME_AC_TO_TID(pri);
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n",
__func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
/* XXX Is it a control frame? Ignore */
/* Does the packet require a sequence number? */
if (! IEEE80211_QOS_HAS_SEQ(wh))
return -1;
ATH_TX_LOCK_ASSERT(sc);
/*
* Is it a QOS NULL Data frame? Give it a sequence number from
* the default TID (IEEE80211_NONQOS_TID.)
*
* The RX path of everything I've looked at doesn't include the NULL
* data frame sequence number in the aggregation state updates, so
* assigning it a sequence number there will cause a BAW hole on the
* RX side.
*/
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
/* XXX no locking for this TID? This is a bit of a problem. */
seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
} else {
/* Manually assign sequence number */
seqno = ni->ni_txseqs[tid];
INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
}
*(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
M_SEQNO_SET(m0, seqno);
/* Return so caller can do something with it if needed */
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno);
return seqno;
}
/*
* Attempt to direct dispatch an aggregate frame to hardware.
* If the frame is out of BAW, queue.
* Otherwise, schedule it as a single frame.
*/
static void
ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
struct ath_txq *txq, struct ath_buf *bf)
{
struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
struct ieee80211_tx_ampdu *tap;
ATH_TX_LOCK_ASSERT(sc);
tap = ath_tx_get_tx_tid(an, tid->tid);
/* paused? queue */
if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
ATH_TID_INSERT_HEAD(tid, bf, bf_list);
/* XXX don't sched - we're paused! */
return;
}
/* outside baw? queue */
if (bf->bf_state.bfs_dobaw &&
(! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
SEQNO(bf->bf_state.bfs_seqno)))) {
ATH_TID_INSERT_HEAD(tid, bf, bf_list);
ath_tx_tid_sched(sc, tid);
return;
}
/*
* This is a temporary check and should be removed once
* all the relevant code paths have been fixed.
*
* During aggregate retries, it's possible that the head
* frame will fail (which has the bfs_aggr and bfs_nframes
* fields set for said aggregate) and will be retried as
* a single frame. In this instance, the values should
* be reset or the completion code will get upset with you.
*/
if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
bf->bf_state.bfs_aggr = 0;
bf->bf_state.bfs_nframes = 1;
}
/* Update CLRDMASK just before this frame is queued */
ath_tx_update_clrdmask(sc, tid, bf);
/* Direct dispatch to hardware */
ath_tx_do_ratelookup(sc, bf);
ath_tx_calc_duration(sc, bf);
ath_tx_calc_protection(sc, bf);
ath_tx_set_rtscts(sc, bf);
ath_tx_rate_fill_rcflags(sc, bf);
ath_tx_setds(sc, bf);
/* Statistics */
sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
/* Track per-TID hardware queue depth correctly */
tid->hwq_depth++;
/* Add to BAW */
if (bf->bf_state.bfs_dobaw) {
ath_tx_addto_baw(sc, an, tid, bf);
bf->bf_state.bfs_addedbaw = 1;
}
/* Set completion handler, multi-frame aggregate or not */
bf->bf_comp = ath_tx_aggr_comp;
/*
* Update the current leak count if
* we're leaking frames; and set the
* MORE flag as appropriate.
*/
ath_tx_leak_count_update(sc, tid, bf);
/* Hand off to hardware */
ath_tx_handoff(sc, txq, bf);
}
/*
* Attempt to send the packet.
* If the queue isn't busy, direct-dispatch.
* If the queue is busy enough, queue the given packet on the
* relevant software queue.
*/
void
ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
{
struct ath_node *an = ATH_NODE(ni);
struct ieee80211_frame *wh;
struct ath_tid *atid;
int pri, tid;
struct mbuf *m0 = bf->bf_m;
ATH_TX_LOCK_ASSERT(sc);
/* Fetch the TID - non-QoS frames get assigned to TID 16 */
wh = mtod(m0, struct ieee80211_frame *);
pri = ath_tx_getac(sc, m0);
tid = ath_tx_gettid(sc, m0);
atid = &an->an_tid[tid];
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
__func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
/* Set local packet state, used to queue packets to hardware */
/* XXX potentially duplicate info, re-check */
bf->bf_state.bfs_tid = tid;
bf->bf_state.bfs_tx_queue = txq->axq_qnum;
bf->bf_state.bfs_pri = pri;
/*
* If the hardware queue isn't busy, direct-dispatch it.
* If the hardware queue is busy, software queue it.
* If the TID is paused or the traffic is outside the BAW,
* software queue it.
*
* If the node is in power-save and we're leaking a frame,
* leak a single frame.
*/
if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
/* TID is paused, queue */
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
/*
* If the caller requested that it be sent at a high
* priority, queue it at the head of the list.
*/
if (queue_to_head)
ATH_TID_INSERT_HEAD(atid, bf, bf_list);
else
ATH_TID_INSERT_TAIL(atid, bf, bf_list);
} else if (ath_tx_ampdu_pending(sc, an, tid)) {
/* AMPDU pending; queue */
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
ATH_TID_INSERT_TAIL(atid, bf, bf_list);
/* XXX sched? */
} else if (ath_tx_ampdu_running(sc, an, tid)) {
/* AMPDU running, attempt direct dispatch if possible */
/*
* Always queue the frame to the tail of the list.
*/
ATH_TID_INSERT_TAIL(atid, bf, bf_list);
/*
* If the hardware queue isn't busy, direct dispatch
* the head frame in the list. Don't schedule the
* TID - let it build some more frames first?
*
* When running A-MPDU, always just check the hardware
* queue depth against the aggregate frame limit.
* We don't want to burst a large number of single frames
* out to the hardware; we want to aggressively hold back.
*
* Otherwise, schedule the TID.
*/
/* XXX TXQ locking */
if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
bf = ATH_TID_FIRST(atid);
ATH_TID_REMOVE(atid, bf, bf_list);
/*
* Ensure it's definitely treated as a non-AMPDU
* frame - this information may have been left
* over from a previous attempt.
*/
bf->bf_state.bfs_aggr = 0;
bf->bf_state.bfs_nframes = 1;
/* Queue to the hardware */
ath_tx_xmit_aggr(sc, an, txq, bf);
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: xmit_aggr\n",
__func__);
} else {
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: ampdu; swq'ing\n",
__func__);
ath_tx_tid_sched(sc, atid);
}
/*
* If we're not doing A-MPDU, be prepared to direct dispatch
* up to both limits if possible. This particular corner
* case may end up with packet starvation between aggregate
* traffic and non-aggregate traffic: we want to ensure
* that non-aggregate stations get a few frames queued to the
* hardware before the aggregate station(s) get their chance.
*
* So if you only ever see a couple of frames direct dispatched
* to the hardware from a non-AMPDU client, check both here
* and in the software queue dispatcher to ensure that those
* non-AMPDU stations get a fair chance to transmit.
*/
/* XXX TXQ locking */
} else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
(txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
/* AMPDU not running, attempt direct dispatch */
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
/* See if clrdmask needs to be set */
ath_tx_update_clrdmask(sc, atid, bf);
/*
* Update the current leak count if
* we're leaking frames; and set the
* MORE flag as appropriate.
*/
ath_tx_leak_count_update(sc, atid, bf);
/*
* Dispatch the frame.
*/
ath_tx_xmit_normal(sc, txq, bf);
} else {
/* Busy; queue */
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
ATH_TID_INSERT_TAIL(atid, bf, bf_list);
ath_tx_tid_sched(sc, atid);
}
}
/*
* Only set the clrdmask bit if none of the nodes are currently
* filtered.
*
* XXX TODO: go through all the callers and check to see
* which are being called in the context of looping over all
* TIDs (eg, if all tids are being paused, resumed, etc.)
* That'll avoid O(n^2) complexity here.
*/
static void
ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
{
int i;
ATH_TX_LOCK_ASSERT(sc);
for (i = 0; i < IEEE80211_TID_SIZE; i++) {
if (an->an_tid[i].isfiltered == 1)
return;
}
an->clrdmask = 1;
}
/*
* Configure the per-TID node state.
*
* This likely belongs in if_ath_node.c but I can't think of anywhere
* else to put it just yet.
*
* This sets up the SLISTs and the mutex as appropriate.
*/
void
ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
{
int i, j;
struct ath_tid *atid;
for (i = 0; i < IEEE80211_TID_SIZE; i++) {
atid = &an->an_tid[i];
/* XXX now with this bzero(), is the field 0'ing needed? */
bzero(atid, sizeof(*atid));
TAILQ_INIT(&atid->tid_q);
TAILQ_INIT(&atid->filtq.tid_q);
atid->tid = i;
atid->an = an;
for (j = 0; j < ATH_TID_MAX_BUFS; j++)
atid->tx_buf[j] = NULL;
atid->baw_head = atid->baw_tail = 0;
atid->paused = 0;
atid->sched = 0;
atid->hwq_depth = 0;
atid->cleanup_inprogress = 0;
if (i == IEEE80211_NONQOS_TID)
atid->ac = ATH_NONQOS_TID_AC;
else
atid->ac = TID_TO_WME_AC(i);
}
an->clrdmask = 1; /* Always start by setting this bit */
}
/*
* Pause the current TID. This stops packets from being transmitted
* on it.
*
* Since this is also called from upper layers as well as the driver,
* it will get the TID lock.
*/
static void
ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
{
ATH_TX_LOCK_ASSERT(sc);
tid->paused++;
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: [%6D]: tid=%d, paused = %d\n",
__func__,
tid->an->an_node.ni_macaddr, ":",
tid->tid,
tid->paused);
}
/*
* Unpause the current TID, and schedule it if needed.
*/
static void
ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
{
ATH_TX_LOCK_ASSERT(sc);
/*
* There are some odd places where ath_tx_tid_resume() is called
* when it shouldn't be; this works around that particular issue
* until it's actually resolved.
*/
if (tid->paused == 0) {
device_printf(sc->sc_dev,
"%s: [%6D]: tid=%d, paused=0?\n",
__func__,
tid->an->an_node.ni_macaddr, ":",
tid->tid);
} else {
tid->paused--;
}
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
"%s: [%6D]: tid=%d, unpaused = %d\n",
__func__,
tid->an->an_node.ni_macaddr, ":",
tid->tid,
tid->paused);
if (tid->paused)
return;
/*
* Override the clrdmask configuration for the next frame
* from this TID, just to get the ball rolling.
*/
ath_tx_set_clrdmask(sc, tid->an);
if (tid->axq_depth == 0)
return;
/* XXX isfiltered shouldn't ever be 1 at this point */
if (tid->isfiltered == 1) {
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
__func__);
return;
}
ath_tx_tid_sched(sc, tid);
/*
* Queue the software TX scheduler.
*/
ath_tx_swq_kick(sc);
}
/*
* Add the given ath_buf to the TID filtered frame list.
* This requires the TID be filtered.
*/
static void
ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
struct ath_buf *bf)
{
ATH_TX_LOCK_ASSERT(sc);
if (!tid->isfiltered)
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
__func__);
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
/* Set the retry bit and bump the retry counter */
ath_tx_set_retry(sc, bf);
sc->sc_stats.ast_tx_swfiltered++;
ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
}
/*
* Handle a completed filtered frame from the given TID.
* This just enables/pauses the filtered frame state if required
* and appends the filtered frame to the filtered queue.
*/
static void
ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
struct ath_buf *bf)
{
ATH_TX_LOCK_ASSERT(sc);
if (! tid->isfiltered) {
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d; filter transition\n",
__func__, tid->tid);
tid->isfiltered = 1;
ath_tx_tid_pause(sc, tid);
}
/* Add the frame to the filter queue */
ath_tx_tid_filt_addbuf(sc, tid, bf);
}
/*
* Complete the filtered frame TX completion.
*
* If there are no more frames in the hardware queue, unpause/unfilter
* the TID if applicable. Otherwise we will wait for a node PS transition
* to unfilter.
*/
static void
ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
{
struct ath_buf *bf;
int do_resume = 0;
ATH_TX_LOCK_ASSERT(sc);
if (tid->hwq_depth != 0)
return;
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: tid=%d, hwq=0, transition back\n",
__func__, tid->tid);
if (tid->isfiltered == 1) {
tid->isfiltered = 0;
do_resume = 1;
}
/* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
ath_tx_set_clrdmask(sc, tid->an);
/* XXX this is really quite inefficient */
while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
ATH_TID_FILT_REMOVE(tid, bf, bf_list);
ATH_TID_INSERT_HEAD(tid, bf, bf_list);
}
/* And only resume if we had paused before */
if (do_resume)
ath_tx_tid_resume(sc, tid);
}
/*
* Called when a single (aggregate or otherwise) frame is completed.
*
* Returns 0 if the buffer could be added to the filtered list
* (cloned or otherwise), 1 if the buffer couldn't be added to the
* filtered list (failed clone; expired retry) and the caller should
* free it and handle it like a failure (eg by sending a BAR.)
*
* Since the buffer may be cloned, bf must not be touched after
* this call if the return value is 0.
*/
static int
ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
struct ath_buf *bf)
{
struct ath_buf *nbf;
int retval;
ATH_TX_LOCK_ASSERT(sc);
/*
* Don't allow a filtered frame to live forever.
*/
if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
sc->sc_stats.ast_tx_swretrymax++;
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
"%s: bf=%p, seqno=%d, exceeded retries\n",
__func__,
bf,
SEQNO(bf->bf_state.bfs_seqno));
retval = 1; /* error */
goto finish;
}
/*
* A busy buffer can't be added to the retry list.
* It needs to be cloned.
*/
if (bf->bf_flags & ATH_BUF_BUSY) {
nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
"%s: busy buffer clone: %p -> %p\n",
__func__, bf, nbf);
} else {
nbf = bf;
}
if (nbf == NULL) {
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
"%s: busy buffer couldn't be cloned (%p)!\n",
__func__, bf);
retval = 1; /* error */
} else {
ath_tx_tid_filt_comp_buf(sc, tid, nbf);
retval = 0; /* ok */
}
finish:
ath_tx_tid_filt_comp_complete(sc, tid);
return (retval);
}
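/*
 * Handle a completed, filtered aggregate.
 *
 * Walk the subframe list: frames that have exceeded the software
 * retry limit, or busy buffers that couldn't be cloned, are placed
 * on bf_q for the caller to free; everything else (or its clone) is
 * appended to the TID filtered frame list.  Finally, check whether
 * the filtered state can be completed and the TID resumed.
 */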
static void
ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
struct ath_buf *bf_first, ath_bufhead *bf_q)
{
struct ath_buf *bf, *bf_next, *nbf;
ATH_TX_LOCK_ASSERT(sc);
bf = bf_first;
while (bf) {
bf_next = bf->bf_next;
bf->bf_next = NULL; /* Remove it from the aggr list */
/*
* Don't allow a filtered frame to live forever.
*/
if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
sc->sc_stats.ast_tx_swretrymax++;
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
"%s: tid=%d, bf=%p, seqno=%d, exceeded retries\n",
__func__,
tid->tid,
bf,
SEQNO(bf->bf_state.bfs_seqno));
TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
goto next;
}
if (bf->bf_flags & ATH_BUF_BUSY) {
nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
"%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n",
__func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno));
} else {
nbf = bf;
}
/*
* If the buffer couldn't be cloned, add it to bf_q;
* the caller will free the buffer(s) as required.
*/
if (nbf == NULL) {
DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
"%s: tid=%d, buffer couldn't be cloned! (%p) seqno=%d\n",
__func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno));
TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
} else {
ath_tx_tid_filt_comp_buf(sc, tid, nbf);
}
next:
bf = bf_next;
}
ath_tx_tid_filt_comp_complete(sc, tid);
}
/*
* Suspend the queue because we need to TX a BAR.
*/
static void
ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
{
ATH_TX_LOCK_ASSERT(sc);
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",
__func__,
tid->tid,
tid->bar_wait,
tid->bar_tx);
/* We shouldn't be called when bar_tx is 1 */
if (tid->bar_tx) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: bar_tx is 1?!\n", __func__);
}
/* If we've already been called, just be patient. */
if (tid->bar_wait)
return;
/* Wait! */
tid->bar_wait = 1;
/* Only one pause, no matter how many frames fail */
ath_tx_tid_pause(sc, tid);
}
/*
* We've finished with BAR handling - either we succeeded or
* failed. Either way, unsuspend TX.
*/
static void
ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
{
ATH_TX_LOCK_ASSERT(sc);
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, called\n",
__func__,
tid->an->an_node.ni_macaddr,
":",
tid->tid);
if (tid->bar_tx == 0 || tid->bar_wait == 0) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
__func__, tid->an->an_node.ni_macaddr, ":",
tid->tid, tid->bar_tx, tid->bar_wait);
}
tid->bar_tx = tid->bar_wait = 0;
ath_tx_tid_resume(sc, tid);
}
/*
* Return whether we're ready to TX a BAR frame.
*
* Requires the TID lock be held.
*/
static int
ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
{
ATH_TX_LOCK_ASSERT(sc);
if (tid->bar_wait == 0 || tid->hwq_depth > 0)
return (0);
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, bar ready\n",
__func__,
tid->an->an_node.ni_macaddr,
":",
tid->tid);
return (1);
}
/*
* Check whether the current TID is ready to have a BAR
* TXed and if so, do the TX.
*
* Since the TID/TXQ lock can't be held during a call to
* ieee80211_send_bar(), we have to do the dirty thing of unlocking it,
* sending the BAR and locking it again.
*
* Eventually, the code to send the BAR should be broken out
* from this routine so the lock doesn't have to be reacquired
* just to be immediately dropped by the caller.
*/
static void
ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
{
struct ieee80211_tx_ampdu *tap;
ATH_TX_LOCK_ASSERT(sc);
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, called\n",
__func__,
tid->an->an_node.ni_macaddr,
":",
tid->tid);
tap = ath_tx_get_tx_tid(tid->an, tid->tid);
/*
* This is an error condition!
*/
if (tid->bar_wait == 0 || tid->bar_tx == 1) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
__func__, tid->an->an_node.ni_macaddr, ":",
tid->tid, tid->bar_tx, tid->bar_wait);
return;
}
/* Don't do anything if we still have pending frames */
if (tid->hwq_depth > 0) {
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, hwq_depth=%d, waiting\n",
__func__,
tid->an->an_node.ni_macaddr,
":",
tid->tid,
tid->hwq_depth);
return;
}
/* We're now about to TX */
tid->bar_tx = 1;
/*
* Override the clrdmask configuration for the next frame,
* just to get the ball rolling.
*/
ath_tx_set_clrdmask(sc, tid->an);
/*
* Calculate new BAW left edge, now that all frames have either
* succeeded or failed.
*
* XXX verify this is _actually_ the valid value to begin at!
*/
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, new BAW left edge=%d\n",
__func__,
tid->an->an_node.ni_macaddr,
":",
tid->tid,
tap->txa_start);
/* Try sending the BAR frame */
/* We can't hold the lock here! */
ATH_TX_UNLOCK(sc);
if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
/* Success? Now we wait for notification that it's done */
ATH_TX_LOCK(sc);
return;
}
/* Failure? For now, warn loudly and continue */
ATH_TX_LOCK(sc);
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: TID=%d, failed to TX BAR, continue!\n",
__func__, tid->an->an_node.ni_macaddr, ":",
tid->tid);
ath_tx_tid_bar_unsuspend(sc, tid);
}
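/*
 * Drain a single frame from a TID.
 *
 * If the TID is running A-MPDU and the frame has been in the BAW
 * (i.e. it was transmitted at least once), slide the BAW along.
 * The frame is then stripped from any aggregate chain and appended
 * to bf_cq for the caller to complete/free outside the TX lock.
 */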
static void
ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
{
ATH_TX_LOCK_ASSERT(sc);
/*
* If the current TID is running AMPDU, update
* the BAW.
*/
if (ath_tx_ampdu_running(sc, an, tid->tid) &&
bf->bf_state.bfs_dobaw) {
/*
* Only remove the frame from the BAW if it's
* been transmitted at least once; this means
* the frame was in the BAW to begin with.
*/
if (bf->bf_state.bfs_retries > 0) {
ath_tx_update_baw(sc, an, tid, bf);
bf->bf_state.bfs_dobaw = 0;
}
#if 0
/*
* This has become a non-fatal error now
*/
if (! bf->bf_state.bfs_addedbaw)
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: wasn't added: seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
#endif
}
/* Strip it out of an aggregate list if it was in one */
bf->bf_next = NULL;
/* Insert on the free queue to be freed by the caller */
TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
}
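/*
 * Debug helper for draining a TID: dump the buffer, TXQ, TID and
 * BAW state, and optionally the frame contents themselves.
 */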
static void
ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
{
struct ieee80211_node *ni = &an->an_node;
struct ath_txq *txq;
struct ieee80211_tx_ampdu *tap;
txq = sc->sc_ac2q[tid->ac];
tap = ath_tx_get_tx_tid(an, tid->tid);
DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
"%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
"seqno=%d, retry=%d\n",
__func__,
pfx,
ni->ni_macaddr,
":",
bf,
bf->bf_state.bfs_addedbaw,
bf->bf_state.bfs_dobaw,
SEQNO(bf->bf_state.bfs_seqno),
bf->bf_state.bfs_retries);
DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
"%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
__func__,
pfx,
ni->ni_macaddr,
":",
bf,
txq->axq_qnum,
txq->axq_depth,
txq->axq_aggr_depth);
DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
"%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
"isfiltered=%d\n",
__func__,
pfx,
ni->ni_macaddr,
":",
bf,
tid->axq_depth,
tid->hwq_depth,
tid->bar_wait,
tid->isfiltered);
DPRINTF(sc, ATH_DEBUG_SW_TX | ATH_DEBUG_RESET,
"%s: %s: %6D: tid %d: "
"sched=%d, paused=%d, "
"incomp=%d, baw_head=%d, "
"baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
__func__,
pfx,
ni->ni_macaddr,
":",
tid->tid,
tid->sched, tid->paused,
tid->incomp, tid->baw_head,
tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
ni->ni_txseqs[tid->tid]);
/* XXX Dump the frame, see what it is? */
if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
ieee80211_dump_pkt(ni->ni_ic,
mtod(bf->bf_m, const uint8_t *),
bf->bf_m->m_len, 0, -1);
}
/*
* Free any packets currently pending in the software TX queue.
*
* This will be called when a node is being deleted.
*
* It can also be called on an active node during an interface
* reset or state transition.
*
* (From Linux/reference):
*
* TODO: For frame(s) that are in the retry state, we will reuse the
* sequence number(s) without setting the retry bit. The
* alternative is to give up on these and BAR the receiver's window
* forward.
*/
static void
ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid, ath_bufhead *bf_cq)
{
struct ath_buf *bf;
struct ieee80211_tx_ampdu *tap;
struct ieee80211_node *ni = &an->an_node;
int t;
tap = ath_tx_get_tx_tid(an, tid->tid);
ATH_TX_LOCK_ASSERT(sc);
/* Walk the queue, free frames */
t = 0;
for (;;) {
bf = ATH_TID_FIRST(tid);
if (bf == NULL) {
break;
}
if (t == 0) {
ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
// t = 1;
}
ATH_TID_REMOVE(tid, bf, bf_list);
ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
}
/* And now, drain the filtered frame queue */
t = 0;
for (;;) {
bf = ATH_TID_FILT_FIRST(tid);
if (bf == NULL)
break;
if (t == 0) {
ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
// t = 1;
}
ATH_TID_FILT_REMOVE(tid, bf, bf_list);
ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
}
/*
* Override the clrdmask configuration for the next frame
* in case there is some future transmission, just to get
* the ball rolling.
*
* This won't hurt things if the TID is about to be freed.
*/
ath_tx_set_clrdmask(sc, tid->an);
/*
* Now that it's completed, grab the TID lock and update
* the sequence number and BAW window.
* Because sequence numbers have been assigned to frames
* that haven't been sent yet, it's entirely possible
* we'll be called with some pending frames that have not
* been transmitted.
*
* The cleaner solution is to do the sequence number allocation
* when the packet is first transmitted - and thus the "retries"
* check above would be enough to update the BAW/seqno.
*/
/* But don't do it for non-QoS TIDs */
if (tap) {
#if 1
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
"%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
__func__,
ni->ni_macaddr,
":",
an,
tid->tid,
tap->txa_start);
#endif
ni->ni_txseqs[tid->tid] = tap->txa_start;
tid->baw_tail = tid->baw_head;
}
}
/*
* Reset the TID state. This must be only called once the node has
* had its frames flushed from this TID, to ensure that no other
* pause / unpause logic can kick in.
*/
static void
ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
{
#if 0
tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
tid->paused = tid->sched = tid->addba_tx_pending = 0;
tid->incomp = tid->cleanup_inprogress = 0;
#endif
/*
* If we have a bar_wait set, we need to unpause the TID
* here. Otherwise once cleanup has finished, the TID won't
* have the right paused counter.
*
* XXX I'm not going through resume here - I don't want the
* node to be rescheduled just yet. This however should be
* methodized!
*/
if (tid->bar_wait) {
if (tid->paused > 0) {
tid->paused --;
}
}
/*
* XXX same with a currently filtered TID.
*
* Since this is being called during a flush, we assume that
* the filtered frame list is actually empty.
*
* XXX TODO: add in a check to ensure that the filtered queue
* depth is actually 0!
*/
if (tid->isfiltered) {
if (tid->paused > 0) {
tid->paused --;
}
}
/*
* Clear BAR, filtered frames, scheduled and ADDBA pending.
* The TID may be going through cleanup from the last association
* where things in the BAW are still in the hardware queue.
*/
tid->bar_wait = 0;
tid->bar_tx = 0;
tid->isfiltered = 0;
tid->sched = 0;
tid->addba_tx_pending = 0;
/*
* XXX TODO: it may just be enough to walk the HWQs and mark
* frames for that node as non-aggregate; or mark the ath_node
* with something that indicates that aggregation is no longer
* occurring. Then we can just toss the BAW complaints and
* do a complete hard reset of state here - no pause, no
* complete counter, etc.
*/
}
/*
* Flush all software queued packets for the given node.
*
* This occurs when a completion handler frees the last buffer
* for a node, and the node is thus freed. This causes the node
* to be cleaned up, which ends up calling ath_tx_node_flush.
*/
void
ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
{
int tid;
ath_bufhead bf_cq;
struct ath_buf *bf;
TAILQ_INIT(&bf_cq);
ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
&an->an_node);
ATH_TX_LOCK(sc);
DPRINTF(sc, ATH_DEBUG_NODE,
"%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
"swq_depth=%d, clrdmask=%d, leak_count=%d\n",
__func__,
an->an_node.ni_macaddr,
":",
an->an_is_powersave,
an->an_stack_psq,
an->an_tim_set,
an->an_swq_depth,
an->clrdmask,
an->an_leak_count);
for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
struct ath_tid *atid = &an->an_tid[tid];
/* Free packets */
ath_tx_tid_drain(sc, an, atid, &bf_cq);
/* Remove this tid from the list of active tids */
ath_tx_tid_unsched(sc, atid);
/* Reset the per-TID pause, BAR, etc state */
ath_tx_tid_reset(sc, atid);
}
/*
* Clear global leak count
*/
an->an_leak_count = 0;
ATH_TX_UNLOCK(sc);
/* Handle completed frames */
while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
TAILQ_REMOVE(&bf_cq, bf, bf_list);
ath_tx_default_comp(sc, bf, 0);
}
}
/*
* Drain all the software TXQs currently with traffic queued.
*/
void
ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_tid *tid;
ath_bufhead bf_cq;
struct ath_buf *bf;
TAILQ_INIT(&bf_cq);
ATH_TX_LOCK(sc);
/*
* Iterate over all active tids for the given txq,
* flushing and unsched'ing them
*/
while (! TAILQ_EMPTY(&txq->axq_tidq)) {
tid = TAILQ_FIRST(&txq->axq_tidq);
ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
ath_tx_tid_unsched(sc, tid);
}
ATH_TX_UNLOCK(sc);
while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
TAILQ_REMOVE(&bf_cq, bf, bf_list);
ath_tx_default_comp(sc, bf, 0);
}
}
/*
* Handle completion of non-aggregate session frames.
*
* This (currently) doesn't implement software retransmission of
* non-aggregate frames!
*
* Software retransmission of non-aggregate frames needs to obey
* the strict sequence number ordering, and drop any frames that
* will fail this.
*
* For now, filtered frames and frame transmission will cause
* all kinds of issues. So we don't support them.
*
* So anyone queuing frames via ath_tx_normal_xmit() or
* ath_tx_hw_queue_norm() must override and set CLRDMASK.
*/
void
ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
struct ieee80211_node *ni = bf->bf_node;
struct ath_node *an = ATH_NODE(ni);
int tid = bf->bf_state.bfs_tid;
struct ath_tid *atid = &an->an_tid[tid];
struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
/* The TID state is protected behind the TXQ lock */
ATH_TX_LOCK(sc);
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
__func__, bf, fail, atid->hwq_depth - 1);
atid->hwq_depth--;
#if 0
/*
* If the frame was filtered, stick it on the filter frame
* queue and complain about it. It shouldn't happen!
*/
if ((ts->ts_status & HAL_TXERR_FILT) ||
(ts->ts_status != 0 && atid->isfiltered)) {
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: isfiltered=%d, ts_status=%d: huh?\n",
__func__,
atid->isfiltered,
ts->ts_status);
ath_tx_tid_filt_comp_buf(sc, atid, bf);
}
#endif
if (atid->isfiltered)
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
if (atid->hwq_depth < 0)
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
__func__, atid->hwq_depth);
/* If the TID is being cleaned up, track things */
/* XXX refactor! */
if (atid->cleanup_inprogress) {
atid->incomp--;
if (atid->incomp == 0) {
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
"%s: TID %d: cleaned up! resume!\n",
__func__, tid);
atid->cleanup_inprogress = 0;
ath_tx_tid_resume(sc, atid);
}
}
/*
* If the queue is filtered, potentially mark it as complete
* and reschedule it as needed.
*
* This is required as there may be a subsequent TX descriptor
* for this end-node that has CLRDMASK set, so it's quite possible
* that a filtered frame will be followed by a non-filtered
* (complete or otherwise) frame.
*
* XXX should we do this before we complete the frame?
*/
if (atid->isfiltered)
ath_tx_tid_filt_comp_complete(sc, atid);
ATH_TX_UNLOCK(sc);
/*
* punt to rate control if we're not being cleaned up
* during a hw queue drain and the frame wanted an ACK.
*/
if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
ts, bf->bf_state.bfs_pktlen,
1, (ts->ts_status == 0) ? 0 : 1);
ath_tx_default_comp(sc, bf, fail);
}
/*
* Handle cleanup of aggregate session packets that aren't
* an A-MPDU.
*
* There's no need to update the BAW here - the session is being
* torn down.
*/
static void
ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
{
struct ieee80211_node *ni = bf->bf_node;
struct ath_node *an = ATH_NODE(ni);
int tid = bf->bf_state.bfs_tid;
struct ath_tid *atid = &an->an_tid[tid];
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
__func__, tid, atid->incomp);
ATH_TX_LOCK(sc);
atid->incomp--;
/* XXX refactor! */
if (bf->bf_state.bfs_dobaw) {
ath_tx_update_baw(sc, an, atid, bf);
if (!bf->bf_state.bfs_addedbaw)
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: wasn't added: seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
}
if (atid->incomp == 0) {
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
"%s: TID %d: cleaned up! resume!\n",
__func__, tid);
atid->cleanup_inprogress = 0;
ath_tx_tid_resume(sc, atid);
}
ATH_TX_UNLOCK(sc);
ath_tx_default_comp(sc, bf, 0);
}
/*
* This as it currently stands is a bit dumb. Ideally we'd just
* fail the frame the normal way and have it permanently fail
* via the normal aggregate completion path.
*/
static void
ath_tx_tid_cleanup_frame(struct ath_softc *sc, struct ath_node *an,
int tid, struct ath_buf *bf_head, ath_bufhead *bf_cq)
{
struct ath_tid *atid = &an->an_tid[tid];
struct ath_buf *bf, *bf_next;
ATH_TX_LOCK_ASSERT(sc);
/*
* Remove this frame from the queue.
*/
ATH_TID_REMOVE(atid, bf_head, bf_list);
/*
* Loop over all the frames in the aggregate.
*/
bf = bf_head;
while (bf != NULL) {
bf_next = bf->bf_next; /* next aggregate frame, or NULL */
/*
* If it's been added to the BAW we need to kick
* it out of the BAW before we continue.
*
* XXX if it's an aggregate, assert that it's in the
* BAW - we shouldn't have it be in an aggregate
* otherwise!
*/
if (bf->bf_state.bfs_addedbaw) {
ath_tx_update_baw(sc, an, atid, bf);
bf->bf_state.bfs_dobaw = 0;
}
/*
* Give it the default completion handler.
*/
bf->bf_comp = ath_tx_normal_comp;
bf->bf_next = NULL;
/*
* Add it to the list to free.
*/
TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
/*
* Now advance to the next frame in the aggregate.
*/
bf = bf_next;
}
}
/*
* Performs transmit side cleanup when TID changes from aggregated to
* unaggregated and during reassociation.
*
* For now, this just tosses everything from the TID software queue
* whether or not it has been retried and marks the TID as
* pending completion if there's anything for this TID queued to
* the hardware.
*
* The caller is responsible for pausing the TID and unpausing the
* TID if no cleanup was required. Otherwise the cleanup path will
* unpause the TID once the last hardware queued frame is completed.
*/
static void
ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
ath_bufhead *bf_cq)
{
struct ath_tid *atid = &an->an_tid[tid];
struct ath_buf *bf, *bf_next;
ATH_TX_LOCK_ASSERT(sc);
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: TID %d: called; inprogress=%d\n", __func__, tid,
atid->cleanup_inprogress);
/*
* Move the filtered frames to the TX queue, before
* we run off and discard/process things.
*/
/* XXX this is really quite inefficient */
while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
ATH_TID_FILT_REMOVE(atid, bf, bf_list);
ATH_TID_INSERT_HEAD(atid, bf, bf_list);
}
/*
* Update the frames in the software TX queue:
*
* + Discard retry frames in the queue
* + Fix the completion function to be non-aggregate
*/
bf = ATH_TID_FIRST(atid);
while (bf) {
/*
* Grab the next frame in the list, we may
* be fiddling with the list.
*/
bf_next = TAILQ_NEXT(bf, bf_list);
/*
* Free the frame and all subframes.
*/
ath_tx_tid_cleanup_frame(sc, an, tid, bf, bf_cq);
/*
* Next frame!
*/
bf = bf_next;
}
/*
* If there's anything in the hardware queue we wait
* for the TID HWQ to empty.
*/
if (atid->hwq_depth > 0) {
/*
* XXX how about we kill atid->incomp, and instead
* replace it with a macro that checks that atid->hwq_depth
* is 0?
*/
atid->incomp = atid->hwq_depth;
atid->cleanup_inprogress = 1;
}
if (atid->cleanup_inprogress)
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
"%s: TID %d: cleanup needed: %d packets\n",
__func__, tid, atid->incomp);
/* Owner now must free completed frames */
}
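/*
 * Clone a busy ath_buf so it can be software retried.
 *
 * On success the new buffer has the DMA map set up, the BAW slot is
 * switched over if required and the original buffer is freed; the
 * clone is returned.  On failure (clone or DMA setup) NULL is
 * returned.
 */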
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid, struct ath_buf *bf)
{
struct ath_buf *nbf;
int error;
/*
* Clone the buffer. This will handle the dma unmap and
* copy the node reference to the new buffer. If this
* works out, 'bf' will have no DMA mapping, no mbuf
* pointer and no node reference.
*/
nbf = ath_buf_clone(sc, bf);
#if 0
DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
__func__);
#endif
if (nbf == NULL) {
/* Failed to clone */
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: failed to clone a busy buffer\n",
__func__);
return NULL;
}
/* Setup the dma for the new buffer */
error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
if (error != 0) {
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: failed to setup dma for clone\n",
__func__);
/*
* Put this at the head of the list, not tail;
* that way it doesn't interfere with the
* busy buffer logic (which uses the tail of
* the list.)
*/
ATH_TXBUF_LOCK(sc);
ath_returnbuf_head(sc, nbf);
ATH_TXBUF_UNLOCK(sc);
return NULL;
}
/* Update BAW if required, before we free the original buf */
if (bf->bf_state.bfs_dobaw)
ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
/* Free original buffer; return new buffer */
ath_freebuf(sc, bf);
return nbf;
}
/*
* Handle retrying an unaggregate frame in an aggregate
* session.
*
* If too many retries occur, pause the TID, wait for
* any further retransmits (as there's no reason why
* non-aggregate frames in an aggregate session are
* transmitted in-order; they just have to be in-BAW)
* and then queue a BAR.
*/
static void
ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
{
struct ieee80211_node *ni = bf->bf_node;
struct ath_node *an = ATH_NODE(ni);
int tid = bf->bf_state.bfs_tid;
struct ath_tid *atid = &an->an_tid[tid];
struct ieee80211_tx_ampdu *tap;
ATH_TX_LOCK(sc);
tap = ath_tx_get_tx_tid(an, tid);
/*
* If the buffer is marked as busy, we can't directly
* reuse it. Instead, try to clone the buffer.
* If the clone is successful, recycle the old buffer.
* If the clone is unsuccessful, set bfs_retries to max
* to force the next bit of code to free the buffer
* for us.
*/
if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
(bf->bf_flags & ATH_BUF_BUSY)) {
struct ath_buf *nbf;
nbf = ath_tx_retry_clone(sc, an, atid, bf);
if (nbf)
/* bf has been freed at this point */
bf = nbf;
else
bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
}
if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
"%s: exceeded retries; seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
sc->sc_stats.ast_tx_swretrymax++;
/* Update BAW anyway */
if (bf->bf_state.bfs_dobaw) {
ath_tx_update_baw(sc, an, atid, bf);
if (! bf->bf_state.bfs_addedbaw)
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: wasn't added: seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
}
bf->bf_state.bfs_dobaw = 0;
/* Suspend the TX queue and get ready to send the BAR */
ath_tx_tid_bar_suspend(sc, atid);
/* Send the BAR if there are no other frames waiting */
if (ath_tx_tid_bar_tx_ready(sc, atid))
ath_tx_tid_bar_tx(sc, atid);
ATH_TX_UNLOCK(sc);
/* Free buffer, bf is free after this call */
ath_tx_default_comp(sc, bf, 0);
return;
}
/*
* This increments the retry counter as well as
* sets the retry flag in the ath_buf and packet
* body.
*/
ath_tx_set_retry(sc, bf);
sc->sc_stats.ast_tx_swretries++;
/*
* Insert this at the head of the queue, so it's
* retried before any current/subsequent frames.
*/
ATH_TID_INSERT_HEAD(atid, bf, bf_list);
ath_tx_tid_sched(sc, atid);
/* Send the BAR if there are no other frames waiting */
if (ath_tx_tid_bar_tx_ready(sc, atid))
ath_tx_tid_bar_tx(sc, atid);
ATH_TX_UNLOCK(sc);
}
/*
* Common code for aggregate excessive retry/subframe retry.
* If retrying, queues buffers to bf_q. If not, frees the
* buffers.
*
* XXX should unify this with ath_tx_aggr_retry_unaggr()
*/
static int
ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
ath_bufhead *bf_q)
{
struct ieee80211_node *ni = bf->bf_node;
struct ath_node *an = ATH_NODE(ni);
int tid = bf->bf_state.bfs_tid;
struct ath_tid *atid = &an->an_tid[tid];
ATH_TX_LOCK_ASSERT(sc);
/* XXX clr11naggr should be done for all subframes */
ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
/* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
/*
* If the buffer is marked as busy, we can't directly
* reuse it. Instead, try to clone the buffer.
* If the clone is successful, recycle the old buffer.
* If the clone is unsuccessful, set bfs_retries to max
* to force the next bit of code to free the buffer
* for us.
*/
if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
(bf->bf_flags & ATH_BUF_BUSY)) {
struct ath_buf *nbf;
nbf = ath_tx_retry_clone(sc, an, atid, bf);
if (nbf)
/* bf has been freed at this point */
bf = nbf;
else
bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
}
if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
sc->sc_stats.ast_tx_swretrymax++;
DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
"%s: max retries: seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
ath_tx_update_baw(sc, an, atid, bf);
if (!bf->bf_state.bfs_addedbaw)
DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
"%s: wasn't added: seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
bf->bf_state.bfs_dobaw = 0;
return 1;
}
ath_tx_set_retry(sc, bf);
sc->sc_stats.ast_tx_swretries++;
bf->bf_next = NULL; /* Just to make sure */
/* Clear the aggregate state */
bf->bf_state.bfs_aggr = 0;
bf->bf_state.bfs_ndelim = 0; /* ??? needed? */
bf->bf_state.bfs_nframes = 1;
TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
return 0;
}
/*
* error pkt completion for an aggregate destination
*/
static void
ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
struct ath_tid *tid)
{
struct ieee80211_node *ni = bf_first->bf_node;
struct ath_node *an = ATH_NODE(ni);
struct ath_buf *bf_next, *bf;
ath_bufhead bf_q;
int drops = 0;
struct ieee80211_tx_ampdu *tap;
ath_bufhead bf_cq;
TAILQ_INIT(&bf_q);
TAILQ_INIT(&bf_cq);
/*
* Update rate control - all frames have failed.
*
* XXX use the length in the first frame in the series;
* XXX just so things are consistent for now.
*/
ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
&bf_first->bf_status.ds_txstat,
bf_first->bf_state.bfs_pktlen,
bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
ATH_TX_LOCK(sc);
tap = ath_tx_get_tx_tid(an, tid->tid);
sc->sc_stats.ast_tx_aggr_failall++;
/* Retry all subframes */
bf = bf_first;
while (bf) {
bf_next = bf->bf_next;
bf->bf_next = NULL; /* Remove it from the aggr list */
sc->sc_stats.ast_tx_aggr_fail++;
if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
drops++;
bf->bf_next = NULL;
TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
}
bf = bf_next;
}
/* Prepend all frames to the beginning of the queue */
while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
TAILQ_REMOVE(&bf_q, bf, bf_list);
ATH_TID_INSERT_HEAD(tid, bf, bf_list);
}
/*
* Schedule the TID to be re-tried.
*/
ath_tx_tid_sched(sc, tid);
/*
* send bar if we dropped any frames
*
* Keep the txq lock held for now, as we need to ensure
* that ni_txseqs[] is consistent (as it's being updated
* in the ifnet TX context or raw TX context.)
*/
if (drops) {
/* Suspend the TX queue and get ready to send the BAR */
ath_tx_tid_bar_suspend(sc, tid);
}
/*
* Send BAR if required
*/
if (ath_tx_tid_bar_tx_ready(sc, tid))
ath_tx_tid_bar_tx(sc, tid);
ATH_TX_UNLOCK(sc);
/* Complete frames which errored out */
while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
TAILQ_REMOVE(&bf_cq, bf, bf_list);
ath_tx_default_comp(sc, bf, 0);
}
}
/*
* Handle clean-up of packets from an aggregate list.
*
* There's no need to update the BAW here - the session is being
* torn down.
*/
static void
ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
{
struct ath_buf *bf, *bf_next;
struct ieee80211_node *ni = bf_first->bf_node;
struct ath_node *an = ATH_NODE(ni);
int tid = bf_first->bf_state.bfs_tid;
struct ath_tid *atid = &an->an_tid[tid];
ATH_TX_LOCK(sc);
/* update incomp */
atid->incomp--;
/* Update the BAW */
bf = bf_first;
while (bf) {
/* XXX refactor! */
if (bf->bf_state.bfs_dobaw) {
ath_tx_update_baw(sc, an, atid, bf);
if (!bf->bf_state.bfs_addedbaw)
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: wasn't added: seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
}
bf = bf->bf_next;
}
if (atid->incomp == 0) {
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
"%s: TID %d: cleaned up! resume!\n",
__func__, tid);
atid->cleanup_inprogress = 0;
ath_tx_tid_resume(sc, atid);
}
/* Send BAR if required */
/* XXX why would we send a BAR when transitioning to non-aggregation? */
/*
* XXX TODO: we should likely just tear down the BAR state here,
* rather than sending a BAR.
*/
if (ath_tx_tid_bar_tx_ready(sc, atid))
ath_tx_tid_bar_tx(sc, atid);
ATH_TX_UNLOCK(sc);
/* Handle frame completion as individual frames */
bf = bf_first;
while (bf) {
bf_next = bf->bf_next;
bf->bf_next = NULL;
ath_tx_default_comp(sc, bf, 1);
bf = bf_next;
}
}
/*
* Handle completion of a set of aggregate frames.
*
* Note: the completion handler is the last descriptor in the aggregate,
* not the last descriptor in the first frame.
*/
static void
ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
int fail)
{
//struct ath_desc *ds = bf->bf_lastds;
struct ieee80211_node *ni = bf_first->bf_node;
struct ath_node *an = ATH_NODE(ni);
int tid = bf_first->bf_state.bfs_tid;
struct ath_tid *atid = &an->an_tid[tid];
struct ath_tx_status ts;
struct ieee80211_tx_ampdu *tap;
ath_bufhead bf_q;
ath_bufhead bf_cq;
int seq_st, tx_ok;
int hasba, isaggr;
uint32_t ba[2];
struct ath_buf *bf, *bf_next;
int ba_index;
int drops = 0;
int nframes = 0, nbad = 0, nf;
int pktlen;
/* XXX there's too much on the stack? */
struct ath_rc_series rc[ATH_RC_NUM];
int txseq;
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
__func__, atid->hwq_depth);
/*
* Take a copy; this may be needed -after- bf_first
* has been completed and freed.
*/
ts = bf_first->bf_status.ds_txstat;
TAILQ_INIT(&bf_q);
TAILQ_INIT(&bf_cq);
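/*
 * bf_q holds subframes that will be software-retried; bf_cq holds
 * frames (block-acked or dropped) whose completion is deferred until
 * after the TX lock is released.
 */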
/* The TID state is kept behind the TXQ lock */
ATH_TX_LOCK(sc);
atid->hwq_depth--;
if (atid->hwq_depth < 0)
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
__func__, atid->hwq_depth);
/*
* If the TID is filtered, handle completing the filter
* transition before potentially kicking it to the cleanup
* function.
*
* XXX this is duplicate work, ew.
*/
if (atid->isfiltered)
ath_tx_tid_filt_comp_complete(sc, atid);
/*
* Punt cleanup to the relevant function, not our problem now
*/
if (atid->cleanup_inprogress) {
if (atid->isfiltered)
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: isfiltered=1, normal_comp?\n",
__func__);
ATH_TX_UNLOCK(sc);
ath_tx_comp_cleanup_aggr(sc, bf_first);
return;
}
/*
* If the frame is filtered, transition to filtered frame
* mode and add this to the filtered frame list.
*
* XXX TODO: figure out how this interoperates with
* BAR, pause and cleanup states.
*/
if ((ts.ts_status & HAL_TXERR_FILT) ||
(ts.ts_status != 0 && atid->isfiltered)) {
if (fail != 0)
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: isfiltered=1, fail=%d\n", __func__, fail);
ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
/* Remove from BAW */
TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
if (bf->bf_state.bfs_addedbaw)
drops++;
if (bf->bf_state.bfs_dobaw) {
ath_tx_update_baw(sc, an, atid, bf);
if (!bf->bf_state.bfs_addedbaw)
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: wasn't added: seqno %d\n",
__func__,
SEQNO(bf->bf_state.bfs_seqno));
}
bf->bf_state.bfs_dobaw = 0;
}
/*
* If any intermediate frames in the BAW were dropped when
* handling filtering things, send a BAR.
*/
if (drops)
ath_tx_tid_bar_suspend(sc, atid);
/*
* Finish up by sending a BAR if required and freeing
* the frames outside of the TX lock.
*/
goto finish_send_bar;
}
/*
* XXX for now, use the first frame in the aggregate for
* XXX rate control completion; it's at least consistent.
*/
pktlen = bf_first->bf_state.bfs_pktlen;
/*
* Handle errors first!
*
* Here, handle _any_ error as an "exceeded retries" error.
* Later on (when filtered frames are to be specially handled)
* it'll have to be expanded.
*/
#if 0
if (ts.ts_status & HAL_TXERR_XRETRY) {
#endif
if (ts.ts_status != 0) {
ATH_TX_UNLOCK(sc);
ath_tx_comp_aggr_error(sc, bf_first, atid);
return;
}
tap = ath_tx_get_tx_tid(an, tid);
/*
* extract starting sequence and block-ack bitmap
*/
/* XXX endian-ness of seq_st, ba? */
seq_st = ts.ts_seqnum;
hasba = !! (ts.ts_flags & HAL_TX_BA);
tx_ok = (ts.ts_status == 0);
isaggr = bf_first->bf_state.bfs_aggr;
ba[0] = ts.ts_ba_low;
ba[1] = ts.ts_ba_high;
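/*
 * ba[] holds the block-ack bitmap returned by the hardware,
 * split across two 32-bit words.
 */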
/*
* Copy the TX completion status and the rate control
* series from the first descriptor, as it may be freed
* before the rate control code can get its grubby fingers
* into things.
*/
memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
"isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
__func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
isaggr, seq_st, hasba, ba[0], ba[1]);
/*
* The reference driver doesn't do this; it simply ignores
* this check in its entirety.
*
* I've seen this occur when using iperf to send traffic
* out tid 1 - the aggregate frames are all marked as TID 1,
* but the TXSTATUS has TID=0. So, let's just ignore this
* check.
*/
#if 0
/* Occasionally, the MAC sends a tx status for the wrong TID. */
if (tid != ts.ts_tid) {
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
__func__, tid, ts.ts_tid);
tx_ok = 0;
}
#endif
/* AR5416 BA bug; this requires an interface reset */
if (isaggr && tx_ok && (! hasba)) {
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
"seq_st=%d\n",
__func__, hasba, tx_ok, isaggr, seq_st);
/* XXX TODO: schedule an interface reset */
#ifdef ATH_DEBUG
ath_printtxbuf(sc, bf_first,
sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
#endif
}
/*
* Walk the list of frames, figure out which ones were correctly
* sent and which weren't.
*/
bf = bf_first;
nf = bf_first->bf_state.bfs_nframes;
/* bf_first is going to be invalid once this list is walked */
bf_first = NULL;
/*
* Walk the list of completed frames and determine
* which need to be completed and which need to be
* retransmitted.
*
* For completed frames, the completion functions need
* to be called at the end of this function as the last
* node reference may free the node.
*
* Finally, since the TXQ lock can't be held during the
* completion callback (to avoid lock recursion),
* the completion calls have to be done outside of the
* lock.
*/
while (bf) {
nframes++;
ba_index = ATH_BA_INDEX(seq_st,
SEQNO(bf->bf_state.bfs_seqno));
bf_next = bf->bf_next;
bf->bf_next = NULL; /* Remove it from the aggr list */
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: checking bf=%p seqno=%d; ack=%d\n",
__func__, bf, SEQNO(bf->bf_state.bfs_seqno),
ATH_BA_ISSET(ba, ba_index));
if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
sc->sc_stats.ast_tx_aggr_ok++;
ath_tx_update_baw(sc, an, atid, bf);
bf->bf_state.bfs_dobaw = 0;
if (!bf->bf_state.bfs_addedbaw)
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: wasn't added: seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
bf->bf_next = NULL;
TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
} else {
sc->sc_stats.ast_tx_aggr_fail++;
if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
drops++;
bf->bf_next = NULL;
TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
}
nbad++;
}
bf = bf_next;
}
/*
* Now that the BAW updates have been done, unlock
*
* txseq is grabbed before the lock is released so we
* have a consistent view of what -was- in the BAW.
* Anything after this point will not yet have been
* TXed.
*/
txseq = tap->txa_start;
ATH_TX_UNLOCK(sc);
if (nframes != nf)
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: num frames seen=%d; bf nframes=%d\n",
__func__, nframes, nf);
/*
* Now we know how many frames were bad, call the rate
* control code.
*/
if (fail == 0)
ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes,
nbad);
/*
* Send a BAR if we dropped any frames
*/
if (drops) {
/* Suspend the TX queue and get ready to send the BAR */
ATH_TX_LOCK(sc);
ath_tx_tid_bar_suspend(sc, atid);
ATH_TX_UNLOCK(sc);
}
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: txa_start now %d\n", __func__, tap->txa_start);
ATH_TX_LOCK(sc);
/* Prepend all frames to the beginning of the queue */
while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
TAILQ_REMOVE(&bf_q, bf, bf_list);
ATH_TID_INSERT_HEAD(atid, bf, bf_list);
}
/*
* Reschedule to grab some further frames.
*/
ath_tx_tid_sched(sc, atid);
/*
* If the queue is filtered, re-schedule as required.
*
* This is required as there may be a subsequent TX descriptor
* for this end-node that has CLRDMASK set, so it's quite possible
* that a filtered frame will be followed by a non-filtered
* (complete or otherwise) frame.
*
* XXX should we do this before we complete the frame?
*/
if (atid->isfiltered)
ath_tx_tid_filt_comp_complete(sc, atid);
finish_send_bar:
/*
* Send BAR if required
*/
if (ath_tx_tid_bar_tx_ready(sc, atid))
ath_tx_tid_bar_tx(sc, atid);
ATH_TX_UNLOCK(sc);
/* Do deferred completion */
while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
TAILQ_REMOVE(&bf_cq, bf, bf_list);
ath_tx_default_comp(sc, bf, 0);
}
}
/*
* Handle completion of unaggregated frames in an ADDBA
* session.
*
* Fail is set to 1 if the entry is being freed via a call to
* ath_tx_draintxq().
*/
static void
ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
struct ieee80211_node *ni = bf->bf_node;
struct ath_node *an = ATH_NODE(ni);
int tid = bf->bf_state.bfs_tid;
struct ath_tid *atid = &an->an_tid[tid];
struct ath_tx_status ts;
int drops = 0;
/*
* Take a copy of this; filtering/cloning the frame may free the
* bf pointer.
*/
ts = bf->bf_status.ds_txstat;
/*
* Update rate control status here, before we possibly
* punt to retry or cleanup.
*
* Do it outside of the TXQ lock.
*/
if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
&bf->bf_status.ds_txstat,
bf->bf_state.bfs_pktlen,
1, (ts.ts_status == 0) ? 0 : 1);
/*
* This is called early so atid->hwq_depth can be tracked.
* This unfortunately means that it's released and regrabbed
* during retry and cleanup. That's rather inefficient.
*/
ATH_TX_LOCK(sc);
if (tid == IEEE80211_NONQOS_TID)
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
__func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
SEQNO(bf->bf_state.bfs_seqno));
atid->hwq_depth--;
if (atid->hwq_depth < 0)
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
__func__, atid->hwq_depth);
/*
* If the TID is filtered, handle completing the filter
* transition before potentially kicking it to the cleanup
* function.
*/
if (atid->isfiltered)
ath_tx_tid_filt_comp_complete(sc, atid);
/*
* If a cleanup is in progress, punt to comp_cleanup
* rather than handling it here. It's thus its
* responsibility to clean up, call the completion
* function in net80211, etc.
*/
if (atid->cleanup_inprogress) {
if (atid->isfiltered)
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: isfiltered=1, normal_comp?\n",
__func__);
ATH_TX_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
__func__);
ath_tx_comp_cleanup_unaggr(sc, bf);
return;
}
/*
* XXX TODO: how does cleanup, BAR and filtered frame handling
* overlap?
*
* If the frame is filtered OR if it's any failure but
* the TID is filtered, the frame must be added to the
* filtered frame list.
*
* However - a busy buffer can't be added to the filtered
* list as it will end up being recycled without having
* been made available for the hardware.
*/
if ((ts.ts_status & HAL_TXERR_FILT) ||
(ts.ts_status != 0 && atid->isfiltered)) {
int freeframe;
if (fail != 0)
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: isfiltered=1, fail=%d\n",
__func__, fail);
freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
/*
* If freeframe=0 then bf is no longer ours; don't
* touch it.
*/
if (freeframe) {
/* Remove from BAW */
if (bf->bf_state.bfs_addedbaw)
drops++;
if (bf->bf_state.bfs_dobaw) {
ath_tx_update_baw(sc, an, atid, bf);
if (!bf->bf_state.bfs_addedbaw)
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: wasn't added: seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
}
bf->bf_state.bfs_dobaw = 0;
}
/*
* If the frame couldn't be filtered, treat it as a drop and
* prepare to send a BAR.
*/
if (freeframe && drops)
ath_tx_tid_bar_suspend(sc, atid);
/*
* Send BAR if required
*/
if (ath_tx_tid_bar_tx_ready(sc, atid))
ath_tx_tid_bar_tx(sc, atid);
ATH_TX_UNLOCK(sc);
/*
* If freeframe is set, then the frame couldn't be
* cloned and bf is still valid. Just complete/free it.
*/
if (freeframe)
ath_tx_default_comp(sc, bf, fail);
return;
}
/*
* Don't bother with the retry check if all frames
* are being failed (eg during queue deletion.)
*/
#if 0
if (fail == 0 && ts.ts_status & HAL_TXERR_XRETRY) {
#endif
if (fail == 0 && ts.ts_status != 0) {
ATH_TX_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
__func__);
ath_tx_aggr_retry_unaggr(sc, bf);
return;
}
/* Success? Complete */
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
__func__, tid, SEQNO(bf->bf_state.bfs_seqno));
if (bf->bf_state.bfs_dobaw) {
ath_tx_update_baw(sc, an, atid, bf);
bf->bf_state.bfs_dobaw = 0;
if (!bf->bf_state.bfs_addedbaw)
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: wasn't added: seqno %d\n",
__func__, SEQNO(bf->bf_state.bfs_seqno));
}
/*
* If the queue is filtered, re-schedule as required.
*
* This is required as there may be a subsequent TX descriptor
* for this end-node that has CLRDMASK set, so it's quite possible
* that a filtered frame will be followed by a non-filtered
* (complete or otherwise) frame.
*
* XXX should we do this before we complete the frame?
*/
if (atid->isfiltered)
ath_tx_tid_filt_comp_complete(sc, atid);
/*
* Send BAR if required
*/
if (ath_tx_tid_bar_tx_ready(sc, atid))
ath_tx_tid_bar_tx(sc, atid);
ATH_TX_UNLOCK(sc);
ath_tx_default_comp(sc, bf, fail);
/* bf is freed at this point */
}
void
ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
if (bf->bf_state.bfs_aggr)
ath_tx_aggr_comp_aggr(sc, bf, fail);
else
ath_tx_aggr_comp_unaggr(sc, bf, fail);
}
/*
* Schedule some packets from the given node/TID to the hardware.
*
* This is the aggregate version.
*/
void
ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid)
{
struct ath_buf *bf;
struct ath_txq *txq = sc->sc_ac2q[tid->ac];
struct ieee80211_tx_ampdu *tap;
ATH_AGGR_STATUS status;
ath_bufhead bf_q;
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
ATH_TX_LOCK_ASSERT(sc);
/*
* XXX TODO: If we're called for a queue that we're leaking frames to,
* ensure we only leak one.
*/
tap = ath_tx_get_tx_tid(an, tid->tid);
if (tid->tid == IEEE80211_NONQOS_TID)
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: called for TID=NONQOS_TID?\n", __func__);
for (;;) {
status = ATH_AGGR_DONE;
/*
* If the upper layer has paused the TID, don't
* queue any further packets.
*
* This can also occur from the completion task because
* of packet loss; but as it's serialised with this code,
* it won't "appear" halfway through queuing packets.
*/
if (! ath_tx_tid_can_tx_or_sched(sc, tid))
break;
bf = ATH_TID_FIRST(tid);
if (bf == NULL) {
break;
}
/*
* If the packet doesn't fall within the BAW (eg a NULL
* data frame), schedule it directly; continue.
*/
if (! bf->bf_state.bfs_dobaw) {
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: non-baw packet\n",
__func__);
ATH_TID_REMOVE(tid, bf, bf_list);
if (bf->bf_state.bfs_nframes > 1)
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: aggr=%d, nframes=%d\n",
__func__,
bf->bf_state.bfs_aggr,
bf->bf_state.bfs_nframes);
/*
* This shouldn't happen - such frames shouldn't
* ever have been queued as an aggregate in the
* first place. However, make sure the fields
* are correctly set up just to be totally sure.
*/
bf->bf_state.bfs_aggr = 0;
bf->bf_state.bfs_nframes = 1;
/* Update CLRDMASK just before this frame is queued */
ath_tx_update_clrdmask(sc, tid, bf);
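/*
 * Program rate control, duration/protection, RTS/CTS and the
 * TX descriptor for this single (non-BAW) frame.
 */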
ath_tx_do_ratelookup(sc, bf);
ath_tx_calc_duration(sc, bf);
ath_tx_calc_protection(sc, bf);
ath_tx_set_rtscts(sc, bf);
ath_tx_rate_fill_rcflags(sc, bf);
ath_tx_setds(sc, bf);
ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
sc->sc_aggr_stats.aggr_nonbaw_pkt++;
/* Queue the packet; continue */
goto queuepkt;
}
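/* Start with an empty staging list for the candidate aggregate. */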
TAILQ_INIT(&bf_q);
/*
* Do a rate control lookup on the first frame in the
* list. The rate control code needs that to occur
* before it can determine whether to TX.
* It's inaccurate because the rate control code doesn't
* really "do" aggregate lookups, so it only considers
* the size of the first frame.
*/
ath_tx_do_ratelookup(sc, bf);
bf->bf_state.bfs_rc[3].rix = 0;
bf->bf_state.bfs_rc[3].tries = 0;
ath_tx_calc_duration(sc, bf);
ath_tx_calc_protection(sc, bf);
ath_tx_set_rtscts(sc, bf);
ath_tx_rate_fill_rcflags(sc, bf);
status = ath_tx_form_aggr(sc, an, tid, &bf_q);
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: ath_tx_form_aggr() status=%d\n", __func__, status);
/*
* No frames to be picked up - out of BAW
*/
if (TAILQ_EMPTY(&bf_q))
break;
/*
* This assumes that the descriptors in the ath_bufhead
* are already linked together via bf_next pointers.
*/
bf = TAILQ_FIRST(&bf_q);
if (status == ATH_AGGR_8K_LIMITED)
sc->sc_aggr_stats.aggr_rts_aggr_limited++;
/*
* If it's the only frame, send it as a non-aggregate;
* assume that ath_tx_form_aggr() has checked
* whether it's in the BAW and added it appropriately.
*/
if (bf->bf_state.bfs_nframes == 1) {
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: single-frame aggregate\n", __func__);
/* Update CLRDMASK just before this frame is queued */
ath_tx_update_clrdmask(sc, tid, bf);
bf->bf_state.bfs_aggr = 0;
bf->bf_state.bfs_ndelim = 0;
ath_tx_setds(sc, bf);
ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
if (status == ATH_AGGR_BAW_CLOSED)
sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
else
sc->sc_aggr_stats.aggr_single_pkt++;
} else {
DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
"%s: multi-frame aggregate: %d frames, "
"length %d\n",
__func__, bf->bf_state.bfs_nframes,
bf->bf_state.bfs_al);
bf->bf_state.bfs_aggr = 1;
sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
sc->sc_aggr_stats.aggr_aggr_pkt++;
/* Update CLRDMASK just before this frame is queued */
ath_tx_update_clrdmask(sc, tid, bf);
/*
* Calculate the duration/protection as required.
*/
ath_tx_calc_duration(sc, bf);
ath_tx_calc_protection(sc, bf);
/*
* Update the rate and rtscts information based on the
* rate decision made by the rate control code;
* the first frame in the aggregate needs it.
*/
ath_tx_set_rtscts(sc, bf);
/*
* Setup the relevant descriptor fields
* for aggregation. The first descriptor
* already points to the rest in the chain.
*/
ath_tx_setds_11n(sc, bf);
}
queuepkt:
/* Set completion handler, multi-frame aggregate or not */
bf->bf_comp = ath_tx_aggr_comp;
if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
/*
* Update leak count and frame config if we're leaking frames.
*
* XXX TODO: it should update all frames in an aggregate
* correctly!
*/
ath_tx_leak_count_update(sc, tid, bf);
/* Punt to txq */
ath_tx_handoff(sc, txq, bf);
/* Track outstanding buffer count to hardware */
/* aggregates are "one" buffer */
tid->hwq_depth++;
/*
* Break out if ath_tx_form_aggr() indicated
* there can't be any further progress (eg BAW is full.)
* Checking for an empty txq is done above.
*
* XXX locking on txq here?
*/
/* XXX TXQ locking */
if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
(status == ATH_AGGR_BAW_CLOSED ||
status == ATH_AGGR_LEAK_CLOSED))
break;
}
}
/*
* Schedule some packets from the given node/TID to the hardware.
*
* XXX TODO: this routine doesn't enforce the maximum TXQ depth.
* It just dumps frames into the TXQ. We should limit how deep
* the transmit queue can grow for frames dispatched to the given
* TXQ.
*
* To avoid locking issues, either we need to own the TXQ lock
* at this point, or we need to pass in the maximum frame count
* from the caller.
*/
void
ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid)
{
struct ath_buf *bf;
struct ath_txq *txq = sc->sc_ac2q[tid->ac];
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
__func__, an, tid->tid);
ATH_TX_LOCK_ASSERT(sc);
/* Check - is AMPDU pending or running? then print out something */
if (ath_tx_ampdu_pending(sc, an, tid->tid))
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
__func__, tid->tid);
if (ath_tx_ampdu_running(sc, an, tid->tid))
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
__func__, tid->tid);
for (;;) {
/*
* If the upper layers have paused the TID, don't
* queue any further packets.
*
* XXX if we are leaking frames, make sure we decrement
* that counter _and_ we continue here.
*/
if (! ath_tx_tid_can_tx_or_sched(sc, tid))
break;
bf = ATH_TID_FIRST(tid);
if (bf == NULL) {
break;
}
ATH_TID_REMOVE(tid, bf, bf_list);
/* Sanity check! */
if (tid->tid != bf->bf_state.bfs_tid) {
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
" tid %d\n", __func__, bf->bf_state.bfs_tid,
tid->tid);
}
/* Normal completion handler */
bf->bf_comp = ath_tx_normal_comp;
/*
* Override this for now, until the non-aggregate
* completion handler correctly handles software retransmits.
*/
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
/* Update CLRDMASK just before this frame is queued */
ath_tx_update_clrdmask(sc, tid, bf);
/* Program descriptors + rate control */
ath_tx_do_ratelookup(sc, bf);
ath_tx_calc_duration(sc, bf);
ath_tx_calc_protection(sc, bf);
ath_tx_set_rtscts(sc, bf);
ath_tx_rate_fill_rcflags(sc, bf);
ath_tx_setds(sc, bf);
/*
* Update the current leak count if
* we're leaking frames; and set the
* MORE flag as appropriate.
*/
ath_tx_leak_count_update(sc, tid, bf);
/* Track outstanding buffer count to hardware */
/* aggregates are "one" buffer */
tid->hwq_depth++;
/* Punt to hardware or software txq */
ath_tx_handoff(sc, txq, bf);
}
}
/*
* Schedule some packets to the given hardware queue.
*
* This function walks the list of TIDs (ie, ath_node TIDs
* with queued traffic) and attempts to schedule traffic
* from them.
*
* TID scheduling is implemented as a FIFO, with TIDs being
* added to the end of the queue after some frames have been
* scheduled.
*/
void
ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_tid *tid, *next, *last;
ATH_TX_LOCK_ASSERT(sc);
/*
* Don't schedule if the hardware queue is busy.
* This (hopefully) gives some more time to aggregate
* some packets in the aggregation queue.
*
* XXX It doesn't stop a parallel sender from sneaking
* in transmitting a frame!
*/
/* XXX TXQ locking */
if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
sc->sc_aggr_stats.aggr_sched_nopkt++;
return;
}
if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
sc->sc_aggr_stats.aggr_sched_nopkt++;
return;
}
last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
/*
* Suspend paused queues here; they'll be resumed
* once the addba completes or times out.
*/
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
__func__, tid->tid, tid->paused);
ath_tx_tid_unsched(sc, tid);
/*
* This node may be in power-save and we're leaking
* a frame; be careful.
*/
if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
goto loop_done;
}
if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
else
ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
/* Not empty? Re-schedule */
if (tid->axq_depth != 0)
ath_tx_tid_sched(sc, tid);
/*
* Give the software queue time to aggregate more
* packets. If we aren't running aggregation then
* we should still limit the hardware queue depth.
*/
/* XXX TXQ locking */
if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
break;
}
if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
break;
}
loop_done:
/*
* If this was the last entry on the original list, stop.
* Otherwise nodes that have been rescheduled onto the end
* of the TID FIFO list will just keep being rescheduled.
*
* XXX What should we do about nodes that were paused
* but are pending a leaking frame in response to a ps-poll?
* They'll be put at the front of the list; so they'll
* prematurely trigger this condition! Ew.
*/
if (tid == last)
break;
}
}
/*
* TX addba handling
*/
/*
* Return net80211 TID struct pointer, or NULL for none
*/
struct ieee80211_tx_ampdu *
ath_tx_get_tx_tid(struct ath_node *an, int tid)
{
struct ieee80211_node *ni = &an->an_node;
struct ieee80211_tx_ampdu *tap;
if (tid == IEEE80211_NONQOS_TID)
return NULL;
tap = &ni->ni_tx_ampdu[tid];
return tap;
}
/*
* Is AMPDU-TX running?
*/
static int
ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
{
struct ieee80211_tx_ampdu *tap;
if (tid == IEEE80211_NONQOS_TID)
return 0;
tap = ath_tx_get_tx_tid(an, tid);
if (tap == NULL)
return 0; /* Not valid; default to not running */
return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
}
/*
* Is AMPDU-TX negotiation pending?
*/
static int
ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
{
struct ieee80211_tx_ampdu *tap;
if (tid == IEEE80211_NONQOS_TID)
return 0;
tap = ath_tx_get_tx_tid(an, tid);
if (tap == NULL)
return 0; /* Not valid; default to not pending */
return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
}
/*
* Is AMPDU-TX pending for the given TID?
*/
/*
* Method to handle sending an ADDBA request.
*
* We tap this so the relevant flags can be set to pause the TID
* whilst waiting for the response.
*
* XXX there's no timeout handler we can override?
*/
int
ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
int dialogtoken, int baparamset, int batimeout)
{
struct ath_softc *sc = ni->ni_ic->ic_softc;
int tid = tap->txa_tid;
struct ath_node *an = ATH_NODE(ni);
struct ath_tid *atid = &an->an_tid[tid];
/*
* XXX danger Will Robinson!
*
* Although the taskqueue may be running and scheduling some more
* packets, these should all be _before_ the addba sequence number.
* However, net80211 will keep self-assigning sequence numbers
* until addba has been negotiated.
*
* In the past, these packets would be "paused" (which still works
* fine, as they're being scheduled to the driver in the same
* serialised method which is calling the addba request routine)
* and when the aggregation session begins, they'll be dequeued
* as aggregate packets and added to the BAW. However, now there's
* a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
* packets. Thus they never get included in the BAW tracking and
* this can cause the initial burst of packets after the addba
* negotiation to "hang", as they quickly fall outside the BAW.
*
* The "eventual" solution should be to tag these packets with
* dobaw. Although net80211 has given us a sequence number,
* it'll be "after" the left edge of the BAW and thus it'll
* fall within it.
*/
ATH_TX_LOCK(sc);
/*
* This is a bit annoying. Until net80211 HT code inherits some
* (any) locking, we may have this called in parallel BUT only
* one response/timeout will be called. Grr.
*/
if (atid->addba_tx_pending == 0) {
ath_tx_tid_pause(sc, atid);
atid->addba_tx_pending = 1;
}
ATH_TX_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
"%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
__func__,
ni->ni_macaddr,
":",
dialogtoken, baparamset, batimeout);
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
"%s: txa_start=%d, ni_txseqs=%d\n",
__func__, tap->txa_start, ni->ni_txseqs[tid]);
return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
batimeout);
}
/*
* Handle an ADDBA response.
*
* We unpause the queue so TX'ing can resume.
*
* Any packets TX'ed from this point should be "aggregate" (whether
* aggregate or not) so the BAW is updated.
*
* Note! net80211 keeps self-assigning sequence numbers until
* ampdu is negotiated. This means the initially-negotiated BAW left
* edge won't match the ni->ni_txseq.
*
* So, being very dirty, the BAW left edge is "slid" here to match
* ni->ni_txseq.
*
* What likely SHOULD happen is that all packets subsequent to the
* addba request should be tagged as aggregate and queued as non-aggregate
* frames; thus updating the BAW. For now though, I'll just slide the
* window.
*/
int
ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
int status, int code, int batimeout)
{
struct ath_softc *sc = ni->ni_ic->ic_softc;
int tid = tap->txa_tid;
struct ath_node *an = ATH_NODE(ni);
struct ath_tid *atid = &an->an_tid[tid];
int r;
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
"%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
ni->ni_macaddr,
":",
status, code, batimeout);
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
"%s: txa_start=%d, ni_txseqs=%d\n",
__func__, tap->txa_start, ni->ni_txseqs[tid]);
/*
* Call this first, so the interface flags get updated
* before the TID is unpaused. Otherwise a race condition
* exists where the unpaused TID doesn't yet have
* IEEE80211_AGGR_RUNNING set.
*/
r = sc->sc_addba_response(ni, tap, status, code, batimeout);
ATH_TX_LOCK(sc);
atid->addba_tx_pending = 0;
/*
* XXX dirty!
* Slide the BAW left edge to wherever net80211 left it for us.
* Read above for more information.
*/
tap->txa_start = ni->ni_txseqs[tid];
ath_tx_tid_resume(sc, atid);
ATH_TX_UNLOCK(sc);
return r;
}
/*
* Stop ADDBA on a queue.
*
* This can be called whilst BAR TX is currently active on the queue,
* so make sure this is unblocked before continuing.
*/
void
ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
struct ath_softc *sc = ni->ni_ic->ic_softc;
int tid = tap->txa_tid;
struct ath_node *an = ATH_NODE(ni);
struct ath_tid *atid = &an->an_tid[tid];
ath_bufhead bf_cq;
struct ath_buf *bf;
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
__func__,
ni->ni_macaddr,
":");
/*
* Pause TID traffic early, so there aren't any races.
* Unblock the pending BAR held traffic, if it's currently paused.
*/
ATH_TX_LOCK(sc);
ath_tx_tid_pause(sc, atid);
if (atid->bar_wait) {
/*
* bar_unsuspend() expects bar_tx == 1, as it should be
* called from the TX completion path. This quietens
* the warning. It's cleared for us anyway.
*/
atid->bar_tx = 1;
ath_tx_tid_bar_unsuspend(sc, atid);
}
ATH_TX_UNLOCK(sc);
/* There's no need to hold the TXQ lock here */
sc->sc_addba_stop(ni, tap);
/*
* ath_tx_tid_cleanup will resume the TID if possible, otherwise
* it'll set the cleanup flag, and it'll be unpaused once
* things have been cleaned up.
*/
TAILQ_INIT(&bf_cq);
ATH_TX_LOCK(sc);
/*
* In case there's a followup call to this, only call it
* if we don't have a cleanup in progress.
*
* Since we've paused the queue above, we need to make
* sure we unpause if there's already a cleanup in
* progress - it means something else is also doing
* this stuff, so we don't need to also keep it paused.
*/
if (atid->cleanup_inprogress) {
ath_tx_tid_resume(sc, atid);
} else {
ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
/*
* Unpause the TID if no cleanup is required.
*/
if (! atid->cleanup_inprogress)
ath_tx_tid_resume(sc, atid);
}
ATH_TX_UNLOCK(sc);
/* Handle completing frames and fail them */
while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
TAILQ_REMOVE(&bf_cq, bf, bf_list);
ath_tx_default_comp(sc, bf, 1);
}
}
/*
* Handle a node reassociation.
*
* We may have a bunch of frames queued to the hardware; those need
* to be marked as cleanup.
*/
void
ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
{
struct ath_tid *tid;
int i;
ath_bufhead bf_cq;
struct ath_buf *bf;
TAILQ_INIT(&bf_cq);
ATH_TX_UNLOCK_ASSERT(sc);
ATH_TX_LOCK(sc);
for (i = 0; i < IEEE80211_TID_SIZE; i++) {
tid = &an->an_tid[i];
if (tid->hwq_depth == 0)
continue;
DPRINTF(sc, ATH_DEBUG_NODE,
"%s: %6D: TID %d: cleaning up TID\n",
__func__,
an->an_node.ni_macaddr,
":",
i);
/*
* In case there's a followup call to this, only call it
* if we don't have a cleanup in progress.
*/
if (! tid->cleanup_inprogress) {
ath_tx_tid_pause(sc, tid);
ath_tx_tid_cleanup(sc, an, i, &bf_cq);
/*
* Unpause the TID if no cleanup is required.
*/
if (! tid->cleanup_inprogress)
ath_tx_tid_resume(sc, tid);
}
}
ATH_TX_UNLOCK(sc);
/* Handle completing frames and fail them */
while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
TAILQ_REMOVE(&bf_cq, bf, bf_list);
ath_tx_default_comp(sc, bf, 1);
}
}
/*
* Note: net80211 bar_timeout() doesn't call this function on BAR failure;
* it simply tears down the aggregation session. Ew.
*
* It however will call ieee80211_ampdu_stop() which will call
* ic->ic_addba_stop().
*
* XXX This uses a hard-coded max BAR count value; the whole
* XXX BAR TX success or failure should be better handled!
*/
void
ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
int status)
{
struct ath_softc *sc = ni->ni_ic->ic_softc;
int tid = tap->txa_tid;
struct ath_node *an = ATH_NODE(ni);
struct ath_tid *atid = &an->an_tid[tid];
int attempts = tap->txa_attempts;
int old_txa_start;
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
__func__,
ni->ni_macaddr,
":",
tap->txa_tid,
atid->tid,
status,
attempts,
tap->txa_start,
tap->txa_seqpending);
/* Note: This may update the BAW details */
/*
* XXX What if this does slide the BAW along? We need to somehow
* XXX either fix things when it does happen, or prevent the
* XXX seqpending value from being anything other than exactly what
* XXX the hell we want!
*
* XXX So for now, how about I do this inside the TX lock
* XXX and just correct it afterwards? The below condition should
* XXX never happen and if it does I need to fix all kinds of things.
*/
ATH_TX_LOCK(sc);
old_txa_start = tap->txa_start;
sc->sc_bar_response(ni, tap, status);
if (tap->txa_start != old_txa_start) {
device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
__func__,
tid,
tap->txa_start,
old_txa_start);
}
tap->txa_start = old_txa_start;
ATH_TX_UNLOCK(sc);
/* Unpause the TID */
/*
* XXX if this is attempt=50, the TID will be downgraded
* XXX to a non-aggregate session. So we must unpause the
* XXX TID here or it'll never be done.
*
* Also, don't call it if bar_tx/bar_wait are 0; something
* has beaten us to the punch? (XXX figure out what?)
*/
if (status == 0 || attempts == 50) {
ATH_TX_LOCK(sc);
if (atid->bar_tx == 0 || atid->bar_wait == 0)
DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
"%s: huh? bar_tx=%d, bar_wait=%d\n",
__func__,
atid->bar_tx, atid->bar_wait);
else
ath_tx_tid_bar_unsuspend(sc, atid);
ATH_TX_UNLOCK(sc);
}
}
/*
* This is called whenever the pending ADDBA request times out.
* Unpause and reschedule the TID.
*/
void
ath_addba_response_timeout(struct ieee80211_node *ni,
struct ieee80211_tx_ampdu *tap)
{
struct ath_softc *sc = ni->ni_ic->ic_softc;
int tid = tap->txa_tid;
struct ath_node *an = ATH_NODE(ni);
struct ath_tid *atid = &an->an_tid[tid];
DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
"%s: %6D: TID=%d, called; resuming\n",
__func__,
ni->ni_macaddr,
":",
tid);
ATH_TX_LOCK(sc);
atid->addba_tx_pending = 0;
ATH_TX_UNLOCK(sc);
/* Note: This updates the aggregate state to (again) pending */
sc->sc_addba_response_timeout(ni, tap);
/* Unpause the TID; which reschedules it */
ATH_TX_LOCK(sc);
ath_tx_tid_resume(sc, atid);
ATH_TX_UNLOCK(sc);
}
/*
* Check if a node is asleep or not.
*/
int
ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
{
ATH_TX_LOCK_ASSERT(sc);
return (an->an_is_powersave);
}
/*
* Mark a node as currently "in powersaving."
* This suspends all traffic on the node.
*
* This must be called with the node/tx locks free.
*
* XXX TODO: the locking silliness below is due to how the node
* locking currently works. Right now, the node lock is grabbed
* to do rate control lookups and these are done with the TX
* queue lock held. This means the node lock can't be grabbed
* first here or a LOR will occur.
*
* Eventually (hopefully!) the TX path code will only grab
* the TXQ lock when transmitting and the ath_node lock when
* doing node/TID operations. There are other complications -
* the sched/unsched operations involve walking the per-txq
* 'active tid' list and this requires both locks to be held.
*/
void
ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
{
struct ath_tid *atid;
struct ath_txq *txq;
int tid;
ATH_TX_UNLOCK_ASSERT(sc);
/* Suspend all traffic on the node */
ATH_TX_LOCK(sc);
if (an->an_is_powersave) {
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: %6D: node was already asleep!\n",
__func__, an->an_node.ni_macaddr, ":");
ATH_TX_UNLOCK(sc);
return;
}
for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
atid = &an->an_tid[tid];
txq = sc->sc_ac2q[atid->ac];
ath_tx_tid_pause(sc, atid);
}
/* Mark node as in powersaving */
an->an_is_powersave = 1;
ATH_TX_UNLOCK(sc);
}
/*
* Mark a node as currently "awake."
* This resumes all traffic to the node.
*/
void
ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
{
struct ath_tid *atid;
struct ath_txq *txq;
int tid;
ATH_TX_UNLOCK_ASSERT(sc);
ATH_TX_LOCK(sc);
/* !? */
if (an->an_is_powersave == 0) {
ATH_TX_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: an=%p: node was already awake\n",
__func__, an);
return;
}
/* Mark node as awake */
an->an_is_powersave = 0;
/*
* Clear any pending leaked frame requests
*/
an->an_leak_count = 0;
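/* Resume traffic on every TID for this node. */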
for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
atid = &an->an_tid[tid];
txq = sc->sc_ac2q[atid->ac];
ath_tx_tid_resume(sc, atid);
}
ATH_TX_UNLOCK(sc);
}
static int
ath_legacy_dma_txsetup(struct ath_softc *sc)
{
/* nothing new needed */
return (0);
}
static int
ath_legacy_dma_txteardown(struct ath_softc *sc)
{
/* nothing new needed */
return (0);
}
void
ath_xmit_setup_legacy(struct ath_softc *sc)
{
/*
* For now, just set the descriptor length to sizeof(ath_desc);
* worry about extracting the real length out of the HAL later.
*/
sc->sc_tx_desclen = sizeof(struct ath_desc);
sc->sc_tx_statuslen = sizeof(struct ath_desc);
sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */
sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
}
Index: head/sys/dev/ath/if_ath_tx_edma.c
===================================================================
--- head/sys/dev/ath/if_ath_tx_edma.c (revision 287196)
+++ head/sys/dev/ath/if_ath_tx_edma.c (revision 287197)
@@ -1,885 +1,875 @@
/*-
* Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for the Atheros Wireless LAN controller.
*
* This software is derived from work of Atsushi Onoe; his contribution
* is greatly appreciated.
*/
#include "opt_inet.h"
#include "opt_ath.h"
/*
* This is needed for register operations which are performed
* by the driver - eg, calls to ath_hal_gettsf32().
*
* It's also required for any AH_DEBUG checks in here, eg the
* module dependencies.
*/
#include "opt_ah.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>
#include <sys/smp.h> /* for mp_ncpus */
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net/bpf.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h> /* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>
#include <dev/ath/if_ath_debug.h>
#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tsf.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_sysctl.h>
#include <dev/ath/if_ath_led.h>
#include <dev/ath/if_ath_keycache.h>
#include <dev/ath/if_ath_rx.h>
#include <dev/ath/if_ath_beacon.h>
#include <dev/ath/if_athdfs.h>
#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
#include <dev/ath/if_ath_tx_edma.h>
#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif
/*
* some general macros
*/
#define INCR(_l, _sz) (_l) ++; (_l) &= ((_sz) - 1)
#define DECR(_l, _sz) (_l) --; (_l) &= ((_sz) - 1)
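/*
 * Note: INCR/DECR mask the index with (_sz - 1), so they only wrap
 * correctly when _sz is a power of two.
 */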
/*
* XXX doesn't belong here, and should be tunable
*/
#define ATH_TXSTATUS_RING_SIZE 512
MALLOC_DECLARE(M_ATHDEV);
static void ath_edma_tx_processq(struct ath_softc *sc, int dosched);
/*
* Push some frames into the TX FIFO if we have space.
*/
static void
ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_buf *bf, *bf_last;
int i = 0;
ATH_TXQ_LOCK_ASSERT(txq);
DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: called\n",
__func__,
txq->axq_qnum);
TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH)
break;
/*
* We have space in the FIFO - so let's push a frame
* into it.
*/
/*
* Remove it from the normal list
*/
ATH_TXQ_REMOVE(txq, bf, bf_list);
/*
* XXX for now, we only dequeue a frame at a time, so
* that's only one buffer. Later on when we just
* push this staging _list_ into the queue, we'll
* set bf_last to the end pointer in the list.
*/
bf_last = bf;
DPRINTF(sc, ATH_DEBUG_TX_PROC,
"%s: Q%d: depth=%d; pushing %p->%p\n",
__func__,
txq->axq_qnum,
txq->axq_fifo_depth,
bf,
bf_last);
/*
* Append it to the FIFO staging list
*/
ATH_TXQ_INSERT_TAIL(&txq->fifo, bf, bf_list);
/*
* Set fifo start / fifo end flags appropriately
*
*/
bf->bf_flags |= ATH_BUF_FIFOPTR;
bf_last->bf_flags |= ATH_BUF_FIFOEND;
/*
* Push _into_ the FIFO.
*/
ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
#endif/* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */
txq->axq_fifo_depth++;
i++;
}
if (i > 0)
ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}
/*
* Re-initialise the DMA FIFO with the current contents of
* said TXQ.
*
* This should only be called as part of the chip reset path, as it
* assumes the FIFO is currently empty.
*/
static void
ath_edma_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_buf *bf;
int i = 0;
int fifostart = 1;
int old_fifo_depth;
DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: called\n",
__func__,
txq->axq_qnum);
ATH_TXQ_LOCK_ASSERT(txq);
/*
* Let's log if the tracked FIFO depth doesn't match
* what we actually push in.
*/
old_fifo_depth = txq->axq_fifo_depth;
txq->axq_fifo_depth = 0;
/*
* Walk the FIFO staging list, looking for "head" entries.
* Since we may have a partially completed list of frames,
* we push the first frame we see into the FIFO and re-mark
* it as the head entry. We then skip entries until we see
* FIFO end, at which point we get ready to push another
* entry into the FIFO.
*/
TAILQ_FOREACH(bf, &txq->fifo.axq_q, bf_list) {
/*
* If we're looking for FIFOEND and we haven't found
* it, skip.
*
* If we're looking for FIFOEND and we've found it,
* reset for another descriptor.
*/
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
#endif/* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */
if (fifostart == 0) {
if (bf->bf_flags & ATH_BUF_FIFOEND)
fifostart = 1;
continue;
}
/* Make sure we're not overflowing the FIFO! */
if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH) {
device_printf(sc->sc_dev,
"%s: Q%d: more frames in the queue; FIFO depth=%d?!\n",
__func__,
txq->axq_qnum,
txq->axq_fifo_depth);
}
#if 0
DPRINTF(sc, ATH_DEBUG_RESET,
"%s: Q%d: depth=%d: pushing bf=%p; start=%d, end=%d\n",
__func__,
txq->axq_qnum,
txq->axq_fifo_depth,
bf,
!! (bf->bf_flags & ATH_BUF_FIFOPTR),
!! (bf->bf_flags & ATH_BUF_FIFOEND));
#endif
/*
* Set this to be the first buffer in the FIFO
* list - even if it's also the last buffer in
* a FIFO list!
*/
bf->bf_flags |= ATH_BUF_FIFOPTR;
/* Push it into the FIFO and bump the FIFO count */
ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
txq->axq_fifo_depth++;
/*
* If this isn't the last entry either, let's
* clear fifostart so we continue looking for
* said last entry.
*/
if (! (bf->bf_flags & ATH_BUF_FIFOEND))
fifostart = 0;
i++;
}
/* Only bother starting the queue if there's something in it */
if (i > 0)
ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: FIFO depth was %d, is %d\n",
__func__,
txq->axq_qnum,
old_fifo_depth,
txq->axq_fifo_depth);
/* And now, let's check! */
if (txq->axq_fifo_depth != old_fifo_depth) {
device_printf(sc->sc_dev,
"%s: Q%d: FIFO depth should be %d, is %d\n",
__func__,
txq->axq_qnum,
old_fifo_depth,
txq->axq_fifo_depth);
}
}
/*
* Hand off this frame to a hardware queue.
*
* Things are a bit hairy in the EDMA world. The TX FIFO is only
* 8 entries deep, so we need to keep track of exactly what we've
* pushed into the FIFO and what's just sitting in the TX queue,
* waiting to go out.
*
* So this is split into two halves - frames get appended to the
* TXQ; then a scheduler is called to push some frames into the
* actual TX FIFO.
*/
static void
ath_edma_xmit_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf)
{
ATH_TXQ_LOCK(txq);
KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
("%s: busy status 0x%x", __func__, bf->bf_flags));
/*
* XXX TODO: write a hard-coded check to ensure that
* the queue id in the TX descriptor matches txq->axq_qnum.
*/
/* Update aggr stats */
if (bf->bf_state.bfs_aggr)
txq->axq_aggr_depth++;
/* Push and update frame stats */
ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
/* For now, set the link pointer in the last descriptor
* to be NULL.
*
* Later on, when it comes time to handle multiple descriptors
* in one FIFO push, we can link descriptors together this way.
*/
/*
* Finally, call the FIFO schedule routine to schedule some
* frames to the FIFO.
*/
ath_edma_tx_fifo_fill(sc, txq);
ATH_TXQ_UNLOCK(txq);
}
/*
* Hand off this frame to a multicast software queue.
*
* The EDMA TX CABQ will get a list of chained frames, chained
* together using the next pointer. The single head of that
* particular queue is pushed to the hardware CABQ.
*/
static void
ath_edma_xmit_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf)
{
ATH_TX_LOCK_ASSERT(sc);
KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
("%s: busy status 0x%x", __func__, bf->bf_flags));
ATH_TXQ_LOCK(txq);
/*
* XXX this is mostly duplicated in ath_tx_handoff_mcast().
*/
if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
struct ieee80211_frame *wh;
/* mark previous frame */
wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
/* re-sync buffer to memory */
bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
BUS_DMASYNC_PREWRITE);
/* link descriptor */
ath_hal_settxdesclink(sc->sc_ah,
bf_last->bf_lastds,
bf->bf_daddr);
}
#ifdef ATH_DEBUG_ALQ
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */
ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
ATH_TXQ_UNLOCK(txq);
}
/*
* Handoff this frame to the hardware.
*
* For the multicast queue, this will treat it as a software queue
* and append it to the list, after updating the MORE_DATA flag
* in the previous frame. The cabq processing code will ensure
* that the queue contents get transferred over.
*
* For the hardware queues, this will queue a frame to the queue
* like before, then populate the FIFO from that. Since the
* EDMA hardware has 8 FIFO slots per TXQ, this ensures that
* frames such as management frames don't get prematurely dropped.
*
* This does imply that a similar flush-hwq-to-fifoq method will
* need to be called from the processq function, before the
* per-node software scheduler is called.
*/
static void
ath_edma_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf)
{
DPRINTF(sc, ATH_DEBUG_XMIT_DESC,
"%s: called; bf=%p, txq=%p, qnum=%d\n",
__func__,
bf,
txq,
txq->axq_qnum);
if (txq->axq_qnum == ATH_TXQ_SWQ)
ath_edma_xmit_handoff_mcast(sc, txq, bf);
else
ath_edma_xmit_handoff_hw(sc, txq, bf);
}
static int
ath_edma_setup_txfifo(struct ath_softc *sc, int qnum)
{
struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];
te->m_fifo = malloc(sizeof(struct ath_buf *) * HAL_TXFIFO_DEPTH,
M_ATHDEV,
M_NOWAIT | M_ZERO);
if (te->m_fifo == NULL) {
device_printf(sc->sc_dev, "%s: malloc failed\n",
__func__);
return (-ENOMEM);
}
/*
* Set initial "empty" state.
*/
te->m_fifo_head = te->m_fifo_tail = te->m_fifo_depth = 0;
return (0);
}
static int
ath_edma_free_txfifo(struct ath_softc *sc, int qnum)
{
struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];
/* XXX TODO: actually deref the ath_buf entries? */
free(te->m_fifo, M_ATHDEV);
return (0);
}
static int
ath_edma_dma_txsetup(struct ath_softc *sc)
{
int error;
int i;
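/*
 * Allocate the TX completion status ring, hand its physical address
 * to the HAL, then reset each per-queue FIFO state.
 */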
error = ath_descdma_alloc_desc(sc, &sc->sc_txsdma,
NULL, "txcomp", sc->sc_tx_statuslen, ATH_TXSTATUS_RING_SIZE);
if (error != 0)
return (error);
ath_hal_setuptxstatusring(sc->sc_ah,
(void *) sc->sc_txsdma.dd_desc,
sc->sc_txsdma.dd_desc_paddr,
ATH_TXSTATUS_RING_SIZE);
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
ath_edma_setup_txfifo(sc, i);
}
return (0);
}
static int
ath_edma_dma_txteardown(struct ath_softc *sc)
{
int i;
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
ath_edma_free_txfifo(sc, i);
}
ath_descdma_cleanup(sc, &sc->sc_txsdma, NULL);
return (0);
}
/*
* Drain all TXQs, potentially after completing the existing completed
* frames.
*/
static void
ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
{
- struct ifnet *ifp = sc->sc_ifp;
int i;
DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
(void) ath_stoptxdma(sc);
/*
* If reset type is noloss, the TX FIFO needs to be serviced
* and those frames need to be handled.
*
* Otherwise, just toss everything in each TX queue.
*/
if (reset_type == ATH_RESET_NOLOSS) {
ath_edma_tx_processq(sc, 0);
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i)) {
ATH_TXQ_LOCK(&sc->sc_txq[i]);
/*
* Free the holding buffer; DMA is now
* stopped.
*/
ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
/*
* Reset the link pointer to NULL; there's
* no frames to chain DMA to.
*/
sc->sc_txq[i].axq_link = NULL;
ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
}
}
} else {
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i))
ath_tx_draintxq(sc, &sc->sc_txq[i]);
}
}
/* XXX dump out the TX completion FIFO contents */
/* XXX dump out the frames */
- IF_LOCK(&ifp->if_snd);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- IF_UNLOCK(&ifp->if_snd);
sc->sc_wd_timer = 0;
}
/*
* TX completion tasklet.
*/
static void
ath_edma_tx_proc(void *arg, int npending)
{
struct ath_softc *sc = (struct ath_softc *) arg;
#if 0
DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called, npending=%d\n",
__func__, npending);
#endif
ath_edma_tx_processq(sc, 1);
}
/*
* Process the TX status queue.
*/
static void
ath_edma_tx_processq(struct ath_softc *sc, int dosched)
{
struct ath_hal *ah = sc->sc_ah;
HAL_STATUS status;
struct ath_tx_status ts;
struct ath_txq *txq;
struct ath_buf *bf;
struct ieee80211_node *ni;
int nacked = 0;
int idx;
#ifdef ATH_DEBUG
/* XXX */
uint32_t txstatus[32];
#endif
for (idx = 0; ; idx++) {
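/*
 * Pop the next entry from the TX completion status ring;
 * HAL_EINPROGRESS means there's nothing further to process.
 */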
bzero(&ts, sizeof(ts));
ATH_TXSTATUS_LOCK(sc);
#ifdef ATH_DEBUG
ath_hal_gettxrawtxdesc(ah, txstatus);
#endif
status = ath_hal_txprocdesc(ah, NULL, (void *) &ts);
ATH_TXSTATUS_UNLOCK(sc);
if (status == HAL_EINPROGRESS)
break;
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_TX_PROC)
if (ts.ts_queue_id != sc->sc_bhalq)
ath_printtxstatbuf(sc, NULL, txstatus, ts.ts_queue_id,
idx, (status == HAL_OK));
#endif
/*
* If there is an error with this descriptor, continue
* processing.
*
* XXX TBD: log some statistics?
*/
if (status == HAL_EIO) {
device_printf(sc->sc_dev, "%s: invalid TX status?\n",
__func__);
break;
}
#if defined(ATH_DEBUG_ALQ) && defined(ATH_DEBUG)
if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS))
if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
sc->sc_tx_statuslen,
(char *) txstatus);
#endif /* ATH_DEBUG_ALQ */
/*
* At this point we have a valid status descriptor.
* The QID and descriptor ID (which currently isn't set)
* is part of the status.
*
* We then assume that the descriptor in question is the
* -head- of the given QID. Eventually we should verify
* this by using the descriptor ID.
*/
/*
* The beacon queue is not currently a "real" queue.
* Frames aren't pushed onto it and the lock isn't set up.
* So skip it for now; the beacon handling code will
* free and alloc more beacon buffers as appropriate.
*/
if (ts.ts_queue_id == sc->sc_bhalq)
continue;
txq = &sc->sc_txq[ts.ts_queue_id];
ATH_TXQ_LOCK(txq);
bf = ATH_TXQ_FIRST(&txq->fifo);
/*
* Work around the situation where I'm seeing notifications
* for Q1 when no frames are available. That needs to be
* debugged but not by crashing _here_.
*/
if (bf == NULL) {
device_printf(sc->sc_dev, "%s: Q%d: empty?\n",
__func__,
ts.ts_queue_id);
ATH_TXQ_UNLOCK(txq);
continue;
}
DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d, bf=%p, start=%d, end=%d\n",
__func__,
ts.ts_queue_id, bf,
!! (bf->bf_flags & ATH_BUF_FIFOPTR),
!! (bf->bf_flags & ATH_BUF_FIFOEND));
/* XXX TODO: actually output debugging info about this */
#if 0
/* XXX assert the buffer/descriptor matches the status descid */
if (ts.ts_desc_id != bf->bf_descid) {
device_printf(sc->sc_dev,
"%s: mismatched descid (qid=%d, tsdescid=%d, "
"bfdescid=%d\n",
__func__,
ts.ts_queue_id,
ts.ts_desc_id,
bf->bf_descid);
}
#endif
/* This removes the buffer and decrements the queue depth */
ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
if (bf->bf_state.bfs_aggr)
txq->axq_aggr_depth--;
/*
* If this was the end of a FIFO set, decrement FIFO depth
*/
if (bf->bf_flags & ATH_BUF_FIFOEND)
txq->axq_fifo_depth--;
/*
* If this isn't the final buffer in a FIFO set, mark
* the buffer as busy so it goes onto the holding queue.
*/
if (! (bf->bf_flags & ATH_BUF_FIFOEND))
bf->bf_flags |= ATH_BUF_BUSY;
DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: FIFO depth is now %d (%d)\n",
__func__,
txq->axq_qnum,
txq->axq_fifo_depth,
txq->fifo.axq_depth);
/* XXX assert FIFO depth >= 0 */
ATH_TXQ_UNLOCK(txq);
/*
* Outside of the TX lock - if the buffer is the
* end buffer in this FIFO, we don't need a holding
* buffer any longer.
*/
if (bf->bf_flags & ATH_BUF_FIFOEND) {
ATH_TXQ_LOCK(txq);
ath_txq_freeholdingbuf(sc, txq);
ATH_TXQ_UNLOCK(txq);
}
/*
* First we need to make sure ts_rate is valid.
*
* Pre-EDMA chips pass the whole TX descriptor to
* the proctxdesc function which will then fill out
* ts_rate based on the ts_finaltsi (final TX index)
* in the TX descriptor. However the TX completion
* FIFO doesn't have this information. So here we
* do a separate HAL call to populate that information.
*
* The same problem exists with ts_longretry.
* The FreeBSD HAL corrects ts_longretry in the HAL layer;
* the AR9380 HAL currently doesn't. So until the HAL
* is imported and this can be added, we correct for it
* here.
*/
/* XXX TODO */
/* XXX faked for now. Ew. */
if (ts.ts_finaltsi < 4) {
ts.ts_rate =
bf->bf_state.bfs_rc[ts.ts_finaltsi].ratecode;
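/*
 * Deliberate fall-through below: accumulate the tries from each
 * earlier rate series into ts_longretry.
 */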
switch (ts.ts_finaltsi) {
case 3: ts.ts_longretry +=
bf->bf_state.bfs_rc[2].tries;
case 2: ts.ts_longretry +=
bf->bf_state.bfs_rc[1].tries;
case 1: ts.ts_longretry +=
bf->bf_state.bfs_rc[0].tries;
}
} else {
device_printf(sc->sc_dev, "%s: finaltsi=%d\n",
__func__,
ts.ts_finaltsi);
ts.ts_rate = bf->bf_state.bfs_rc[0].ratecode;
}
/*
* XXX This is terrible.
*
* Right now, some code uses the TX status that is
* passed in here, but the completion handlers in the
* software TX path also use bf_status.ds_txstat.
* Ew. That should all go away.
*
* XXX It's also possible the rate control completion
* routine is called twice.
*/
memcpy(&bf->bf_status, &ts, sizeof(ts));
ni = bf->bf_node;
/* Update RSSI */
/* XXX duplicate from ath_tx_processq */
if (ni != NULL && ts.ts_status == 0 &&
((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
nacked++;
sc->sc_stats.ast_tx_rssi = ts.ts_rssi;
ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
ts.ts_rssi);
}
/* Handle frame completion and rate control update */
ath_tx_process_buf_completion(sc, txq, &ts, bf);
/* bf is invalid at this point */
/*
* Now that there's space in the FIFO, let's push some
* more frames into it.
*/
ATH_TXQ_LOCK(txq);
if (dosched)
ath_edma_tx_fifo_fill(sc, txq);
ATH_TXQ_UNLOCK(txq);
}
sc->sc_wd_timer = 0;
-
- if (idx > 0) {
- IF_LOCK(&sc->sc_ifp->if_snd);
- sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- IF_UNLOCK(&sc->sc_ifp->if_snd);
- }
/* Kick software scheduler */
/*
* XXX It's inefficient to do this if the FIFO queue is full,
* but there's no easy way right now to only populate
* the txq task for _one_ TXQ. This should be fixed.
*/
if (dosched)
ath_tx_swq_kick(sc);
}
static void
ath_edma_attach_comp_func(struct ath_softc *sc)
{
TASK_INIT(&sc->sc_txtask, 0, ath_edma_tx_proc, sc);
}
void
ath_xmit_setup_edma(struct ath_softc *sc)
{
/* Fetch EDMA field and buffer sizes */
(void) ath_hal_gettxdesclen(sc->sc_ah, &sc->sc_tx_desclen);
(void) ath_hal_gettxstatuslen(sc->sc_ah, &sc->sc_tx_statuslen);
(void) ath_hal_getntxmaps(sc->sc_ah, &sc->sc_tx_nmaps);
if (bootverbose) {
device_printf(sc->sc_dev, "TX descriptor length: %d\n",
sc->sc_tx_desclen);
device_printf(sc->sc_dev, "TX status length: %d\n",
sc->sc_tx_statuslen);
device_printf(sc->sc_dev, "TX buffers per descriptor: %d\n",
sc->sc_tx_nmaps);
}
sc->sc_tx.xmit_setup = ath_edma_dma_txsetup;
sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown;
sc->sc_tx.xmit_attach_comp_func = ath_edma_attach_comp_func;
sc->sc_tx.xmit_dma_restart = ath_edma_dma_restart;
sc->sc_tx.xmit_handoff = ath_edma_xmit_handoff;
sc->sc_tx.xmit_drain = ath_edma_tx_drain;
}
Index: head/sys/dev/ath/if_athvar.h
===================================================================
--- head/sys/dev/ath/if_athvar.h (revision 287196)
+++ head/sys/dev/ath/if_athvar.h (revision 287197)
@@ -1,1521 +1,1522 @@
/*-
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
* $FreeBSD$
*/
/*
* Definitions for the Atheros Wireless LAN controller driver.
*/
#ifndef _DEV_ATH_ATHVAR_H
#define _DEV_ATH_ATHVAR_H
#include <machine/atomic.h>
#include <dev/ath/ath_hal/ah.h>
#include <dev/ath/ath_hal/ah_desc.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/ath/if_athioctl.h>
#include <dev/ath/if_athrate.h>
#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif
#define ATH_TIMEOUT 1000
/*
* There is a separate TX ath_buf pool for management frames.
* This ensures that management frames such as probe responses
* and BAR frames can be transmitted during periods of high
* TX activity.
*/
#define ATH_MGMT_TXBUF 32
/*
* 802.11n requires more TX and RX buffers to do AMPDU.
*/
#ifdef ATH_ENABLE_11N
#define ATH_TXBUF 512
#define ATH_RXBUF 512
#endif
#ifndef ATH_RXBUF
#define ATH_RXBUF 40 /* number of RX buffers */
#endif
#ifndef ATH_TXBUF
#define ATH_TXBUF 200 /* number of TX buffers */
#endif
#define ATH_BCBUF 4 /* number of beacon buffers */
#define ATH_TXDESC 10 /* number of descriptors per buffer */
#define ATH_TXMAXTRY 11 /* max number of transmit attempts */
#define ATH_TXMGTTRY 4 /* xmit attempts for mgt/ctl frames */
#define ATH_TXINTR_PERIOD 5 /* max number of batched tx descriptors */
#define ATH_BEACON_AIFS_DEFAULT 1 /* default aifs for ap beacon q */
#define ATH_BEACON_CWMIN_DEFAULT 0 /* default cwmin for ap beacon q */
#define ATH_BEACON_CWMAX_DEFAULT 0 /* default cwmax for ap beacon q */
/*
* The following bits can be set during the PCI (and perhaps non-PCI
* later) device probe path.
*
* These bits control some of the driver and HAL behaviour.
*/
#define ATH_PCI_CUS198 0x0001
#define ATH_PCI_CUS230 0x0002
#define ATH_PCI_CUS217 0x0004
#define ATH_PCI_CUS252 0x0008
#define ATH_PCI_WOW 0x0010
#define ATH_PCI_BT_ANT_DIV 0x0020
#define ATH_PCI_D3_L1_WAR 0x0040
#define ATH_PCI_AR9565_1ANT 0x0080
#define ATH_PCI_AR9565_2ANT 0x0100
#define ATH_PCI_NO_PLL_PWRSAVE 0x0200
#define ATH_PCI_KILLER 0x0400
/*
* The key cache is used for h/w cipher state and also for
* tracking station state such as the current tx antenna.
* We also set up a mapping table between key cache slot indices
* and station state to short-circuit node lookups on rx.
* Different parts have different size key caches. We handle
* up to ATH_KEYMAX entries (could dynamically allocate state).
*/
#define ATH_KEYMAX 128 /* max key cache size we handle */
#define ATH_KEYBYTES (ATH_KEYMAX/NBBY) /* storage space in bytes */
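/*
* For example, with ATH_KEYMAX at 128 and NBBY (bits per byte) at 8,
* the sc_keymap bit map in ath_softc occupies 128 / 8 == 16 bytes,
* one bit per key cache slot marking that slot as in use.
*/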
struct taskqueue;
struct kthread;
struct ath_buf;
#define ATH_TID_MAX_BUFS (2 * IEEE80211_AGGR_BAWMAX)
/*
* Per-TID state
*
* Note that TID 16 (WME_NUM_TID+1) is for handling non-QoS frames.
*/
struct ath_tid {
TAILQ_HEAD(,ath_buf) tid_q; /* pending buffers */
struct ath_node *an; /* pointer to parent */
int tid; /* tid */
int ac; /* which AC gets this traffic */
int hwq_depth; /* how many buffers are on HW */
u_int axq_depth; /* SW queue depth */
struct {
TAILQ_HEAD(,ath_buf) tid_q; /* filtered queue */
u_int axq_depth; /* SW queue depth */
} filtq;
/*
* Entry on the ath_txq; when there's traffic
* to send
*/
TAILQ_ENTRY(ath_tid) axq_qelem;
int sched;
int paused; /* >0 if the TID has been paused */
/*
* These are flags - perhaps later collapse
* down to a single uint32_t ?
*/
int addba_tx_pending; /* TX ADDBA pending */
int bar_wait; /* waiting for BAR */
int bar_tx; /* BAR TXed */
int isfiltered; /* is this node currently filtered */
/*
* Is the TID being cleaned up after a transition
* from aggregation to non-aggregation?
* When this is set to 1, this TID will be paused
* and no further traffic will be queued until all
* the hardware packets pending for this TID have been
* TXed/completed; at which point (non-aggregation)
* traffic will resume being TXed.
*/
int cleanup_inprogress;
/*
* How many hardware-queued packets are
* waiting to be cleaned up.
* This is only valid if cleanup_inprogress is 1.
*/
int incomp;
/*
* The following implements a ring representing
* the frames in the current BAW.
* To avoid copying the array content each time
* the BAW is moved, the baw_head/baw_tail point
* to the current BAW begin/end; when the BAW is
* shifted the head/tail of the array are also
* appropriately shifted.
*/
/* active tx buffers, beginning at current BAW */
struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
/* where the baw head is in the array */
int baw_head;
/* where the BAW tail is in the array */
int baw_tail;
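/*
* Illustrative only: advancing either index wraps modulo the ring
* size, e.g. baw_head = (baw_head + 1) % ATH_TID_MAX_BUFS, so
* sliding the BAW forward never copies tx_buf[] entries around.
*/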
};
/* driver-specific node state */
struct ath_node {
struct ieee80211_node an_node; /* base class */
u_int8_t an_mgmtrix; /* min h/w rate index */
u_int8_t an_mcastrix; /* mcast h/w rate index */
uint32_t an_is_powersave; /* node is sleeping */
uint32_t an_stack_psq; /* net80211 psq isn't empty */
uint32_t an_tim_set; /* TIM has been set */
struct ath_buf *an_ff_buf[WME_NUM_AC]; /* ff staging area */
struct ath_tid an_tid[IEEE80211_TID_SIZE]; /* per-TID state */
char an_name[32]; /* eg "wlan0_a1" */
struct mtx an_mtx; /* protecting the rate control state */
uint32_t an_swq_depth; /* how many SWQ packets for this
node */
int clrdmask; /* has clrdmask been set */
uint32_t an_leak_count; /* How many frames to leak during pause */
/* variable-length rate control state follows */
};
#define ATH_NODE(ni) ((struct ath_node *)(ni))
#define ATH_NODE_CONST(ni) ((const struct ath_node *)(ni))
#define ATH_RSSI_LPF_LEN 10
#define ATH_RSSI_DUMMY_MARKER 0x127
#define ATH_EP_MUL(x, mul) ((x) * (mul))
#define ATH_RSSI_IN(x) (ATH_EP_MUL((x), HAL_RSSI_EP_MULTIPLIER))
#define ATH_LPF_RSSI(x, y, len) \
((x != ATH_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y))
#define ATH_RSSI_LPF(x, y) do { \
if ((y) >= -20) \
x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
} while (0)
#define ATH_EP_RND(x,mul) \
((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define ATH_RSSI(x) ATH_EP_RND(x, HAL_RSSI_EP_MULTIPLIER)
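/*
* Worked example (illustrative; assumes HAL_RSSI_EP_MULTIPLIER is 1<<7
* and ATH_RSSI_LPF_LEN is 10): if the filtered average currently holds
* ATH_RSSI_IN(40) == 5120 and a new sample of 30 arrives, then
* ATH_RSSI_LPF(x, 30) computes (5120 * 9 + 30 * 128) / 10 == 4992 and
* ATH_RSSI(4992) scales that back to 39, i.e. the average drifts one
* step towards the new sample. Samples below -20 are ignored.
*/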
typedef enum {
ATH_BUFTYPE_NORMAL = 0,
ATH_BUFTYPE_MGMT = 1,
} ath_buf_type_t;
struct ath_buf {
TAILQ_ENTRY(ath_buf) bf_list;
struct ath_buf * bf_next; /* next buffer in the aggregate */
int bf_nseg;
HAL_STATUS bf_rxstatus;
uint16_t bf_flags; /* status flags (below) */
uint16_t bf_descid; /* 16 bit descriptor ID */
struct ath_desc *bf_desc; /* virtual addr of desc */
struct ath_desc_status bf_status; /* tx/rx status */
bus_addr_t bf_daddr; /* physical addr of desc */
bus_dmamap_t bf_dmamap; /* DMA map for mbuf chain */
struct mbuf *bf_m; /* mbuf for buf */
struct ieee80211_node *bf_node; /* pointer to the node */
struct ath_desc *bf_lastds; /* last descriptor for comp status */
struct ath_buf *bf_last; /* last buffer in aggregate, or self for non-aggregate */
bus_size_t bf_mapsize;
#define ATH_MAX_SCATTER ATH_TXDESC /* max(tx,rx,beacon) desc's */
bus_dma_segment_t bf_segs[ATH_MAX_SCATTER];
uint32_t bf_nextfraglen; /* length of next fragment */
/* Completion function to call on TX complete (fail or not) */
/*
* "fail" here is set to 1 if the queue entries were removed
* through a call to ath_tx_draintxq().
*/
void(* bf_comp) (struct ath_softc *sc, struct ath_buf *bf, int fail);
/* This state is kept to support software retries and aggregation */
struct {
uint16_t bfs_seqno; /* sequence number of this packet */
uint16_t bfs_ndelim; /* number of delims for padding */
uint8_t bfs_retries; /* retry count */
uint8_t bfs_tid; /* packet TID (or TID_MAX for no QoS) */
uint8_t bfs_nframes; /* number of frames in aggregate */
uint8_t bfs_pri; /* packet AC priority */
uint8_t bfs_tx_queue; /* destination hardware TX queue */
u_int32_t bfs_aggr:1, /* part of aggregate? */
bfs_aggrburst:1, /* part of aggregate burst? */
bfs_isretried:1, /* retried frame? */
bfs_dobaw:1, /* actually check against BAW? */
bfs_addedbaw:1, /* has been added to the BAW */
bfs_shpream:1, /* use short preamble */
bfs_istxfrag:1, /* is fragmented */
bfs_ismrr:1, /* do multi-rate TX retry */
bfs_doprot:1, /* do RTS/CTS based protection */
bfs_doratelookup:1; /* do rate lookup before each TX */
/*
* These fields are passed into the
* descriptor setup functions.
*/
/* Make this an 8 bit value? */
HAL_PKT_TYPE bfs_atype; /* packet type */
uint32_t bfs_pktlen; /* length of this packet */
uint16_t bfs_hdrlen; /* length of this packet header */
uint16_t bfs_al; /* length of aggregate */
uint16_t bfs_txflags; /* HAL (tx) descriptor flags */
uint8_t bfs_txrate0; /* first TX rate */
uint8_t bfs_try0; /* first try count */
uint16_t bfs_txpower; /* tx power */
uint8_t bfs_ctsrate0; /* Non-zero - use this as ctsrate */
uint8_t bfs_ctsrate; /* CTS rate */
/* 16 bit? */
int32_t bfs_keyix; /* crypto key index */
int32_t bfs_txantenna; /* TX antenna config */
/* Make this an 8 bit value? */
enum ieee80211_protmode bfs_protmode;
/* 16 bit? */
uint32_t bfs_ctsduration; /* CTS duration (pre-11n NICs) */
struct ath_rc_series bfs_rc[ATH_RC_NUM]; /* non-11n TX series */
} bf_state;
};
typedef TAILQ_HEAD(ath_bufhead_s, ath_buf) ath_bufhead;
#define ATH_BUF_MGMT 0x00000001 /* (tx) desc is a mgmt desc */
#define ATH_BUF_BUSY 0x00000002 /* (tx) desc owned by h/w */
#define ATH_BUF_FIFOEND 0x00000004
#define ATH_BUF_FIFOPTR 0x00000008
#define ATH_BUF_FLAGS_CLONE (ATH_BUF_MGMT)
/*
* DMA state for tx/rx descriptors.
*/
struct ath_descdma {
const char* dd_name;
struct ath_desc *dd_desc; /* descriptors */
int dd_descsize; /* size of single descriptor */
bus_addr_t dd_desc_paddr; /* physical addr of dd_desc */
bus_size_t dd_desc_len; /* size of dd_desc */
bus_dma_segment_t dd_dseg;
bus_dma_tag_t dd_dmat; /* bus DMA tag */
bus_dmamap_t dd_dmamap; /* DMA map for descriptors */
struct ath_buf *dd_bufptr; /* associated buffers */
};
/*
* Data transmit queue state. One of these exists for each
* hardware transmit queue. Packets sent to us from above
* are assigned to queues based on their priority. Not all
* devices support a complete set of hardware transmit queues.
* For those devices the array sc_ac2q will map multiple
* priorities to fewer hardware queues (typically all to one
* hardware queue).
*/
struct ath_txq {
struct ath_softc *axq_softc; /* Needed for scheduling */
u_int axq_qnum; /* hardware q number */
#define ATH_TXQ_SWQ (HAL_NUM_TX_QUEUES+1) /* qnum for s/w only queue */
u_int axq_ac; /* WME AC */
u_int axq_flags;
//#define ATH_TXQ_PUTPENDING 0x0001 /* ath_hal_puttxbuf pending */
#define ATH_TXQ_PUTRUNNING 0x0002 /* ath_hal_puttxbuf has been called */
u_int axq_depth; /* queue depth (stat only) */
u_int axq_aggr_depth; /* how many aggregates are queued */
u_int axq_intrcnt; /* interrupt count */
u_int32_t *axq_link; /* link ptr in last TX desc */
TAILQ_HEAD(axq_q_s, ath_buf) axq_q; /* transmit queue */
struct mtx axq_lock; /* lock on q and link */
/*
* This is the FIFO staging buffer when doing EDMA.
*
* For legacy chips, we just push the head pointer to
* the hardware and we ignore this list.
*
* For EDMA, the staging buffer is treated as normal;
* when it's time to push a list of frames to the hardware
* we move that list here and we stamp buffers with
* flags to identify the beginning/end of that particular
* FIFO entry.
*/
struct {
TAILQ_HEAD(axq_q_f_s, ath_buf) axq_q;
u_int axq_depth;
} fifo;
u_int axq_fifo_depth; /* number of FIFO sets pushed to hw */
/*
* XXX the holdingbf field is protected by the TXBUF lock
* for now, NOT the TXQ lock.
*
* Architecturally, it would likely be better to move
* the holdingbf field to a separate array in ath_softc
* just to highlight that it's not protected by the normal
* TX path lock.
*/
struct ath_buf *axq_holdingbf; /* holding TX buffer */
char axq_name[12]; /* e.g. "ath0_txq4" */
/* Per-TID traffic queue for software -> hardware TX */
/*
* This is protected by the general TX path lock, not (for now)
* by the TXQ lock.
*/
TAILQ_HEAD(axq_t_s,ath_tid) axq_tidq;
};
#define ATH_TXQ_LOCK_INIT(_sc, _tq) do { \
snprintf((_tq)->axq_name, sizeof((_tq)->axq_name), "%s_txq%u", \
device_get_nameunit((_sc)->sc_dev), (_tq)->axq_qnum); \
mtx_init(&(_tq)->axq_lock, (_tq)->axq_name, NULL, MTX_DEF); \
} while (0)
#define ATH_TXQ_LOCK_DESTROY(_tq) mtx_destroy(&(_tq)->axq_lock)
#define ATH_TXQ_LOCK(_tq) mtx_lock(&(_tq)->axq_lock)
#define ATH_TXQ_UNLOCK(_tq) mtx_unlock(&(_tq)->axq_lock)
#define ATH_TXQ_LOCK_ASSERT(_tq) mtx_assert(&(_tq)->axq_lock, MA_OWNED)
#define ATH_TXQ_UNLOCK_ASSERT(_tq) mtx_assert(&(_tq)->axq_lock, \
MA_NOTOWNED)
#define ATH_NODE_LOCK(_an) mtx_lock(&(_an)->an_mtx)
#define ATH_NODE_UNLOCK(_an) mtx_unlock(&(_an)->an_mtx)
#define ATH_NODE_LOCK_ASSERT(_an) mtx_assert(&(_an)->an_mtx, MA_OWNED)
#define ATH_NODE_UNLOCK_ASSERT(_an) mtx_assert(&(_an)->an_mtx, \
MA_NOTOWNED)
/*
* These are for the hardware queue.
*/
#define ATH_TXQ_INSERT_HEAD(_tq, _elm, _field) do { \
TAILQ_INSERT_HEAD(&(_tq)->axq_q, (_elm), _field); \
(_tq)->axq_depth++; \
} while (0)
#define ATH_TXQ_INSERT_TAIL(_tq, _elm, _field) do { \
TAILQ_INSERT_TAIL(&(_tq)->axq_q, (_elm), _field); \
(_tq)->axq_depth++; \
} while (0)
#define ATH_TXQ_REMOVE(_tq, _elm, _field) do { \
TAILQ_REMOVE(&(_tq)->axq_q, _elm, _field); \
(_tq)->axq_depth--; \
} while (0)
#define ATH_TXQ_FIRST(_tq) TAILQ_FIRST(&(_tq)->axq_q)
#define ATH_TXQ_LAST(_tq, _field) TAILQ_LAST(&(_tq)->axq_q, _field)
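/*
* Illustrative sketch (not a verbatim copy of the TX path): the
* hardware queue macros are used with the TXQ lock held, e.g.
*
*	ATH_TXQ_LOCK(txq);
*	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);	(bumps axq_depth)
*	...
*	bf = ATH_TXQ_FIRST(txq);
*	if (bf != NULL)
*		ATH_TXQ_REMOVE(txq, bf, bf_list);	(drops axq_depth)
*	ATH_TXQ_UNLOCK(txq);
*/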
/*
* These are for the TID software queue.
*/
#define ATH_TID_INSERT_HEAD(_tq, _elm, _field) do { \
TAILQ_INSERT_HEAD(&(_tq)->tid_q, (_elm), _field); \
(_tq)->axq_depth++; \
(_tq)->an->an_swq_depth++; \
} while (0)
#define ATH_TID_INSERT_TAIL(_tq, _elm, _field) do { \
TAILQ_INSERT_TAIL(&(_tq)->tid_q, (_elm), _field); \
(_tq)->axq_depth++; \
(_tq)->an->an_swq_depth++; \
} while (0)
#define ATH_TID_REMOVE(_tq, _elm, _field) do { \
TAILQ_REMOVE(&(_tq)->tid_q, _elm, _field); \
(_tq)->axq_depth--; \
(_tq)->an->an_swq_depth--; \
} while (0)
#define ATH_TID_FIRST(_tq) TAILQ_FIRST(&(_tq)->tid_q)
#define ATH_TID_LAST(_tq, _field) TAILQ_LAST(&(_tq)->tid_q, _field)
/*
* These are for the TID filtered frame queue
*/
#define ATH_TID_FILT_INSERT_HEAD(_tq, _elm, _field) do { \
TAILQ_INSERT_HEAD(&(_tq)->filtq.tid_q, (_elm), _field); \
(_tq)->axq_depth++; \
(_tq)->an->an_swq_depth++; \
} while (0)
#define ATH_TID_FILT_INSERT_TAIL(_tq, _elm, _field) do { \
TAILQ_INSERT_TAIL(&(_tq)->filtq.tid_q, (_elm), _field); \
(_tq)->axq_depth++; \
(_tq)->an->an_swq_depth++; \
} while (0)
#define ATH_TID_FILT_REMOVE(_tq, _elm, _field) do { \
TAILQ_REMOVE(&(_tq)->filtq.tid_q, _elm, _field); \
(_tq)->axq_depth--; \
(_tq)->an->an_swq_depth--; \
} while (0)
#define ATH_TID_FILT_FIRST(_tq) TAILQ_FIRST(&(_tq)->filtq.tid_q)
#define ATH_TID_FILT_LAST(_tq, _field) TAILQ_LAST(&(_tq)->filtq.tid_q,_field)
struct ath_vap {
struct ieee80211vap av_vap; /* base class */
int av_bslot; /* beacon slot index */
struct ath_buf *av_bcbuf; /* beacon buffer */
struct ieee80211_beacon_offsets av_boff;/* dynamic update state */
struct ath_txq av_mcastq; /* buffered mcast s/w queue */
void (*av_recv_mgmt)(struct ieee80211_node *,
struct mbuf *, int,
const struct ieee80211_rx_stats *, int, int);
int (*av_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
void (*av_bmiss)(struct ieee80211vap *);
void (*av_node_ps)(struct ieee80211_node *, int);
int (*av_set_tim)(struct ieee80211_node *, int);
void (*av_recv_pspoll)(struct ieee80211_node *,
struct mbuf *);
};
#define ATH_VAP(vap) ((struct ath_vap *)(vap))
struct taskqueue;
struct ath_tx99;
/*
* Whether to reset the TX/RX queue with or without
* a queue flush.
*/
typedef enum {
ATH_RESET_DEFAULT = 0,
ATH_RESET_NOLOSS = 1,
ATH_RESET_FULL = 2,
} ATH_RESET_TYPE;
struct ath_rx_methods {
void (*recv_sched_queue)(struct ath_softc *sc,
HAL_RX_QUEUE q, int dosched);
void (*recv_sched)(struct ath_softc *sc, int dosched);
void (*recv_stop)(struct ath_softc *sc, int dodelay);
int (*recv_start)(struct ath_softc *sc);
void (*recv_flush)(struct ath_softc *sc);
void (*recv_tasklet)(void *arg, int npending);
int (*recv_rxbuf_init)(struct ath_softc *sc,
struct ath_buf *bf);
int (*recv_setup)(struct ath_softc *sc);
int (*recv_teardown)(struct ath_softc *sc);
};
/*
* Represents the current state of the RX FIFO.
*/
struct ath_rx_edma {
struct ath_buf **m_fifo;
int m_fifolen;
int m_fifo_head;
int m_fifo_tail;
int m_fifo_depth;
struct mbuf *m_rxpending;
struct ath_buf *m_holdbf;
};
struct ath_tx_edma_fifo {
struct ath_buf **m_fifo;
int m_fifolen;
int m_fifo_head;
int m_fifo_tail;
int m_fifo_depth;
};
struct ath_tx_methods {
int (*xmit_setup)(struct ath_softc *sc);
int (*xmit_teardown)(struct ath_softc *sc);
void (*xmit_attach_comp_func)(struct ath_softc *sc);
void (*xmit_dma_restart)(struct ath_softc *sc,
struct ath_txq *txq);
void (*xmit_handoff)(struct ath_softc *sc,
struct ath_txq *txq, struct ath_buf *bf);
void (*xmit_drain)(struct ath_softc *sc,
ATH_RESET_TYPE reset_type);
};
struct ath_softc {
- struct ifnet *sc_ifp; /* interface common */
- struct ath_stats sc_stats; /* interface statistics */
+ struct ieee80211com sc_ic;
+ struct ath_stats sc_stats; /* device statistics */
struct ath_tx_aggr_stats sc_aggr_stats;
struct ath_intr_stats sc_intr_stats;
uint64_t sc_debug;
uint64_t sc_ktrdebug;
int sc_nvaps; /* # vaps */
int sc_nstavaps; /* # station vaps */
int sc_nmeshvaps; /* # mbss vaps */
u_int8_t sc_hwbssidmask[IEEE80211_ADDR_LEN];
u_int8_t sc_nbssid0; /* # vap's using base mac */
uint32_t sc_bssidmask; /* bssid mask */
struct ath_rx_methods sc_rx;
struct ath_rx_edma sc_rxedma[HAL_NUM_RX_QUEUES]; /* HP/LP queues */
ath_bufhead sc_rx_rxlist[HAL_NUM_RX_QUEUES]; /* deferred RX completion */
struct ath_tx_methods sc_tx;
struct ath_tx_edma_fifo sc_txedma[HAL_NUM_TX_QUEUES];
/*
* This is (currently) protected by the TX queue lock;
* it should migrate to a separate lock later
* so as to minimise contention.
*/
ath_bufhead sc_txbuf_list;
int sc_rx_statuslen;
int sc_tx_desclen;
int sc_tx_statuslen;
int sc_tx_nmaps; /* Number of TX maps */
int sc_edma_bufsize;
int sc_rx_stopped; /* XXX only for EDMA */
int sc_rx_resetted; /* XXX only for EDMA */
void (*sc_node_cleanup)(struct ieee80211_node *);
void (*sc_node_free)(struct ieee80211_node *);
device_t sc_dev;
HAL_BUS_TAG sc_st; /* bus space tag */
HAL_BUS_HANDLE sc_sh; /* bus space handle */
bus_dma_tag_t sc_dmat; /* bus DMA tag */
struct mtx sc_mtx; /* master lock (recursive) */
struct mtx sc_pcu_mtx; /* PCU access mutex */
char sc_pcu_mtx_name[32];
struct mtx sc_rx_mtx; /* RX access mutex */
char sc_rx_mtx_name[32];
struct mtx sc_tx_mtx; /* TX handling/comp mutex */
char sc_tx_mtx_name[32];
struct mtx sc_tx_ic_mtx; /* TX queue mutex */
char sc_tx_ic_mtx_name[32];
struct taskqueue *sc_tq; /* private task queue */
struct ath_hal *sc_ah; /* Atheros HAL */
struct ath_ratectrl *sc_rc; /* tx rate control support */
struct ath_tx99 *sc_tx99; /* tx99 adjunct state */
void (*sc_setdefantenna)(struct ath_softc *, u_int);
/*
* First set of flags.
*/
uint32_t sc_invalid : 1,/* disable hardware accesses */
sc_mrretry : 1,/* multi-rate retry support */
sc_mrrprot : 1,/* MRR + protection support */
sc_softled : 1,/* enable LED gpio status */
sc_hardled : 1,/* enable MAC LED status */
sc_splitmic : 1,/* split TKIP MIC keys */
sc_needmib : 1,/* enable MIB stats intr */
sc_diversity: 1,/* enable rx diversity */
sc_hasveol : 1,/* tx VEOL support */
sc_ledstate : 1,/* LED on/off state */
sc_blinking : 1,/* LED blink operation active */
sc_mcastkey : 1,/* mcast key cache search */
sc_scanning : 1,/* scanning active */
sc_syncbeacon:1,/* sync/resync beacon timers */
sc_hasclrkey: 1,/* CLR key supported */
sc_xchanmode: 1,/* extended channel mode */
sc_outdoor : 1,/* outdoor operation */
sc_dturbo : 1,/* dynamic turbo in use */
sc_hasbmask : 1,/* bssid mask support */
sc_hasbmatch: 1,/* bssid match disable support*/
sc_hastsfadd: 1,/* tsf adjust support */
sc_beacons : 1,/* beacons running */
sc_swbmiss : 1,/* sta mode using sw bmiss */
sc_stagbeacons:1,/* use staggered beacons */
sc_wmetkipmic:1,/* can do WME+TKIP MIC */
sc_resume_up: 1,/* on resume, start all vaps */
sc_tdma : 1,/* TDMA in use */
sc_setcca : 1,/* set/clr CCA with TDMA */
sc_resetcal : 1,/* reset cal state next trip */
sc_rxslink : 1,/* do self-linked final descriptor */
sc_rxtsf32 : 1,/* RX dec TSF is 32 bits */
sc_isedma : 1,/* supports EDMA */
sc_do_mybeacon : 1; /* supports mybeacon */
/*
* Second set of flags.
*/
- u_int32_t sc_use_ent : 1,
+ u_int32_t sc_running : 1, /* initialized */
+ sc_use_ent : 1,
sc_rx_stbc : 1,
sc_tx_stbc : 1,
sc_hasenforcetxop : 1, /* support enforce TxOP */
sc_hasdivcomb : 1, /* RX diversity combining */
sc_rx_lnamixer : 1; /* RX using LNA mixing */
int sc_cabq_enable; /* Enable cabq transmission */
/*
* Enterprise mode configuration for AR9380 and later chipsets.
*/
uint32_t sc_ent_cfg;
uint32_t sc_eerd; /* regdomain from EEPROM */
uint32_t sc_eecc; /* country code from EEPROM */
/* rate tables */
const HAL_RATE_TABLE *sc_rates[IEEE80211_MODE_MAX];
const HAL_RATE_TABLE *sc_currates; /* current rate table */
enum ieee80211_phymode sc_curmode; /* current phy mode */
HAL_OPMODE sc_opmode; /* current operating mode */
u_int16_t sc_curtxpow; /* current tx power limit */
u_int16_t sc_curaid; /* current association id */
struct ieee80211_channel *sc_curchan; /* current installed channel */
u_int8_t sc_curbssid[IEEE80211_ADDR_LEN];
u_int8_t sc_rixmap[256]; /* IEEE to h/w rate table ix */
struct {
u_int8_t ieeerate; /* IEEE rate */
u_int8_t rxflags; /* radiotap rx flags */
u_int8_t txflags; /* radiotap tx flags */
u_int16_t ledon; /* softled on time */
u_int16_t ledoff; /* softled off time */
} sc_hwmap[32]; /* h/w rate ix mappings */
u_int8_t sc_protrix; /* protection rate index */
u_int8_t sc_lastdatarix; /* last data frame rate index */
u_int sc_mcastrate; /* ieee rate for mcastrateix */
u_int sc_fftxqmin; /* min frames before staging */
u_int sc_fftxqmax; /* max frames before drop */
u_int sc_txantenna; /* tx antenna (fixed or auto) */
HAL_INT sc_imask; /* interrupt mask copy */
/*
* These are modified in the interrupt handler as well as
* the task queues and other contexts. Thus these must be
* protected by a mutex, or they could clash.
*
* For now, access to these is behind the ATH_LOCK,
* just to save time.
*/
uint32_t sc_txq_active; /* bitmap of active TXQs */
uint32_t sc_kickpcu; /* whether to kick the PCU */
uint32_t sc_rxproc_cnt; /* In RX processing */
uint32_t sc_txproc_cnt; /* In TX processing */
uint32_t sc_txstart_cnt; /* In TX output (raw/start) */
uint32_t sc_inreset_cnt; /* In active reset/chanchange */
uint32_t sc_txrx_cnt; /* refcount on stop/start'ing TX */
uint32_t sc_intr_cnt; /* refcount on interrupt handling */
u_int sc_keymax; /* size of key cache */
u_int8_t sc_keymap[ATH_KEYBYTES];/* key use bit map */
/*
* Software based LED blinking
*/
u_int sc_ledpin; /* GPIO pin for driving LED */
u_int sc_ledon; /* pin setting for LED on */
u_int sc_ledidle; /* idle polling interval */
int sc_ledevent; /* time of last LED event */
u_int8_t sc_txrix; /* current tx rate for LED */
u_int16_t sc_ledoff; /* off time for current blink */
struct callout sc_ledtimer; /* led off timer */
/*
* Hardware based LED blinking
*/
int sc_led_pwr_pin; /* MAC power LED GPIO pin */
int sc_led_net_pin; /* MAC network LED GPIO pin */
u_int sc_rfsilentpin; /* GPIO pin for rfkill int */
u_int sc_rfsilentpol; /* pin setting for rfkill on */
struct ath_descdma sc_rxdma; /* RX descriptors */
ath_bufhead sc_rxbuf; /* receive buffer */
u_int32_t *sc_rxlink; /* link ptr in last RX desc */
struct task sc_rxtask; /* rx int processing */
u_int8_t sc_defant; /* current default antenna */
u_int8_t sc_rxotherant; /* rx's on non-default antenna*/
u_int64_t sc_lastrx; /* tsf at last rx'd frame */
struct ath_rx_status *sc_lastrs; /* h/w status of last rx */
struct ath_rx_radiotap_header sc_rx_th;
int sc_rx_th_len;
u_int sc_monpass; /* frames to pass in mon.mode */
struct ath_descdma sc_txdma; /* TX descriptors */
uint16_t sc_txbuf_descid;
ath_bufhead sc_txbuf; /* transmit buffer */
int sc_txbuf_cnt; /* how many buffers avail */
struct ath_descdma sc_txdma_mgmt; /* mgmt TX descriptors */
ath_bufhead sc_txbuf_mgmt; /* mgmt transmit buffer */
struct ath_descdma sc_txsdma; /* EDMA TX status desc's */
struct mtx sc_txbuflock; /* txbuf lock */
char sc_txname[12]; /* e.g. "ath0_buf" */
u_int sc_txqsetup; /* h/w queues setup */
u_int sc_txintrperiod;/* tx interrupt batching */
struct ath_txq sc_txq[HAL_NUM_TX_QUEUES];
struct ath_txq *sc_ac2q[5]; /* WME AC -> h/w q map */
struct task sc_txtask; /* tx int processing */
struct task sc_txqtask; /* tx proc processing */
struct ath_descdma sc_txcompdma; /* TX EDMA completion */
struct mtx sc_txcomplock; /* TX EDMA completion lock */
char sc_txcompname[12]; /* eg ath0_txcomp */
int sc_wd_timer; /* count down for wd timer */
struct callout sc_wd_ch; /* tx watchdog timer */
struct ath_tx_radiotap_header sc_tx_th;
int sc_tx_th_len;
struct ath_descdma sc_bdma; /* beacon descriptors */
ath_bufhead sc_bbuf; /* beacon buffers */
u_int sc_bhalq; /* HAL q for outgoing beacons */
u_int sc_bmisscount; /* missed beacon transmits */
u_int32_t sc_ant_tx[8]; /* recent tx frames/antenna */
struct ath_txq *sc_cabq; /* tx q for cab frames */
struct task sc_bmisstask; /* bmiss int processing */
struct task sc_bstucktask; /* stuck beacon processing */
struct task sc_resettask; /* interface reset task */
struct task sc_fataltask; /* fatal task */
enum {
OK, /* no change needed */
UPDATE, /* update pending */
COMMIT /* beacon sent, commit change */
} sc_updateslot; /* slot time update fsm */
int sc_slotupdate; /* slot to advance fsm */
struct ieee80211vap *sc_bslot[ATH_BCBUF];
int sc_nbcnvaps; /* # vaps with beacons */
struct callout sc_cal_ch; /* callout handle for cals */
int sc_lastlongcal; /* last long cal completed */
int sc_lastcalreset;/* last cal reset done */
int sc_lastani; /* last ANI poll */
int sc_lastshortcal; /* last short calibration */
HAL_BOOL sc_doresetcal; /* Yes, we're doing a reset cal atm */
HAL_NODE_STATS sc_halstats; /* station-mode rssi stats */
u_int sc_tdmadbaprep; /* TDMA DBA prep time */
u_int sc_tdmaswbaprep;/* TDMA SWBA prep time */
u_int sc_tdmaswba; /* TDMA SWBA counter */
u_int32_t sc_tdmabintval; /* TDMA beacon interval (TU) */
u_int32_t sc_tdmaguard; /* TDMA guard time (usec) */
u_int sc_tdmaslotlen; /* TDMA slot length (usec) */
u_int32_t sc_avgtsfdeltap;/* TDMA slot adjust (+) */
u_int32_t sc_avgtsfdeltam;/* TDMA slot adjust (-) */
uint16_t *sc_eepromdata; /* Local eeprom data, if AR9100 */
uint32_t sc_txchainmask; /* hardware TX chainmask */
uint32_t sc_rxchainmask; /* hardware RX chainmask */
uint32_t sc_cur_txchainmask; /* currently configured TX chainmask */
uint32_t sc_cur_rxchainmask; /* currently configured RX chainmask */
uint32_t sc_rts_aggr_limit; /* TX limit on RTS aggregates */
int sc_aggr_limit; /* TX limit on all aggregates */
int sc_delim_min_pad; /* Minimum delimiter count */
/* Queue limits */
/*
* To avoid queue starvation in congested conditions,
* these parameters tune the maximum number of frames
* queued to the data/mcastq before they're dropped.
*
* This is to prevent:
* + a single destination overwhelming everything, including
* management/multicast frames;
* + multicast frames overwhelming everything (when the
* air is sufficiently busy that the cabq can't drain);
* + a node in powersave exhausting all available mbufs.
*
* These implement:
* + data_minfree is the number of TX buffers that must remain
* free overall for a normal data frame to be queued;
*
* + mcastq_maxdepth is the maximum allowed depth of the
* cabq/mcast queue.
*/
int sc_txq_node_maxdepth;
int sc_txq_data_minfree;
int sc_txq_mcastq_maxdepth;
int sc_txq_node_psq_maxdepth;
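/*
* Hedged sketch only (the helper name is illustrative, not an actual
* driver function): a normal data frame is admitted while more than
* sc_txq_data_minfree TX buffers remain free, roughly:
*
*	static int
*	ath_data_frame_ok(struct ath_softc *sc)
*	{
*		return (sc->sc_txbuf_cnt > sc->sc_txq_data_minfree);
*	}
*/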
/*
* Software queue twiddles
*
* hwq_limit_nonaggr:
* when to begin limiting non-aggregate frames to the
* hardware queue, regardless of the TID.
* hwq_limit_aggr:
* when to begin limiting A-MPDU frames to the
* hardware queue, regardless of the TID.
* tid_hwq_lo: how low the per-TID hwq count has to be before the
* TID will be scheduled again
* tid_hwq_hi: how many frames to queue to the HWQ before the TID
* stops being scheduled.
*/
int sc_hwq_limit_nonaggr;
int sc_hwq_limit_aggr;
int sc_tid_hwq_lo;
int sc_tid_hwq_hi;
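/*
* Illustrative only, restating the descriptions above as code:
*
*	if (tid->hwq_depth >= sc->sc_tid_hwq_hi)
*		... stop scheduling this TID ...
*	else if (tid->hwq_depth < sc->sc_tid_hwq_lo)
*		... the TID may be scheduled again ...
*/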
/* DFS related state */
void *sc_dfs; /* Used by an optional DFS module */
int sc_dodfs; /* Whether to enable DFS rx filter bits */
struct task sc_dfstask; /* DFS processing task */
/* Spectral related state */
void *sc_spectral;
int sc_dospectral;
/* LNA diversity related state */
void *sc_lna_div;
int sc_dolnadiv;
/* ALQ */
#ifdef ATH_DEBUG_ALQ
struct if_ath_alq sc_alq;
#endif
/* TX AMPDU handling */
int (*sc_addba_request)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *, int, int, int);
int (*sc_addba_response)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *, int, int, int);
void (*sc_addba_stop)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *);
void (*sc_addba_response_timeout)
(struct ieee80211_node *,
struct ieee80211_tx_ampdu *);
void (*sc_bar_response)(struct ieee80211_node *ni,
struct ieee80211_tx_ampdu *tap,
int status);
/*
* Powersave state tracking.
*
* target/cur powerstate is the chip power state.
* target selfgen state is the self-generated frames
* state. The chip can be awake but transmitted frames
* can have the PWRMGT bit set to 1 so the destination
* thinks the node is asleep.
*/
HAL_POWER_MODE sc_target_powerstate;
HAL_POWER_MODE sc_target_selfgen_state;
HAL_POWER_MODE sc_cur_powerstate;
int sc_powersave_refcnt;
/* ATH_PCI_* flags */
uint32_t sc_pci_devinfo;
};
#define ATH_LOCK_INIT(_sc) \
mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
NULL, MTX_DEF | MTX_RECURSE)
#define ATH_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx)
#define ATH_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define ATH_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define ATH_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define ATH_UNLOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)
/*
* The TX lock is non-reentrant and serialises the TX frame send
* and completion operations.
*/
#define ATH_TX_LOCK_INIT(_sc) do {\
snprintf((_sc)->sc_tx_mtx_name, \
sizeof((_sc)->sc_tx_mtx_name), \
"%s TX lock", \
device_get_nameunit((_sc)->sc_dev)); \
mtx_init(&(_sc)->sc_tx_mtx, (_sc)->sc_tx_mtx_name, \
NULL, MTX_DEF); \
} while (0)
#define ATH_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_tx_mtx)
#define ATH_TX_LOCK(_sc) mtx_lock(&(_sc)->sc_tx_mtx)
#define ATH_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_tx_mtx)
#define ATH_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_tx_mtx, \
MA_OWNED)
#define ATH_TX_UNLOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_tx_mtx, \
MA_NOTOWNED)
#define ATH_TX_TRYLOCK(_sc) (mtx_owned(&(_sc)->sc_tx_mtx) != 0 && \
mtx_trylock(&(_sc)->sc_tx_mtx))
/*
* The IC TX lock is non-reentrant and serialises packet queuing from
* the upper layers.
*/
#define ATH_TX_IC_LOCK_INIT(_sc) do {\
snprintf((_sc)->sc_tx_ic_mtx_name, \
sizeof((_sc)->sc_tx_ic_mtx_name), \
"%s IC TX lock", \
device_get_nameunit((_sc)->sc_dev)); \
mtx_init(&(_sc)->sc_tx_ic_mtx, (_sc)->sc_tx_ic_mtx_name, \
NULL, MTX_DEF); \
} while (0)
#define ATH_TX_IC_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_tx_ic_mtx)
#define ATH_TX_IC_LOCK(_sc) mtx_lock(&(_sc)->sc_tx_ic_mtx)
#define ATH_TX_IC_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_tx_ic_mtx)
#define ATH_TX_IC_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_tx_ic_mtx, \
MA_OWNED)
#define ATH_TX_IC_UNLOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_tx_ic_mtx, \
MA_NOTOWNED)
/*
* The PCU lock is non-recursive and should be treated as a spinlock.
* Although currently the interrupt code is run in netisr context and
* doesn't require this, this may change in the future.
* Please keep this in mind when protecting certain code paths
* with the PCU lock.
*
* The PCU lock is used to serialise access to the PCU so things such
* as TX, RX, state change (eg channel change), channel reset and updates
* from interrupt context (eg kickpcu, txqactive bits) do not clash.
*
* Although the current single-thread taskqueue mechanism protects the
* majority of these situations by simply serialising them, there are
* a few others which occur at the same time. These include the TX path
* (which only acquires ATH_LOCK when recycling buffers to the free list),
* ath_set_channel, the channel scanning API and perhaps quite a bit more.
*/
#define ATH_PCU_LOCK_INIT(_sc) do {\
snprintf((_sc)->sc_pcu_mtx_name, \
sizeof((_sc)->sc_pcu_mtx_name), \
"%s PCU lock", \
device_get_nameunit((_sc)->sc_dev)); \
mtx_init(&(_sc)->sc_pcu_mtx, (_sc)->sc_pcu_mtx_name, \
NULL, MTX_DEF); \
} while (0)
#define ATH_PCU_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_pcu_mtx)
#define ATH_PCU_LOCK(_sc) mtx_lock(&(_sc)->sc_pcu_mtx)
#define ATH_PCU_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_pcu_mtx)
#define ATH_PCU_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_pcu_mtx, \
MA_OWNED)
#define ATH_PCU_UNLOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_pcu_mtx, \
MA_NOTOWNED)
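/*
* Illustrative pattern only (a sketch, not the verbatim reset path):
* code that is about to reprogram the PCU, e.g. a reset or channel
* change, bumps sc_inreset_cnt under the PCU lock so the interrupt
* side can tell a reset is in progress:
*
*	ATH_PCU_LOCK(sc);
*	sc->sc_inreset_cnt++;
*	ATH_PCU_UNLOCK(sc);
*	... stop TX/RX and reset the hardware ...
*	ATH_PCU_LOCK(sc);
*	sc->sc_inreset_cnt--;
*	ATH_PCU_UNLOCK(sc);
*/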
/*
* The RX lock is primarily a(nother) workaround to ensure that the
* RX FIFO/list isn't modified by various execution paths.
* Even though RX occurs in a single context (the ath taskqueue), the
* RX path can be executed via various reset/channel change paths.
*/
#define ATH_RX_LOCK_INIT(_sc) do {\
snprintf((_sc)->sc_rx_mtx_name, \
sizeof((_sc)->sc_rx_mtx_name), \
"%s RX lock", \
device_get_nameunit((_sc)->sc_dev)); \
mtx_init(&(_sc)->sc_rx_mtx, (_sc)->sc_rx_mtx_name, \
NULL, MTX_DEF); \
} while (0)
#define ATH_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_rx_mtx)
#define ATH_RX_LOCK(_sc) mtx_lock(&(_sc)->sc_rx_mtx)
#define ATH_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_rx_mtx)
#define ATH_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_rx_mtx, \
MA_OWNED)
#define ATH_RX_UNLOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_rx_mtx, \
MA_NOTOWNED)
#define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i))
#define ATH_TXBUF_LOCK_INIT(_sc) do { \
snprintf((_sc)->sc_txname, sizeof((_sc)->sc_txname), "%s_buf", \
device_get_nameunit((_sc)->sc_dev)); \
mtx_init(&(_sc)->sc_txbuflock, (_sc)->sc_txname, NULL, MTX_DEF); \
} while (0)
#define ATH_TXBUF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_txbuflock)
#define ATH_TXBUF_LOCK(_sc) mtx_lock(&(_sc)->sc_txbuflock)
#define ATH_TXBUF_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_txbuflock)
#define ATH_TXBUF_LOCK_ASSERT(_sc) \
mtx_assert(&(_sc)->sc_txbuflock, MA_OWNED)
#define ATH_TXBUF_UNLOCK_ASSERT(_sc) \
mtx_assert(&(_sc)->sc_txbuflock, MA_NOTOWNED)
#define ATH_TXSTATUS_LOCK_INIT(_sc) do { \
snprintf((_sc)->sc_txcompname, sizeof((_sc)->sc_txcompname), \
"%s_buf", \
device_get_nameunit((_sc)->sc_dev)); \
mtx_init(&(_sc)->sc_txcomplock, (_sc)->sc_txcompname, NULL, \
MTX_DEF); \
} while (0)
#define ATH_TXSTATUS_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_txcomplock)
#define ATH_TXSTATUS_LOCK(_sc) mtx_lock(&(_sc)->sc_txcomplock)
#define ATH_TXSTATUS_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_txcomplock)
#define ATH_TXSTATUS_LOCK_ASSERT(_sc) \
mtx_assert(&(_sc)->sc_txcomplock, MA_OWNED)
int ath_attach(u_int16_t, struct ath_softc *);
int ath_detach(struct ath_softc *);
void ath_resume(struct ath_softc *);
void ath_suspend(struct ath_softc *);
void ath_shutdown(struct ath_softc *);
void ath_intr(void *);
/*
* HAL definitions to comply with local coding convention.
*/
#define ath_hal_detach(_ah) \
((*(_ah)->ah_detach)((_ah)))
#define ath_hal_reset(_ah, _opmode, _chan, _outdoor, _pstatus) \
((*(_ah)->ah_reset)((_ah), (_opmode), (_chan), (_outdoor), (_pstatus)))
#define ath_hal_macversion(_ah) \
(((_ah)->ah_macVersion << 4) | ((_ah)->ah_macRev))
#define ath_hal_getratetable(_ah, _mode) \
((*(_ah)->ah_getRateTable)((_ah), (_mode)))
#define ath_hal_getmac(_ah, _mac) \
((*(_ah)->ah_getMacAddress)((_ah), (_mac)))
#define ath_hal_setmac(_ah, _mac) \
((*(_ah)->ah_setMacAddress)((_ah), (_mac)))
#define ath_hal_getbssidmask(_ah, _mask) \
((*(_ah)->ah_getBssIdMask)((_ah), (_mask)))
#define ath_hal_setbssidmask(_ah, _mask) \
((*(_ah)->ah_setBssIdMask)((_ah), (_mask)))
#define ath_hal_intrset(_ah, _mask) \
((*(_ah)->ah_setInterrupts)((_ah), (_mask)))
#define ath_hal_intrget(_ah) \
((*(_ah)->ah_getInterrupts)((_ah)))
#define ath_hal_intrpend(_ah) \
((*(_ah)->ah_isInterruptPending)((_ah)))
#define ath_hal_getisr(_ah, _pmask) \
((*(_ah)->ah_getPendingInterrupts)((_ah), (_pmask)))
#define ath_hal_updatetxtriglevel(_ah, _inc) \
((*(_ah)->ah_updateTxTrigLevel)((_ah), (_inc)))
#define ath_hal_setpower(_ah, _mode) \
((*(_ah)->ah_setPowerMode)((_ah), (_mode), AH_TRUE))
#define ath_hal_setselfgenpower(_ah, _mode) \
((*(_ah)->ah_setPowerMode)((_ah), (_mode), AH_FALSE))
#define ath_hal_keycachesize(_ah) \
((*(_ah)->ah_getKeyCacheSize)((_ah)))
#define ath_hal_keyreset(_ah, _ix) \
((*(_ah)->ah_resetKeyCacheEntry)((_ah), (_ix)))
#define ath_hal_keyset(_ah, _ix, _pk, _mac) \
((*(_ah)->ah_setKeyCacheEntry)((_ah), (_ix), (_pk), (_mac), AH_FALSE))
#define ath_hal_keyisvalid(_ah, _ix) \
(((*(_ah)->ah_isKeyCacheEntryValid)((_ah), (_ix))))
#define ath_hal_keysetmac(_ah, _ix, _mac) \
((*(_ah)->ah_setKeyCacheEntryMac)((_ah), (_ix), (_mac)))
#define ath_hal_getrxfilter(_ah) \
((*(_ah)->ah_getRxFilter)((_ah)))
#define ath_hal_setrxfilter(_ah, _filter) \
((*(_ah)->ah_setRxFilter)((_ah), (_filter)))
#define ath_hal_setmcastfilter(_ah, _mfilt0, _mfilt1) \
((*(_ah)->ah_setMulticastFilter)((_ah), (_mfilt0), (_mfilt1)))
#define ath_hal_waitforbeacon(_ah, _bf) \
((*(_ah)->ah_waitForBeaconDone)((_ah), (_bf)->bf_daddr))
#define ath_hal_putrxbuf(_ah, _bufaddr, _rxq) \
((*(_ah)->ah_setRxDP)((_ah), (_bufaddr), (_rxq)))
/* NB: common across all chips */
#define AR_TSF_L32 0x804c /* MAC local clock lower 32 bits */
#define ath_hal_gettsf32(_ah) \
OS_REG_READ(_ah, AR_TSF_L32)
#define ath_hal_gettsf64(_ah) \
((*(_ah)->ah_getTsf64)((_ah)))
#define ath_hal_settsf64(_ah, _val) \
((*(_ah)->ah_setTsf64)((_ah), (_val)))
#define ath_hal_resettsf(_ah) \
((*(_ah)->ah_resetTsf)((_ah)))
#define ath_hal_rxena(_ah) \
((*(_ah)->ah_enableReceive)((_ah)))
#define ath_hal_puttxbuf(_ah, _q, _bufaddr) \
((*(_ah)->ah_setTxDP)((_ah), (_q), (_bufaddr)))
#define ath_hal_gettxbuf(_ah, _q) \
((*(_ah)->ah_getTxDP)((_ah), (_q)))
#define ath_hal_numtxpending(_ah, _q) \
((*(_ah)->ah_numTxPending)((_ah), (_q)))
#define ath_hal_getrxbuf(_ah, _rxq) \
((*(_ah)->ah_getRxDP)((_ah), (_rxq)))
#define ath_hal_txstart(_ah, _q) \
((*(_ah)->ah_startTxDma)((_ah), (_q)))
#define ath_hal_setchannel(_ah, _chan) \
((*(_ah)->ah_setChannel)((_ah), (_chan)))
#define ath_hal_calibrate(_ah, _chan, _iqcal) \
((*(_ah)->ah_perCalibration)((_ah), (_chan), (_iqcal)))
#define ath_hal_calibrateN(_ah, _chan, _lcal, _isdone) \
((*(_ah)->ah_perCalibrationN)((_ah), (_chan), 0x1, (_lcal), (_isdone)))
#define ath_hal_calreset(_ah, _chan) \
((*(_ah)->ah_resetCalValid)((_ah), (_chan)))
#define ath_hal_setledstate(_ah, _state) \
((*(_ah)->ah_setLedState)((_ah), (_state)))
#define ath_hal_beaconinit(_ah, _nextb, _bperiod) \
((*(_ah)->ah_beaconInit)((_ah), (_nextb), (_bperiod)))
#define ath_hal_beaconreset(_ah) \
((*(_ah)->ah_resetStationBeaconTimers)((_ah)))
#define ath_hal_beaconsettimers(_ah, _bt) \
((*(_ah)->ah_setBeaconTimers)((_ah), (_bt)))
#define ath_hal_beacontimers(_ah, _bs) \
((*(_ah)->ah_setStationBeaconTimers)((_ah), (_bs)))
#define ath_hal_getnexttbtt(_ah) \
((*(_ah)->ah_getNextTBTT)((_ah)))
#define ath_hal_setassocid(_ah, _bss, _associd) \
((*(_ah)->ah_writeAssocid)((_ah), (_bss), (_associd)))
#define ath_hal_phydisable(_ah) \
((*(_ah)->ah_phyDisable)((_ah)))
#define ath_hal_setopmode(_ah) \
((*(_ah)->ah_setPCUConfig)((_ah)))
#define ath_hal_stoptxdma(_ah, _qnum) \
((*(_ah)->ah_stopTxDma)((_ah), (_qnum)))
#define ath_hal_stoppcurecv(_ah) \
((*(_ah)->ah_stopPcuReceive)((_ah)))
#define ath_hal_startpcurecv(_ah) \
((*(_ah)->ah_startPcuReceive)((_ah)))
#define ath_hal_stopdmarecv(_ah) \
((*(_ah)->ah_stopDmaReceive)((_ah)))
#define ath_hal_getdiagstate(_ah, _id, _indata, _insize, _outdata, _outsize) \
((*(_ah)->ah_getDiagState)((_ah), (_id), \
(_indata), (_insize), (_outdata), (_outsize)))
#define ath_hal_getfatalstate(_ah, _outdata, _outsize) \
ath_hal_getdiagstate(_ah, 29, NULL, 0, (_outdata), _outsize)
#define ath_hal_setuptxqueue(_ah, _type, _irq) \
((*(_ah)->ah_setupTxQueue)((_ah), (_type), (_irq)))
#define ath_hal_resettxqueue(_ah, _q) \
((*(_ah)->ah_resetTxQueue)((_ah), (_q)))
#define ath_hal_releasetxqueue(_ah, _q) \
((*(_ah)->ah_releaseTxQueue)((_ah), (_q)))
#define ath_hal_gettxqueueprops(_ah, _q, _qi) \
((*(_ah)->ah_getTxQueueProps)((_ah), (_q), (_qi)))
#define ath_hal_settxqueueprops(_ah, _q, _qi) \
((*(_ah)->ah_setTxQueueProps)((_ah), (_q), (_qi)))
/* NB: common across all chips */
#define AR_Q_TXE 0x0840 /* MAC Transmit Queue enable */
#define ath_hal_txqenabled(_ah, _qnum) \
(OS_REG_READ(_ah, AR_Q_TXE) & (1<<(_qnum)))
#define ath_hal_getrfgain(_ah) \
((*(_ah)->ah_getRfGain)((_ah)))
#define ath_hal_getdefantenna(_ah) \
((*(_ah)->ah_getDefAntenna)((_ah)))
#define ath_hal_setdefantenna(_ah, _ant) \
((*(_ah)->ah_setDefAntenna)((_ah), (_ant)))
#define ath_hal_rxmonitor(_ah, _arg, _chan) \
((*(_ah)->ah_rxMonitor)((_ah), (_arg), (_chan)))
#define ath_hal_ani_poll(_ah, _chan) \
((*(_ah)->ah_aniPoll)((_ah), (_chan)))
#define ath_hal_mibevent(_ah, _stats) \
((*(_ah)->ah_procMibEvent)((_ah), (_stats)))
#define ath_hal_setslottime(_ah, _us) \
((*(_ah)->ah_setSlotTime)((_ah), (_us)))
#define ath_hal_getslottime(_ah) \
((*(_ah)->ah_getSlotTime)((_ah)))
#define ath_hal_setacktimeout(_ah, _us) \
((*(_ah)->ah_setAckTimeout)((_ah), (_us)))
#define ath_hal_getacktimeout(_ah) \
((*(_ah)->ah_getAckTimeout)((_ah)))
#define ath_hal_setctstimeout(_ah, _us) \
((*(_ah)->ah_setCTSTimeout)((_ah), (_us)))
#define ath_hal_getctstimeout(_ah) \
((*(_ah)->ah_getCTSTimeout)((_ah)))
#define ath_hal_getcapability(_ah, _cap, _param, _result) \
((*(_ah)->ah_getCapability)((_ah), (_cap), (_param), (_result)))
#define ath_hal_setcapability(_ah, _cap, _param, _v, _status) \
((*(_ah)->ah_setCapability)((_ah), (_cap), (_param), (_v), (_status)))
#define ath_hal_ciphersupported(_ah, _cipher) \
(ath_hal_getcapability(_ah, HAL_CAP_CIPHER, _cipher, NULL) == HAL_OK)
#define ath_hal_getregdomain(_ah, _prd) \
(ath_hal_getcapability(_ah, HAL_CAP_REG_DMN, 0, (_prd)) == HAL_OK)
#define ath_hal_setregdomain(_ah, _rd) \
ath_hal_setcapability(_ah, HAL_CAP_REG_DMN, 0, _rd, NULL)
#define ath_hal_getcountrycode(_ah, _pcc) \
(*(_pcc) = (_ah)->ah_countryCode)
#define ath_hal_gettkipmic(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_TKIP_MIC, 1, NULL) == HAL_OK)
#define ath_hal_settkipmic(_ah, _v) \
ath_hal_setcapability(_ah, HAL_CAP_TKIP_MIC, 1, _v, NULL)
#define ath_hal_hastkipsplit(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_TKIP_SPLIT, 0, NULL) == HAL_OK)
#define ath_hal_gettkipsplit(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_TKIP_SPLIT, 1, NULL) == HAL_OK)
#define ath_hal_settkipsplit(_ah, _v) \
ath_hal_setcapability(_ah, HAL_CAP_TKIP_SPLIT, 1, _v, NULL)
#define ath_hal_haswmetkipmic(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_WME_TKIPMIC, 0, NULL) == HAL_OK)
#define ath_hal_hwphycounters(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_PHYCOUNTERS, 0, NULL) == HAL_OK)
#define ath_hal_hasdiversity(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_DIVERSITY, 0, NULL) == HAL_OK)
#define ath_hal_getdiversity(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_DIVERSITY, 1, NULL) == HAL_OK)
#define ath_hal_setdiversity(_ah, _v) \
ath_hal_setcapability(_ah, HAL_CAP_DIVERSITY, 1, _v, NULL)
#define ath_hal_getantennaswitch(_ah) \
((*(_ah)->ah_getAntennaSwitch)((_ah)))
#define ath_hal_setantennaswitch(_ah, _v) \
((*(_ah)->ah_setAntennaSwitch)((_ah), (_v)))
#define ath_hal_getdiag(_ah, _pv) \
(ath_hal_getcapability(_ah, HAL_CAP_DIAG, 0, _pv) == HAL_OK)
#define ath_hal_setdiag(_ah, _v) \
ath_hal_setcapability(_ah, HAL_CAP_DIAG, 0, _v, NULL)
#define ath_hal_getnumtxqueues(_ah, _pv) \
(ath_hal_getcapability(_ah, HAL_CAP_NUM_TXQUEUES, 0, _pv) == HAL_OK)
#define ath_hal_hasveol(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_VEOL, 0, NULL) == HAL_OK)
#define ath_hal_hastxpowlimit(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 0, NULL) == HAL_OK)
#define ath_hal_settxpowlimit(_ah, _pow) \
((*(_ah)->ah_setTxPowerLimit)((_ah), (_pow)))
#define ath_hal_gettxpowlimit(_ah, _ppow) \
(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 1, _ppow) == HAL_OK)
#define ath_hal_getmaxtxpow(_ah, _ppow) \
(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 2, _ppow) == HAL_OK)
#define ath_hal_gettpscale(_ah, _scale) \
(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 3, _scale) == HAL_OK)
#define ath_hal_settpscale(_ah, _v) \
ath_hal_setcapability(_ah, HAL_CAP_TXPOW, 3, _v, NULL)
#define ath_hal_hastpc(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_TPC, 0, NULL) == HAL_OK)
#define ath_hal_gettpc(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_TPC, 1, NULL) == HAL_OK)
#define ath_hal_settpc(_ah, _v) \
ath_hal_setcapability(_ah, HAL_CAP_TPC, 1, _v, NULL)
#define ath_hal_hasbursting(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_BURST, 0, NULL) == HAL_OK)
#define ath_hal_setmcastkeysearch(_ah, _v) \
ath_hal_setcapability(_ah, HAL_CAP_MCAST_KEYSRCH, 0, _v, NULL)
#define ath_hal_hasmcastkeysearch(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_MCAST_KEYSRCH, 0, NULL) == HAL_OK)
#define ath_hal_getmcastkeysearch(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_MCAST_KEYSRCH, 1, NULL) == HAL_OK)
#define ath_hal_hasfastframes(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_FASTFRAME, 0, NULL) == HAL_OK)
#define ath_hal_hasbssidmask(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_BSSIDMASK, 0, NULL) == HAL_OK)
#define ath_hal_hasbssidmatch(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_BSSIDMATCH, 0, NULL) == HAL_OK)
#define ath_hal_hastsfadjust(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_TSF_ADJUST, 0, NULL) == HAL_OK)
#define ath_hal_gettsfadjust(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_TSF_ADJUST, 1, NULL) == HAL_OK)
#define ath_hal_settsfadjust(_ah, _onoff) \
ath_hal_setcapability(_ah, HAL_CAP_TSF_ADJUST, 1, _onoff, NULL)
#define ath_hal_hasrfsilent(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_RFSILENT, 0, NULL) == HAL_OK)
#define ath_hal_getrfkill(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_RFSILENT, 1, NULL) == HAL_OK)
#define ath_hal_setrfkill(_ah, _onoff) \
ath_hal_setcapability(_ah, HAL_CAP_RFSILENT, 1, _onoff, NULL)
#define ath_hal_getrfsilent(_ah, _prfsilent) \
(ath_hal_getcapability(_ah, HAL_CAP_RFSILENT, 2, _prfsilent) == HAL_OK)
#define ath_hal_setrfsilent(_ah, _rfsilent) \
ath_hal_setcapability(_ah, HAL_CAP_RFSILENT, 2, _rfsilent, NULL)
#define ath_hal_gettpack(_ah, _ptpack) \
(ath_hal_getcapability(_ah, HAL_CAP_TPC_ACK, 0, _ptpack) == HAL_OK)
#define ath_hal_settpack(_ah, _tpack) \
ath_hal_setcapability(_ah, HAL_CAP_TPC_ACK, 0, _tpack, NULL)
#define ath_hal_gettpcts(_ah, _ptpcts) \
(ath_hal_getcapability(_ah, HAL_CAP_TPC_CTS, 0, _ptpcts) == HAL_OK)
#define ath_hal_settpcts(_ah, _tpcts) \
ath_hal_setcapability(_ah, HAL_CAP_TPC_CTS, 0, _tpcts, NULL)
#define ath_hal_hasintmit(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_INTMIT, \
HAL_CAP_INTMIT_PRESENT, NULL) == HAL_OK)
#define ath_hal_getintmit(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_INTMIT, \
HAL_CAP_INTMIT_ENABLE, NULL) == HAL_OK)
#define ath_hal_setintmit(_ah, _v) \
ath_hal_setcapability(_ah, HAL_CAP_INTMIT, \
HAL_CAP_INTMIT_ENABLE, _v, NULL)
#define ath_hal_hasmybeacon(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_DO_MYBEACON, 1, NULL) == HAL_OK)
#define ath_hal_hasenforcetxop(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_ENFORCE_TXOP, 0, NULL) == HAL_OK)
#define ath_hal_getenforcetxop(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_ENFORCE_TXOP, 1, NULL) == HAL_OK)
#define ath_hal_setenforcetxop(_ah, _v) \
ath_hal_setcapability(_ah, HAL_CAP_ENFORCE_TXOP, 1, _v, NULL)
#define ath_hal_hasrxlnamixer(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_RX_LNA_MIXING, 0, NULL) == HAL_OK)
#define ath_hal_hasdivantcomb(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_ANT_DIV_COMB, 0, NULL) == HAL_OK)
/* EDMA definitions */
#define ath_hal_hasedma(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_ENHANCED_DMA_SUPPORT, \
0, NULL) == HAL_OK)
#define ath_hal_getrxfifodepth(_ah, _qtype, _req) \
(ath_hal_getcapability(_ah, HAL_CAP_RXFIFODEPTH, _qtype, _req) \
== HAL_OK)
#define ath_hal_getntxmaps(_ah, _req) \
(ath_hal_getcapability(_ah, HAL_CAP_NUM_TXMAPS, 0, _req) \
== HAL_OK)
#define ath_hal_gettxdesclen(_ah, _req) \
(ath_hal_getcapability(_ah, HAL_CAP_TXDESCLEN, 0, _req) \
== HAL_OK)
#define ath_hal_gettxstatuslen(_ah, _req) \
(ath_hal_getcapability(_ah, HAL_CAP_TXSTATUSLEN, 0, _req) \
== HAL_OK)
#define ath_hal_getrxstatuslen(_ah, _req) \
(ath_hal_getcapability(_ah, HAL_CAP_RXSTATUSLEN, 0, _req) \
== HAL_OK)
#define ath_hal_setrxbufsize(_ah, _req) \
(ath_hal_setcapability(_ah, HAL_CAP_RXBUFSIZE, 0, _req, NULL) \
== HAL_OK)
#define ath_hal_getchannoise(_ah, _c) \
((*(_ah)->ah_getChanNoise)((_ah), (_c)))
/* 802.11n HAL methods */
#define ath_hal_getrxchainmask(_ah, _prxchainmask) \
(ath_hal_getcapability(_ah, HAL_CAP_RX_CHAINMASK, 0, _prxchainmask))
#define ath_hal_gettxchainmask(_ah, _ptxchainmask) \
(ath_hal_getcapability(_ah, HAL_CAP_TX_CHAINMASK, 0, _ptxchainmask))
#define ath_hal_setrxchainmask(_ah, _rx) \
(ath_hal_setcapability(_ah, HAL_CAP_RX_CHAINMASK, 1, _rx, NULL))
#define ath_hal_settxchainmask(_ah, _tx) \
(ath_hal_setcapability(_ah, HAL_CAP_TX_CHAINMASK, 1, _tx, NULL))
#define ath_hal_split4ktrans(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_SPLIT_4KB_TRANS, \
0, NULL) == HAL_OK)
#define ath_hal_self_linked_final_rxdesc(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_RXDESC_SELFLINK, \
0, NULL) == HAL_OK)
#define ath_hal_gtxto_supported(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_GTXTO, 0, NULL) == HAL_OK)
#define ath_hal_has_long_rxdesc_tsf(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_LONG_RXDESC_TSF, \
0, NULL) == HAL_OK)
#define ath_hal_setuprxdesc(_ah, _ds, _size, _intreq) \
((*(_ah)->ah_setupRxDesc)((_ah), (_ds), (_size), (_intreq)))
#define ath_hal_rxprocdesc(_ah, _ds, _dspa, _dsnext, _rs) \
((*(_ah)->ah_procRxDesc)((_ah), (_ds), (_dspa), (_dsnext), 0, (_rs)))
#define ath_hal_setuptxdesc(_ah, _ds, _plen, _hlen, _atype, _txpow, \
_txr0, _txtr0, _keyix, _ant, _flags, \
_rtsrate, _rtsdura) \
((*(_ah)->ah_setupTxDesc)((_ah), (_ds), (_plen), (_hlen), (_atype), \
(_txpow), (_txr0), (_txtr0), (_keyix), (_ant), \
(_flags), (_rtsrate), (_rtsdura), 0, 0, 0))
#define ath_hal_setupxtxdesc(_ah, _ds, \
_txr1, _txtr1, _txr2, _txtr2, _txr3, _txtr3) \
((*(_ah)->ah_setupXTxDesc)((_ah), (_ds), \
(_txr1), (_txtr1), (_txr2), (_txtr2), (_txr3), (_txtr3)))
#define ath_hal_filltxdesc(_ah, _ds, _b, _l, _did, _qid, _first, _last, _ds0) \
((*(_ah)->ah_fillTxDesc)((_ah), (_ds), (_b), (_l), (_did), (_qid), \
(_first), (_last), (_ds0)))
#define ath_hal_txprocdesc(_ah, _ds, _ts) \
((*(_ah)->ah_procTxDesc)((_ah), (_ds), (_ts)))
#define ath_hal_gettxintrtxqs(_ah, _txqs) \
((*(_ah)->ah_getTxIntrQueue)((_ah), (_txqs)))
#define ath_hal_gettxcompletionrates(_ah, _ds, _rates, _tries) \
((*(_ah)->ah_getTxCompletionRates)((_ah), (_ds), (_rates), (_tries)))
#define ath_hal_settxdesclink(_ah, _ds, _link) \
((*(_ah)->ah_setTxDescLink)((_ah), (_ds), (_link)))
#define ath_hal_gettxdesclink(_ah, _ds, _link) \
((*(_ah)->ah_getTxDescLink)((_ah), (_ds), (_link)))
#define ath_hal_gettxdesclinkptr(_ah, _ds, _linkptr) \
((*(_ah)->ah_getTxDescLinkPtr)((_ah), (_ds), (_linkptr)))
#define ath_hal_setuptxstatusring(_ah, _tsstart, _tspstart, _size) \
((*(_ah)->ah_setupTxStatusRing)((_ah), (_tsstart), (_tspstart), \
(_size)))
#define ath_hal_gettxrawtxdesc(_ah, _txstatus) \
((*(_ah)->ah_getTxRawTxDesc)((_ah), (_txstatus)))
#define ath_hal_setupfirsttxdesc(_ah, _ds, _aggrlen, _flags, _txpower, \
_txr0, _txtr0, _antm, _rcr, _rcd) \
((*(_ah)->ah_setupFirstTxDesc)((_ah), (_ds), (_aggrlen), (_flags), \
(_txpower), (_txr0), (_txtr0), (_antm), (_rcr), (_rcd)))
#define ath_hal_chaintxdesc(_ah, _ds, _bl, _sl, _pktlen, _hdrlen, _type, \
_keyix, _cipher, _delims, _first, _last, _lastaggr) \
((*(_ah)->ah_chainTxDesc)((_ah), (_ds), (_bl), (_sl), \
(_pktlen), (_hdrlen), (_type), (_keyix), (_cipher), (_delims), \
(_first), (_last), (_lastaggr)))
#define ath_hal_setuplasttxdesc(_ah, _ds, _ds0) \
((*(_ah)->ah_setupLastTxDesc)((_ah), (_ds), (_ds0)))
#define ath_hal_set11nratescenario(_ah, _ds, _dur, _rt, _series, _ns, _flags) \
((*(_ah)->ah_set11nRateScenario)((_ah), (_ds), (_dur), (_rt), \
(_series), (_ns), (_flags)))
#define ath_hal_set11n_aggr_first(_ah, _ds, _len, _num) \
((*(_ah)->ah_set11nAggrFirst)((_ah), (_ds), (_len), (_num)))
#define ath_hal_set11n_aggr_middle(_ah, _ds, _num) \
((*(_ah)->ah_set11nAggrMiddle)((_ah), (_ds), (_num)))
#define ath_hal_set11n_aggr_last(_ah, _ds) \
((*(_ah)->ah_set11nAggrLast)((_ah), (_ds)))
#define ath_hal_set11nburstduration(_ah, _ds, _dur) \
((*(_ah)->ah_set11nBurstDuration)((_ah), (_ds), (_dur)))
#define ath_hal_clr11n_aggr(_ah, _ds) \
((*(_ah)->ah_clr11nAggr)((_ah), (_ds)))
#define ath_hal_set11n_virtmorefrag(_ah, _ds, _v) \
((*(_ah)->ah_set11nVirtMoreFrag)((_ah), (_ds), (_v)))
#define ath_hal_gpioCfgOutput(_ah, _gpio, _type) \
((*(_ah)->ah_gpioCfgOutput)((_ah), (_gpio), (_type)))
#define ath_hal_gpioset(_ah, _gpio, _b) \
((*(_ah)->ah_gpioSet)((_ah), (_gpio), (_b)))
#define ath_hal_gpioget(_ah, _gpio) \
((*(_ah)->ah_gpioGet)((_ah), (_gpio)))
#define ath_hal_gpiosetintr(_ah, _gpio, _b) \
((*(_ah)->ah_gpioSetIntr)((_ah), (_gpio), (_b)))
/*
* PCIe suspend/resume/poweron/poweroff related macros
*/
#define ath_hal_enablepcie(_ah, _restore, _poweroff) \
((*(_ah)->ah_configPCIE)((_ah), (_restore), (_poweroff)))
#define ath_hal_disablepcie(_ah) \
((*(_ah)->ah_disablePCIE)((_ah)))
/*
* This is badly-named; you need to set the correct parameters
* to begin to receive useful radar events; and even then
* it doesn't "enable" DFS. See the ath_dfs/null/ module for
* more information.
*/
#define ath_hal_enabledfs(_ah, _param) \
((*(_ah)->ah_enableDfs)((_ah), (_param)))
#define ath_hal_getdfsthresh(_ah, _param) \
((*(_ah)->ah_getDfsThresh)((_ah), (_param)))
#define ath_hal_getdfsdefaultthresh(_ah, _param) \
((*(_ah)->ah_getDfsDefaultThresh)((_ah), (_param)))
#define ath_hal_procradarevent(_ah, _rxs, _fulltsf, _buf, _event) \
((*(_ah)->ah_procRadarEvent)((_ah), (_rxs), (_fulltsf), \
(_buf), (_event)))
#define ath_hal_is_fast_clock_enabled(_ah) \
((*(_ah)->ah_isFastClockEnabled)((_ah)))
#define ath_hal_radar_wait(_ah, _chan) \
((*(_ah)->ah_radarWait)((_ah), (_chan)))
#define ath_hal_get_mib_cycle_counts(_ah, _sample) \
((*(_ah)->ah_getMibCycleCounts)((_ah), (_sample)))
#define ath_hal_get_chan_ext_busy(_ah) \
((*(_ah)->ah_get11nExtBusy)((_ah)))
#define ath_hal_setchainmasks(_ah, _txchainmask, _rxchainmask) \
((*(_ah)->ah_setChainMasks)((_ah), (_txchainmask), (_rxchainmask)))
#define ath_hal_spectral_supported(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_SPECTRAL_SCAN, 0, NULL) == HAL_OK)
#define ath_hal_spectral_get_config(_ah, _p) \
((*(_ah)->ah_spectralGetConfig)((_ah), (_p)))
#define ath_hal_spectral_configure(_ah, _p) \
((*(_ah)->ah_spectralConfigure)((_ah), (_p)))
#define ath_hal_spectral_start(_ah) \
((*(_ah)->ah_spectralStart)((_ah)))
#define ath_hal_spectral_stop(_ah) \
((*(_ah)->ah_spectralStop)((_ah)))
#define ath_hal_btcoex_supported(_ah) \
(ath_hal_getcapability(_ah, HAL_CAP_BT_COEX, 0, NULL) == HAL_OK)
#define ath_hal_btcoex_set_info(_ah, _info) \
((*(_ah)->ah_btCoexSetInfo)((_ah), (_info)))
#define ath_hal_btcoex_set_config(_ah, _cfg) \
((*(_ah)->ah_btCoexSetConfig)((_ah), (_cfg)))
#define ath_hal_btcoex_set_qcu_thresh(_ah, _qcuid) \
((*(_ah)->ah_btCoexSetQcuThresh)((_ah), (_qcuid)))
#define ath_hal_btcoex_set_weights(_ah, _weight) \
((*(_ah)->ah_btCoexSetWeights)((_ah), (_weight)))
#define ath_hal_btcoex_set_bmiss_thresh(_ah, _thr) \
((*(_ah)->ah_btCoexSetBmissThresh)((_ah), (_thr)))
#define ath_hal_btcoex_set_parameter(_ah, _attrib, _val) \
((*(_ah)->ah_btCoexSetParameter)((_ah), (_attrib), (_val)))
#define ath_hal_btcoex_enable(_ah) \
((*(_ah)->ah_btCoexEnable)((_ah)))
#define ath_hal_btcoex_disable(_ah) \
((*(_ah)->ah_btCoexDisable)((_ah)))
#define ath_hal_div_comb_conf_get(_ah, _conf) \
((*(_ah)->ah_divLnaConfGet)((_ah), (_conf)))
#define ath_hal_div_comb_conf_set(_ah, _conf) \
((*(_ah)->ah_divLnaConfSet)((_ah), (_conf)))
#endif /* _DEV_ATH_ATHVAR_H */
Index: head/sys/dev/bwi/bwimac.c
===================================================================
--- head/sys/dev/bwi/bwimac.c (revision 287196)
+++ head/sys/dev/bwi/bwimac.c (revision 287197)
@@ -1,1972 +1,1973 @@
/*
* Copyright (c) 2007 The DragonFly Project. All rights reserved.
*
* This code is derived from software contributed to The DragonFly Project
* by Sepherosa Ziehau <sepherosa@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of The DragonFly Project nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific, prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $DragonFly: src/sys/dev/netif/bwi/bwimac.c,v 1.13 2008/02/15 11:15:38 sephe Exp $
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_bwi.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_phy.h>
#include <machine/bus.h>
#include <dev/bwi/bitops.h>
#include <dev/bwi/if_bwireg.h>
#include <dev/bwi/if_bwivar.h>
#include <dev/bwi/bwimac.h>
#include <dev/bwi/bwirf.h>
#include <dev/bwi/bwiphy.h>
struct bwi_retry_lim {
uint16_t shretry;
uint16_t shretry_fb;
uint16_t lgretry;
uint16_t lgretry_fb;
};
static int bwi_mac_test(struct bwi_mac *);
static int bwi_mac_get_property(struct bwi_mac *);
static void bwi_mac_set_retry_lim(struct bwi_mac *,
const struct bwi_retry_lim *);
static void bwi_mac_set_ackrates(struct bwi_mac *,
const struct ieee80211_rate_table *rt,
const struct ieee80211_rateset *);
static int bwi_mac_gpio_init(struct bwi_mac *);
static int bwi_mac_gpio_fini(struct bwi_mac *);
static void bwi_mac_opmode_init(struct bwi_mac *);
static void bwi_mac_hostflags_init(struct bwi_mac *);
static void bwi_mac_bss_param_init(struct bwi_mac *);
static void bwi_mac_fw_free(struct bwi_mac *);
static int bwi_mac_fw_load(struct bwi_mac *);
static int bwi_mac_fw_init(struct bwi_mac *);
static int bwi_mac_fw_load_iv(struct bwi_mac *, const struct firmware *);
static void bwi_mac_setup_tpctl(struct bwi_mac *);
static void bwi_mac_adjust_tpctl(struct bwi_mac *, int, int);
static void bwi_mac_lock(struct bwi_mac *);
static void bwi_mac_unlock(struct bwi_mac *);
static const uint8_t bwi_sup_macrev[] = { 2, 4, 5, 6, 7, 9, 10 };
void
bwi_tmplt_write_4(struct bwi_mac *mac, uint32_t ofs, uint32_t val)
{
struct bwi_softc *sc = mac->mac_sc;
if (mac->mac_flags & BWI_MAC_F_BSWAP)
val = bswap32(val);
CSR_WRITE_4(sc, BWI_MAC_TMPLT_CTRL, ofs);
CSR_WRITE_4(sc, BWI_MAC_TMPLT_DATA, val);
}
void
bwi_hostflags_write(struct bwi_mac *mac, uint64_t flags)
{
uint64_t val;
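/*
* NB: the host flags live in the comm shared-memory object as
* three 16-bit words (LO/MI/HI); only the low 32 bits are written
* here, as two halves.
*/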
val = flags & 0xffff;
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_HFLAGS_LO, val);
val = (flags >> 16) & 0xffff;
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_HFLAGS_MI, val);
/* HI has unclear meaning, so leave it as it is */
}
uint64_t
bwi_hostflags_read(struct bwi_mac *mac)
{
uint64_t flags, val;
/* HI has unclear meaning, so don't touch it */
flags = 0;
val = MOBJ_READ_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_HFLAGS_MI);
flags |= val << 16;
val = MOBJ_READ_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_HFLAGS_LO);
flags |= val;
return flags;
}
uint16_t
bwi_memobj_read_2(struct bwi_mac *mac, uint16_t obj_id, uint16_t ofs0)
{
struct bwi_softc *sc = mac->mac_sc;
uint32_t data_reg;
int ofs;
data_reg = BWI_MOBJ_DATA;
ofs = ofs0 / 4;
if (ofs0 % 4 != 0)
data_reg = BWI_MOBJ_DATA_UNALIGN;
CSR_WRITE_4(sc, BWI_MOBJ_CTRL, BWI_MOBJ_CTRL_VAL(obj_id, ofs));
return CSR_READ_2(sc, data_reg);
}
uint32_t
bwi_memobj_read_4(struct bwi_mac *mac, uint16_t obj_id, uint16_t ofs0)
{
struct bwi_softc *sc = mac->mac_sc;
int ofs;
ofs = ofs0 / 4;
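/*
* NB: shared-memory objects are addressed in 32-bit words; an
* offset that is not 32-bit aligned is read as two 16-bit halves
* through the UNALIGN data register and recombined below.
*/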
if (ofs0 % 4 != 0) {
uint32_t ret;
CSR_WRITE_4(sc, BWI_MOBJ_CTRL, BWI_MOBJ_CTRL_VAL(obj_id, ofs));
ret = CSR_READ_2(sc, BWI_MOBJ_DATA_UNALIGN);
ret <<= 16;
CSR_WRITE_4(sc, BWI_MOBJ_CTRL,
BWI_MOBJ_CTRL_VAL(obj_id, ofs + 1));
ret |= CSR_READ_2(sc, BWI_MOBJ_DATA);
return ret;
} else {
CSR_WRITE_4(sc, BWI_MOBJ_CTRL, BWI_MOBJ_CTRL_VAL(obj_id, ofs));
return CSR_READ_4(sc, BWI_MOBJ_DATA);
}
}
void
bwi_memobj_write_2(struct bwi_mac *mac, uint16_t obj_id, uint16_t ofs0,
uint16_t v)
{
struct bwi_softc *sc = mac->mac_sc;
uint32_t data_reg;
int ofs;
data_reg = BWI_MOBJ_DATA;
ofs = ofs0 / 4;
if (ofs0 % 4 != 0)
data_reg = BWI_MOBJ_DATA_UNALIGN;
CSR_WRITE_4(sc, BWI_MOBJ_CTRL, BWI_MOBJ_CTRL_VAL(obj_id, ofs));
CSR_WRITE_2(sc, data_reg, v);
}
void
bwi_memobj_write_4(struct bwi_mac *mac, uint16_t obj_id, uint16_t ofs0,
uint32_t v)
{
struct bwi_softc *sc = mac->mac_sc;
int ofs;
ofs = ofs0 / 4;
if (ofs0 % 4 != 0) {
CSR_WRITE_4(sc, BWI_MOBJ_CTRL, BWI_MOBJ_CTRL_VAL(obj_id, ofs));
CSR_WRITE_2(sc, BWI_MOBJ_DATA_UNALIGN, v >> 16);
CSR_WRITE_4(sc, BWI_MOBJ_CTRL,
BWI_MOBJ_CTRL_VAL(obj_id, ofs + 1));
CSR_WRITE_2(sc, BWI_MOBJ_DATA, v & 0xffff);
} else {
CSR_WRITE_4(sc, BWI_MOBJ_CTRL, BWI_MOBJ_CTRL_VAL(obj_id, ofs));
CSR_WRITE_4(sc, BWI_MOBJ_DATA, v);
}
}
int
bwi_mac_lateattach(struct bwi_mac *mac)
{
int error;
if (mac->mac_rev >= 5)
CSR_READ_4(mac->mac_sc, BWI_STATE_HI); /* dummy read */
bwi_mac_reset(mac, 1);
error = bwi_phy_attach(mac);
if (error)
return error;
error = bwi_rf_attach(mac);
if (error)
return error;
/* Link 11B/G PHY, unlink 11A PHY */
if (mac->mac_phy.phy_mode == IEEE80211_MODE_11A)
bwi_mac_reset(mac, 0);
else
bwi_mac_reset(mac, 1);
error = bwi_mac_test(mac);
if (error)
return error;
error = bwi_mac_get_property(mac);
if (error)
return error;
error = bwi_rf_map_txpower(mac);
if (error)
return error;
bwi_rf_off(mac);
CSR_WRITE_2(mac->mac_sc, BWI_BBP_ATTEN, BWI_BBP_ATTEN_MAGIC);
bwi_regwin_disable(mac->mac_sc, &mac->mac_regwin, 0);
return 0;
}
int
bwi_mac_init(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
int error, i;
/* Clear MAC/PHY/RF states */
bwi_mac_setup_tpctl(mac);
bwi_rf_clear_state(&mac->mac_rf);
bwi_phy_clear_state(&mac->mac_phy);
/* Enable the MAC and link it to the PHY */
if (!bwi_regwin_is_enabled(sc, &mac->mac_regwin))
bwi_mac_reset(mac, 1);
/* Initialize backplane */
error = bwi_bus_init(sc, mac);
if (error)
return error;
/* do timeout fixup */
if (sc->sc_bus_regwin.rw_rev <= 5 &&
sc->sc_bus_regwin.rw_type != BWI_REGWIN_T_BUSPCIE) {
CSR_SETBITS_4(sc, BWI_CONF_LO,
__SHIFTIN(BWI_CONF_LO_SERVTO, BWI_CONF_LO_SERVTO_MASK) |
__SHIFTIN(BWI_CONF_LO_REQTO, BWI_CONF_LO_REQTO_MASK));
}
/* Calibrate PHY */
error = bwi_phy_calibrate(mac);
if (error) {
device_printf(sc->sc_dev, "PHY calibrate failed\n");
return error;
}
/* Prepare to initialize firmware */
CSR_WRITE_4(sc, BWI_MAC_STATUS,
BWI_MAC_STATUS_UCODE_JUMP0 |
BWI_MAC_STATUS_IHREN);
/*
* Load and initialize the firmware images
*/
error = bwi_mac_fw_load(mac);
if (error)
return error;
error = bwi_mac_gpio_init(mac);
if (error)
return error;
error = bwi_mac_fw_init(mac);
if (error)
return error;
/*
* Turn on RF
*/
bwi_rf_on(mac);
/* TODO: LED, hardware rf enabled is only related to LED setting */
/*
* Initialize PHY
*/
CSR_WRITE_2(sc, BWI_BBP_ATTEN, 0);
bwi_phy_init(mac);
/* TODO: interference mitigation */
/*
* Setup antenna mode
*/
bwi_rf_set_ant_mode(mac, mac->mac_rf.rf_ant_mode);
/*
* Initialize operation mode (RX configuration)
*/
bwi_mac_opmode_init(mac);
/* set up Beacon interval */
if (mac->mac_rev < 3) {
CSR_WRITE_2(sc, 0x60e, 0);
CSR_WRITE_2(sc, 0x610, 0x8000);
CSR_WRITE_2(sc, 0x604, 0);
CSR_WRITE_2(sc, 0x606, 0x200);
} else {
CSR_WRITE_4(sc, 0x188, 0x80000000);
CSR_WRITE_4(sc, 0x18c, 0x2000000);
}
/*
* Initialize TX/RX interrupts' mask
*/
CSR_WRITE_4(sc, BWI_MAC_INTR_STATUS, BWI_INTR_TIMER1);
for (i = 0; i < BWI_TXRX_NRING; ++i) {
uint32_t intrs;
if (BWI_TXRX_IS_RX(i))
intrs = BWI_TXRX_RX_INTRS;
else
intrs = BWI_TXRX_TX_INTRS;
CSR_WRITE_4(sc, BWI_TXRX_INTR_MASK(i), intrs);
}
/* allow the MAC to control the PHY clock (dynamic on/off) */
CSR_SETBITS_4(sc, BWI_STATE_LO, 0x100000);
/* Setup MAC power up delay */
CSR_WRITE_2(sc, BWI_MAC_POWERUP_DELAY, sc->sc_pwron_delay);
/* Set MAC regwin revision */
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_MACREV, mac->mac_rev);
/*
* Initialize host flags
*/
bwi_mac_hostflags_init(mac);
/*
* Initialize BSS parameters
*/
bwi_mac_bss_param_init(mac);
/*
* Initialize TX rings
*/
for (i = 0; i < BWI_TX_NRING; ++i) {
error = sc->sc_init_tx_ring(sc, i);
if (error) {
device_printf(sc->sc_dev,
"can't initialize %dth TX ring\n", i);
return error;
}
}
/*
* Initialize RX ring
*/
error = sc->sc_init_rx_ring(sc);
if (error) {
device_printf(sc->sc_dev, "can't initialize RX ring\n");
return error;
}
/*
* Initialize TX stats if the current MAC has them
*/
if (mac->mac_flags & BWI_MAC_F_HAS_TXSTATS) {
error = sc->sc_init_txstats(sc);
if (error) {
device_printf(sc->sc_dev,
"can't initialize TX stats ring\n");
return error;
}
}
/* update PRETBTT */
CSR_WRITE_2(sc, 0x612, 0x50); /* Force Pre-TBTT to 80? */
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ, 0x416, 0x50);
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ, 0x414, 0x1f4);
mac->mac_flags |= BWI_MAC_F_INITED;
return 0;
}
void
bwi_mac_reset(struct bwi_mac *mac, int link_phy)
{
struct bwi_softc *sc = mac->mac_sc;
uint32_t flags, state_lo, status;
flags = BWI_STATE_LO_FLAG_PHYRST | BWI_STATE_LO_FLAG_PHYCLKEN;
if (link_phy)
flags |= BWI_STATE_LO_FLAG_PHYLNK;
bwi_regwin_enable(sc, &mac->mac_regwin, flags);
DELAY(2000);
state_lo = CSR_READ_4(sc, BWI_STATE_LO);
state_lo |= BWI_STATE_LO_GATED_CLOCK;
state_lo &= ~__SHIFTIN(BWI_STATE_LO_FLAG_PHYRST,
BWI_STATE_LO_FLAGS_MASK);
CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
/* Flush pending bus write */
CSR_READ_4(sc, BWI_STATE_LO);
DELAY(1000);
state_lo &= ~BWI_STATE_LO_GATED_CLOCK;
CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
/* Flush pending bus write */
CSR_READ_4(sc, BWI_STATE_LO);
DELAY(1000);
CSR_WRITE_2(sc, BWI_BBP_ATTEN, 0);
status = CSR_READ_4(sc, BWI_MAC_STATUS);
status |= BWI_MAC_STATUS_IHREN;
if (link_phy)
status |= BWI_MAC_STATUS_PHYLNK;
else
status &= ~BWI_MAC_STATUS_PHYLNK;
CSR_WRITE_4(sc, BWI_MAC_STATUS, status);
if (link_phy) {
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_ATTACH | BWI_DBG_INIT,
"%s\n", "PHY is linked");
mac->mac_phy.phy_flags |= BWI_PHY_F_LINKED;
} else {
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_ATTACH | BWI_DBG_INIT,
"%s\n", "PHY is unlinked");
mac->mac_phy.phy_flags &= ~BWI_PHY_F_LINKED;
}
}
void
bwi_mac_set_tpctl_11bg(struct bwi_mac *mac, const struct bwi_tpctl *new_tpctl)
{
struct bwi_rf *rf = &mac->mac_rf;
struct bwi_tpctl *tpctl = &mac->mac_tpctl;
if (new_tpctl != NULL) {
KASSERT(new_tpctl->bbp_atten <= BWI_BBP_ATTEN_MAX,
("bbp_atten %d", new_tpctl->bbp_atten));
KASSERT(new_tpctl->rf_atten <=
(rf->rf_rev < 6 ? BWI_RF_ATTEN_MAX0
: BWI_RF_ATTEN_MAX1),
("rf_atten %d", new_tpctl->rf_atten));
KASSERT(new_tpctl->tp_ctrl1 <= BWI_TPCTL1_MAX,
("tp_ctrl1 %d", new_tpctl->tp_ctrl1));
tpctl->bbp_atten = new_tpctl->bbp_atten;
tpctl->rf_atten = new_tpctl->rf_atten;
tpctl->tp_ctrl1 = new_tpctl->tp_ctrl1;
}
/* Set BBP attenuation */
bwi_phy_set_bbp_atten(mac, tpctl->bbp_atten);
/* Set RF attenuation */
RF_WRITE(mac, BWI_RFR_ATTEN, tpctl->rf_atten);
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_RF_ATTEN,
tpctl->rf_atten);
/* Set TX power */
if (rf->rf_type == BWI_RF_T_BCM2050) {
RF_FILT_SETBITS(mac, BWI_RFR_TXPWR, ~BWI_RFR_TXPWR1_MASK,
__SHIFTIN(tpctl->tp_ctrl1, BWI_RFR_TXPWR1_MASK));
}
/* Adjust RF Local Oscillator */
if (mac->mac_phy.phy_mode == IEEE80211_MODE_11G)
bwi_rf_lo_adjust(mac, tpctl);
}
static int
bwi_mac_test(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
uint32_t orig_val, val;
#define TEST_VAL1 0xaa5555aa
#define TEST_VAL2 0x55aaaa55
/* Save it so it can be restored later */
orig_val = MOBJ_READ_4(mac, BWI_COMM_MOBJ, 0);
/* Test 1 */
MOBJ_WRITE_4(mac, BWI_COMM_MOBJ, 0, TEST_VAL1);
val = MOBJ_READ_4(mac, BWI_COMM_MOBJ, 0);
if (val != TEST_VAL1) {
device_printf(sc->sc_dev, "TEST1 failed\n");
return ENXIO;
}
/* Test 2 */
MOBJ_WRITE_4(mac, BWI_COMM_MOBJ, 0, TEST_VAL2);
val = MOBJ_READ_4(mac, BWI_COMM_MOBJ, 0);
if (val != TEST_VAL2) {
device_printf(sc->sc_dev, "TEST2 failed\n");
return ENXIO;
}
/* Restore to the original value */
MOBJ_WRITE_4(mac, BWI_COMM_MOBJ, 0, orig_val);
val = CSR_READ_4(sc, BWI_MAC_STATUS);
if ((val & ~BWI_MAC_STATUS_PHYLNK) != BWI_MAC_STATUS_IHREN) {
device_printf(sc->sc_dev, "%s failed, MAC status 0x%08x\n",
__func__, val);
return ENXIO;
}
val = CSR_READ_4(sc, BWI_MAC_INTR_STATUS);
if (val != 0) {
device_printf(sc->sc_dev, "%s failed, intr status %08x\n",
__func__, val);
return ENXIO;
}
#undef TEST_VAL2
#undef TEST_VAL1
return 0;
}
static void
bwi_mac_setup_tpctl(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_rf *rf = &mac->mac_rf;
struct bwi_phy *phy = &mac->mac_phy;
struct bwi_tpctl *tpctl = &mac->mac_tpctl;
/* Calc BBP attenuation */
if (rf->rf_type == BWI_RF_T_BCM2050 && rf->rf_rev < 6)
tpctl->bbp_atten = 0;
else
tpctl->bbp_atten = 2;
/* Calc TX power CTRL1?? */
tpctl->tp_ctrl1 = 0;
if (rf->rf_type == BWI_RF_T_BCM2050) {
if (rf->rf_rev == 1)
tpctl->tp_ctrl1 = 3;
else if (rf->rf_rev < 6)
tpctl->tp_ctrl1 = 2;
else if (rf->rf_rev == 8)
tpctl->tp_ctrl1 = 1;
}
/* Empty TX power CTRL2?? */
tpctl->tp_ctrl2 = 0xffff;
/*
* Calc RF attenuation
*/
if (phy->phy_mode == IEEE80211_MODE_11A) {
tpctl->rf_atten = 0x60;
goto back;
}
if (BWI_IS_BRCM_BCM4309G(sc) && sc->sc_pci_revid < 0x51) {
tpctl->rf_atten = sc->sc_pci_revid < 0x43 ? 2 : 3;
goto back;
}
tpctl->rf_atten = 5;
if (rf->rf_type != BWI_RF_T_BCM2050) {
if (rf->rf_type == BWI_RF_T_BCM2053 && rf->rf_rev == 1)
tpctl->rf_atten = 6;
goto back;
}
/*
* NB: If we reach here and the card is a BRCM_BCM4309G,
* then the card's PCI revision must be >= 0x51
*/
/* BCM2050 RF */
switch (rf->rf_rev) {
case 1:
if (phy->phy_mode == IEEE80211_MODE_11G) {
if (BWI_IS_BRCM_BCM4309G(sc) || BWI_IS_BRCM_BU4306(sc))
tpctl->rf_atten = 3;
else
tpctl->rf_atten = 1;
} else {
if (BWI_IS_BRCM_BCM4309G(sc))
tpctl->rf_atten = 7;
else
tpctl->rf_atten = 6;
}
break;
case 2:
if (phy->phy_mode == IEEE80211_MODE_11G) {
/*
* NOTE: Order of following conditions is critical
*/
if (BWI_IS_BRCM_BCM4309G(sc))
tpctl->rf_atten = 3;
else if (BWI_IS_BRCM_BU4306(sc))
tpctl->rf_atten = 5;
else if (sc->sc_bbp_id == BWI_BBPID_BCM4320)
tpctl->rf_atten = 4;
else
tpctl->rf_atten = 3;
} else {
tpctl->rf_atten = 6;
}
break;
case 4:
case 5:
tpctl->rf_atten = 1;
break;
case 8:
tpctl->rf_atten = 0x1a;
break;
}
back:
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_INIT | BWI_DBG_TXPOWER,
"bbp atten: %u, rf atten: %u, ctrl1: %u, ctrl2: %u\n",
tpctl->bbp_atten, tpctl->rf_atten,
tpctl->tp_ctrl1, tpctl->tp_ctrl2);
}
void
bwi_mac_dummy_xmit(struct bwi_mac *mac)
{
#define PACKET_LEN 5
static const uint32_t packet_11a[PACKET_LEN] =
{ 0x000201cc, 0x00d40000, 0x00000000, 0x01000000, 0x00000000 };
static const uint32_t packet_11bg[PACKET_LEN] =
{ 0x000b846e, 0x00d40000, 0x00000000, 0x01000000, 0x00000000 };
struct bwi_softc *sc = mac->mac_sc;
struct bwi_rf *rf = &mac->mac_rf;
const uint32_t *packet;
uint16_t val_50c;
int wait_max, i;
if (mac->mac_phy.phy_mode == IEEE80211_MODE_11A) {
wait_max = 30;
packet = packet_11a;
val_50c = 1;
} else {
wait_max = 250;
packet = packet_11bg;
val_50c = 0;
}
for (i = 0; i < PACKET_LEN; ++i)
TMPLT_WRITE_4(mac, i * 4, packet[i]);
CSR_READ_4(sc, BWI_MAC_STATUS); /* dummy read */
CSR_WRITE_2(sc, 0x568, 0);
CSR_WRITE_2(sc, 0x7c0, 0);
CSR_WRITE_2(sc, 0x50c, val_50c);
CSR_WRITE_2(sc, 0x508, 0);
CSR_WRITE_2(sc, 0x50a, 0);
CSR_WRITE_2(sc, 0x54c, 0);
CSR_WRITE_2(sc, 0x56a, 0x14);
CSR_WRITE_2(sc, 0x568, 0x826);
CSR_WRITE_2(sc, 0x500, 0);
CSR_WRITE_2(sc, 0x502, 0x30);
if (rf->rf_type == BWI_RF_T_BCM2050 && rf->rf_rev <= 5)
RF_WRITE(mac, 0x51, 0x17);
for (i = 0; i < wait_max; ++i) {
if (CSR_READ_2(sc, 0x50e) & 0x80)
break;
DELAY(10);
}
for (i = 0; i < 10; ++i) {
if (CSR_READ_2(sc, 0x50e) & 0x400)
break;
DELAY(10);
}
for (i = 0; i < 10; ++i) {
if ((CSR_READ_2(sc, 0x690) & 0x100) == 0)
break;
DELAY(10);
}
if (rf->rf_type == BWI_RF_T_BCM2050 && rf->rf_rev <= 5)
RF_WRITE(mac, 0x51, 0x37);
#undef PACKET_LEN
}
void
bwi_mac_init_tpctl_11bg(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_phy *phy = &mac->mac_phy;
struct bwi_rf *rf = &mac->mac_rf;
struct bwi_tpctl tpctl_orig;
int restore_tpctl = 0;
KASSERT(phy->phy_mode != IEEE80211_MODE_11A,
("phy_mode %d", phy->phy_mode));
if (BWI_IS_BRCM_BU4306(sc))
return;
PHY_WRITE(mac, 0x28, 0x8018);
CSR_CLRBITS_2(sc, BWI_BBP_ATTEN, 0x20);
if (phy->phy_mode == IEEE80211_MODE_11G) {
if ((phy->phy_flags & BWI_PHY_F_LINKED) == 0)
return;
PHY_WRITE(mac, 0x47a, 0xc111);
}
if (mac->mac_flags & BWI_MAC_F_TPCTL_INITED)
return;
if (phy->phy_mode == IEEE80211_MODE_11B && phy->phy_rev >= 2 &&
rf->rf_type == BWI_RF_T_BCM2050) {
RF_SETBITS(mac, 0x76, 0x84);
} else {
struct bwi_tpctl tpctl;
/* Backup original TX power control variables */
bcopy(&mac->mac_tpctl, &tpctl_orig, sizeof(tpctl_orig));
restore_tpctl = 1;
bcopy(&mac->mac_tpctl, &tpctl, sizeof(tpctl));
tpctl.bbp_atten = 11;
tpctl.tp_ctrl1 = 0;
#ifdef notyet
if (rf->rf_rev >= 6 && rf->rf_rev <= 8)
tpctl.rf_atten = 31;
else
#endif
tpctl.rf_atten = 9;
bwi_mac_set_tpctl_11bg(mac, &tpctl);
}
bwi_mac_dummy_xmit(mac);
mac->mac_flags |= BWI_MAC_F_TPCTL_INITED;
rf->rf_base_tssi = PHY_READ(mac, 0x29);
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_INIT | BWI_DBG_TXPOWER,
"base tssi %d\n", rf->rf_base_tssi);
if (abs(rf->rf_base_tssi - rf->rf_idle_tssi) >= 20) {
device_printf(sc->sc_dev, "base tssi measure failed\n");
mac->mac_flags |= BWI_MAC_F_TPCTL_ERROR;
}
if (restore_tpctl)
bwi_mac_set_tpctl_11bg(mac, &tpctl_orig);
else
RF_CLRBITS(mac, 0x76, 0x84);
bwi_rf_clear_tssi(mac);
}
void
bwi_mac_detach(struct bwi_mac *mac)
{
bwi_mac_fw_free(mac);
}
static __inline int
bwi_fwimage_is_valid(struct bwi_softc *sc, const struct firmware *fw,
uint8_t fw_type)
{
const struct bwi_fwhdr *hdr;
- struct ifnet *ifp = sc->sc_ifp;
if (fw->datasize < sizeof(*hdr)) {
- if_printf(ifp, "invalid firmware (%s): invalid size %zu\n",
- fw->name, fw->datasize);
+ device_printf(sc->sc_dev,
+ "invalid firmware (%s): invalid size %zu\n",
+ fw->name, fw->datasize);
return 0;
}
hdr = (const struct bwi_fwhdr *)fw->data;
if (fw_type != BWI_FW_T_IV) {
/*
* Don't verify the IV's size; it has a different meaning
*/
if (be32toh(hdr->fw_size) != fw->datasize - sizeof(*hdr)) {
- if_printf(ifp, "invalid firmware (%s): size mismatch, "
- "fw %u, real %zu\n", fw->name,
- be32toh(hdr->fw_size),
- fw->datasize - sizeof(*hdr));
+ device_printf(sc->sc_dev,
+ "invalid firmware (%s): size mismatch, "
+ "fw %u, real %zu\n", fw->name,
+ be32toh(hdr->fw_size), fw->datasize - sizeof(*hdr));
return 0;
}
}
if (hdr->fw_type != fw_type) {
- if_printf(ifp, "invalid firmware (%s): type mismatch, "
- "fw \'%c\', target \'%c\'\n", fw->name,
- hdr->fw_type, fw_type);
+ device_printf(sc->sc_dev,
+ "invalid firmware (%s): type mismatch, "
+ "fw \'%c\', target \'%c\'\n", fw->name,
+ hdr->fw_type, fw_type);
return 0;
}
if (hdr->fw_gen != BWI_FW_GEN_1) {
- if_printf(ifp, "invalid firmware (%s): wrong generation, "
- "fw %d, target %d\n", fw->name,
- hdr->fw_gen, BWI_FW_GEN_1);
+ device_printf(sc->sc_dev,
+ "invalid firmware (%s): wrong generation, "
+ "fw %d, target %d\n", fw->name, hdr->fw_gen, BWI_FW_GEN_1);
return 0;
}
return 1;
}
/*
* XXX Error cleanup
*/
int
bwi_mac_fw_alloc(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
char fwname[64];
int idx;
/*
* Try getting the firmware stub first, so that the firmware
* module is loaded automatically
*/
if (mac->mac_stub == NULL) {
snprintf(fwname, sizeof(fwname), BWI_FW_STUB_PATH,
sc->sc_fw_version);
mac->mac_stub = firmware_get(fwname);
if (mac->mac_stub == NULL)
goto no_firmware;
}
if (mac->mac_ucode == NULL) {
snprintf(fwname, sizeof(fwname), BWI_FW_UCODE_PATH,
sc->sc_fw_version,
mac->mac_rev >= 5 ? 5 : mac->mac_rev);
mac->mac_ucode = firmware_get(fwname);
if (mac->mac_ucode == NULL)
goto no_firmware;
if (!bwi_fwimage_is_valid(sc, mac->mac_ucode, BWI_FW_T_UCODE))
return EINVAL;
}
if (mac->mac_pcm == NULL) {
snprintf(fwname, sizeof(fwname), BWI_FW_PCM_PATH,
sc->sc_fw_version,
mac->mac_rev < 5 ? 4 : 5);
mac->mac_pcm = firmware_get(fwname);
if (mac->mac_pcm == NULL)
goto no_firmware;
if (!bwi_fwimage_is_valid(sc, mac->mac_pcm, BWI_FW_T_PCM))
return EINVAL;
}
if (mac->mac_iv == NULL) {
/* TODO: 11A */
if (mac->mac_rev == 2 || mac->mac_rev == 4) {
idx = 2;
} else if (mac->mac_rev >= 5 && mac->mac_rev <= 10) {
idx = 5;
} else {
device_printf(sc->sc_dev,
"no suitable IV for MAC rev %d\n", mac->mac_rev);
return ENODEV;
}
snprintf(fwname, sizeof(fwname), BWI_FW_IV_PATH,
sc->sc_fw_version, idx);
mac->mac_iv = firmware_get(fwname);
if (mac->mac_iv == NULL)
goto no_firmware;
if (!bwi_fwimage_is_valid(sc, mac->mac_iv, BWI_FW_T_IV))
return EINVAL;
}
if (mac->mac_iv_ext == NULL) {
/* TODO: 11A */
if (mac->mac_rev == 2 || mac->mac_rev == 4 ||
mac->mac_rev >= 11) {
/* No extended IV */
return (0);
} else if (mac->mac_rev >= 5 && mac->mac_rev <= 10) {
idx = 5;
} else {
device_printf(sc->sc_dev,
"no suitable ExtIV for MAC rev %d\n", mac->mac_rev);
return ENODEV;
}
snprintf(fwname, sizeof(fwname), BWI_FW_IV_EXT_PATH,
sc->sc_fw_version, idx);
mac->mac_iv_ext = firmware_get(fwname);
if (mac->mac_iv_ext == NULL)
goto no_firmware;
if (!bwi_fwimage_is_valid(sc, mac->mac_iv_ext, BWI_FW_T_IV))
return EINVAL;
}
return (0);
no_firmware:
device_printf(sc->sc_dev, "request firmware %s failed\n", fwname);
return (ENOENT);
}
static void
bwi_mac_fw_free(struct bwi_mac *mac)
{
if (mac->mac_ucode != NULL) {
firmware_put(mac->mac_ucode, FIRMWARE_UNLOAD);
mac->mac_ucode = NULL;
}
if (mac->mac_pcm != NULL) {
firmware_put(mac->mac_pcm, FIRMWARE_UNLOAD);
mac->mac_pcm = NULL;
}
if (mac->mac_iv != NULL) {
firmware_put(mac->mac_iv, FIRMWARE_UNLOAD);
mac->mac_iv = NULL;
}
if (mac->mac_iv_ext != NULL) {
firmware_put(mac->mac_iv_ext, FIRMWARE_UNLOAD);
mac->mac_iv_ext = NULL;
}
if (mac->mac_stub != NULL) {
firmware_put(mac->mac_stub, FIRMWARE_UNLOAD);
mac->mac_stub = NULL;
}
}
static int
bwi_mac_fw_load(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
const uint32_t *fw;
uint16_t fw_rev;
int fw_len, i;
/*
* Load ucode image
*/
fw = (const uint32_t *)
((const uint8_t *)mac->mac_ucode->data + BWI_FWHDR_SZ);
fw_len = (mac->mac_ucode->datasize - BWI_FWHDR_SZ) / sizeof(uint32_t);
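/*
* The firmware image (minus its header) is stored as big-endian
* 32-bit words; each word is converted to host order and streamed
* into the ucode shared-memory object via the auto-increment
* write mode of the memory-object control register.
*/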
CSR_WRITE_4(sc, BWI_MOBJ_CTRL,
BWI_MOBJ_CTRL_VAL(
BWI_FW_UCODE_MOBJ | BWI_WR_MOBJ_AUTOINC, 0));
for (i = 0; i < fw_len; ++i) {
CSR_WRITE_4(sc, BWI_MOBJ_DATA, be32toh(fw[i]));
DELAY(10);
}
/*
* Load PCM image
*/
fw = (const uint32_t *)
((const uint8_t *)mac->mac_pcm->data + BWI_FWHDR_SZ);
fw_len = (mac->mac_pcm->datasize - BWI_FWHDR_SZ) / sizeof(uint32_t);
CSR_WRITE_4(sc, BWI_MOBJ_CTRL,
BWI_MOBJ_CTRL_VAL(BWI_FW_PCM_MOBJ, 0x01ea));
CSR_WRITE_4(sc, BWI_MOBJ_DATA, 0x4000);
CSR_WRITE_4(sc, BWI_MOBJ_CTRL,
BWI_MOBJ_CTRL_VAL(BWI_FW_PCM_MOBJ, 0x01eb));
for (i = 0; i < fw_len; ++i) {
CSR_WRITE_4(sc, BWI_MOBJ_DATA, be32toh(fw[i]));
DELAY(10);
}
CSR_WRITE_4(sc, BWI_MAC_INTR_STATUS, BWI_ALL_INTRS);
CSR_WRITE_4(sc, BWI_MAC_STATUS,
BWI_MAC_STATUS_UCODE_START |
BWI_MAC_STATUS_IHREN |
BWI_MAC_STATUS_INFRA);
#define NRETRY 200
for (i = 0; i < NRETRY; ++i) {
uint32_t intr_status;
intr_status = CSR_READ_4(sc, BWI_MAC_INTR_STATUS);
if (intr_status == BWI_INTR_READY)
break;
DELAY(10);
}
if (i == NRETRY) {
- if_printf(ifp, "firmware (ucode&pcm) loading timed out\n");
+ device_printf(sc->sc_dev,
+ "firmware (ucode&pcm) loading timed out\n");
return ETIMEDOUT;
}
#undef NRETRY
CSR_READ_4(sc, BWI_MAC_INTR_STATUS); /* dummy read */
fw_rev = MOBJ_READ_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_FWREV);
if (fw_rev > BWI_FW_VERSION3_REVMAX) {
- if_printf(ifp, "firmware version 4 is not supported yet\n");
+ device_printf(sc->sc_dev,
+ "firmware version 4 is not supported yet\n");
return ENODEV;
}
- if_printf(ifp, "firmware rev 0x%04x, patch level 0x%04x\n", fw_rev,
- MOBJ_READ_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_FWPATCHLV));
+ device_printf(sc->sc_dev,
+ "firmware rev 0x%04x, patch level 0x%04x\n", fw_rev,
+ MOBJ_READ_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_FWPATCHLV));
return 0;
}
static int
bwi_mac_gpio_init(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_regwin *old, *gpio_rw;
uint32_t filt, bits;
int error;
CSR_CLRBITS_4(sc, BWI_MAC_STATUS, BWI_MAC_STATUS_GPOSEL_MASK);
/* TODO:LED */
CSR_SETBITS_2(sc, BWI_MAC_GPIO_MASK, 0xf);
filt = 0x1f;
bits = 0xf;
if (sc->sc_bbp_id == BWI_BBPID_BCM4301) {
filt |= 0x60;
bits |= 0x60;
}
if (sc->sc_card_flags & BWI_CARD_F_PA_GPIO9) {
CSR_SETBITS_2(sc, BWI_MAC_GPIO_MASK, 0x200);
filt |= 0x200;
bits |= 0x200;
}
gpio_rw = BWI_GPIO_REGWIN(sc);
error = bwi_regwin_switch(sc, gpio_rw, &old);
if (error)
return error;
CSR_FILT_SETBITS_4(sc, BWI_GPIO_CTRL, filt, bits);
return bwi_regwin_switch(sc, old, NULL);
}
static int
bwi_mac_gpio_fini(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_regwin *old, *gpio_rw;
int error;
gpio_rw = BWI_GPIO_REGWIN(sc);
error = bwi_regwin_switch(sc, gpio_rw, &old);
if (error)
return error;
CSR_WRITE_4(sc, BWI_GPIO_CTRL, 0);
return bwi_regwin_switch(sc, old, NULL);
}
static int
bwi_mac_fw_load_iv(struct bwi_mac *mac, const struct firmware *fw)
{
struct bwi_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
const struct bwi_fwhdr *hdr;
const struct bwi_fw_iv *iv;
int n, i, iv_img_size;
/* Get the number of IVs in the IV image */
hdr = (const struct bwi_fwhdr *)fw->data;
n = be32toh(hdr->fw_iv_cnt);
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_INIT | BWI_DBG_FIRMWARE,
"IV count %d\n", n);
/* Calculate the IV image size, for later sanity check */
iv_img_size = fw->datasize - sizeof(*hdr);
/* Locate the first IV */
iv = (const struct bwi_fw_iv *)
((const uint8_t *)fw->data + sizeof(*hdr));
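/*
* Each IV record is a 16-bit offset word (a flag bit in it selects
* a 16- or 32-bit value) followed by the value itself; walk the
* records, bounds-checking against the remaining image size.
*/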
for (i = 0; i < n; ++i) {
uint16_t iv_ofs, ofs;
int sz = 0;
if (iv_img_size < sizeof(iv->iv_ofs)) {
- if_printf(ifp, "invalid IV image, ofs\n");
+ device_printf(sc->sc_dev, "invalid IV image, ofs\n");
return EINVAL;
}
iv_img_size -= sizeof(iv->iv_ofs);
sz += sizeof(iv->iv_ofs);
iv_ofs = be16toh(iv->iv_ofs);
ofs = __SHIFTOUT(iv_ofs, BWI_FW_IV_OFS_MASK);
if (ofs >= 0x1000) {
- if_printf(ifp, "invalid ofs (0x%04x) "
+ device_printf(sc->sc_dev, "invalid ofs (0x%04x) "
"for %dth iv\n", ofs, i);
return EINVAL;
}
if (iv_ofs & BWI_FW_IV_IS_32BIT) {
uint32_t val32;
if (iv_img_size < sizeof(iv->iv_val.val32)) {
- if_printf(ifp, "invalid IV image, val32\n");
+ device_printf(sc->sc_dev,
+ "invalid IV image, val32\n");
return EINVAL;
}
iv_img_size -= sizeof(iv->iv_val.val32);
sz += sizeof(iv->iv_val.val32);
val32 = be32toh(iv->iv_val.val32);
CSR_WRITE_4(sc, ofs, val32);
} else {
uint16_t val16;
if (iv_img_size < sizeof(iv->iv_val.val16)) {
- if_printf(ifp, "invalid IV image, val16\n");
+ device_printf(sc->sc_dev,
+ "invalid IV image, val16\n");
return EINVAL;
}
iv_img_size -= sizeof(iv->iv_val.val16);
sz += sizeof(iv->iv_val.val16);
val16 = be16toh(iv->iv_val.val16);
CSR_WRITE_2(sc, ofs, val16);
}
iv = (const struct bwi_fw_iv *)((const uint8_t *)iv + sz);
}
if (iv_img_size != 0) {
- if_printf(ifp, "invalid IV image, size left %d\n", iv_img_size);
+ device_printf(sc->sc_dev, "invalid IV image, size left %d\n",
+ iv_img_size);
return EINVAL;
}
return 0;
}
static int
bwi_mac_fw_init(struct bwi_mac *mac)
{
- struct ifnet *ifp = mac->mac_sc->sc_ifp;
+ device_t dev = mac->mac_sc->sc_dev;
int error;
error = bwi_mac_fw_load_iv(mac, mac->mac_iv);
if (error) {
- if_printf(ifp, "load IV failed\n");
+ device_printf(dev, "load IV failed\n");
return error;
}
if (mac->mac_iv_ext != NULL) {
error = bwi_mac_fw_load_iv(mac, mac->mac_iv_ext);
if (error)
- if_printf(ifp, "load ExtIV failed\n");
+ device_printf(dev, "load ExtIV failed\n");
}
return error;
}
static void
bwi_mac_opmode_init(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t mac_status;
uint16_t pre_tbtt;
CSR_CLRBITS_4(sc, BWI_MAC_STATUS, BWI_MAC_STATUS_INFRA);
CSR_SETBITS_4(sc, BWI_MAC_STATUS, BWI_MAC_STATUS_INFRA);
/* Set probe resp timeout to infinite */
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_PROBE_RESP_TO, 0);
/*
* TODO: factor out the following part
*/
mac_status = CSR_READ_4(sc, BWI_MAC_STATUS);
mac_status &= ~(BWI_MAC_STATUS_OPMODE_HOSTAP |
BWI_MAC_STATUS_PASS_CTL |
BWI_MAC_STATUS_PASS_BCN |
BWI_MAC_STATUS_PASS_BADPLCP |
BWI_MAC_STATUS_PASS_BADFCS |
BWI_MAC_STATUS_PROMISC);
mac_status |= BWI_MAC_STATUS_INFRA;
/* Always turn on PROMISC on old hardware */
if (mac->mac_rev < 5)
mac_status |= BWI_MAC_STATUS_PROMISC;
switch (ic->ic_opmode) {
case IEEE80211_M_IBSS:
mac_status &= ~BWI_MAC_STATUS_INFRA;
break;
case IEEE80211_M_HOSTAP:
mac_status |= BWI_MAC_STATUS_OPMODE_HOSTAP;
break;
case IEEE80211_M_MONITOR:
#if 0
/* Do you want data from your microwave oven? */
mac_status |= BWI_MAC_STATUS_PASS_CTL |
BWI_MAC_STATUS_PASS_BADPLCP |
BWI_MAC_STATUS_PASS_BADFCS;
#else
mac_status |= BWI_MAC_STATUS_PASS_CTL;
#endif
/* Promisc? */
break;
default:
break;
}
- if (ic->ic_ifp->if_flags & IFF_PROMISC)
+ if (ic->ic_promisc > 0)
mac_status |= BWI_MAC_STATUS_PROMISC;
CSR_WRITE_4(sc, BWI_MAC_STATUS, mac_status);
if (ic->ic_opmode != IEEE80211_M_IBSS &&
ic->ic_opmode != IEEE80211_M_HOSTAP) {
if (sc->sc_bbp_id == BWI_BBPID_BCM4306 && sc->sc_bbp_rev == 3)
pre_tbtt = 100;
else
pre_tbtt = 50;
} else {
pre_tbtt = 2;
}
CSR_WRITE_2(sc, BWI_MAC_PRE_TBTT, pre_tbtt);
}
static void
bwi_mac_hostflags_init(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_phy *phy = &mac->mac_phy;
struct bwi_rf *rf = &mac->mac_rf;
uint64_t host_flags;
if (phy->phy_mode == IEEE80211_MODE_11A)
return;
host_flags = HFLAGS_READ(mac);
host_flags |= BWI_HFLAG_SYM_WA;
if (phy->phy_mode == IEEE80211_MODE_11G) {
if (phy->phy_rev == 1)
host_flags |= BWI_HFLAG_GDC_WA;
if (sc->sc_card_flags & BWI_CARD_F_PA_GPIO9)
host_flags |= BWI_HFLAG_OFDM_PA;
} else if (phy->phy_mode == IEEE80211_MODE_11B) {
if (phy->phy_rev >= 2 && rf->rf_type == BWI_RF_T_BCM2050)
host_flags &= ~BWI_HFLAG_GDC_WA;
} else {
panic("unknown PHY mode %u\n", phy->phy_mode);
}
HFLAGS_WRITE(mac, host_flags);
}
static void
bwi_mac_bss_param_init(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_phy *phy = &mac->mac_phy;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
const struct ieee80211_rate_table *rt;
struct bwi_retry_lim lim;
uint16_t cw_min;
/*
* Set short/long retry limits
*/
bzero(&lim, sizeof(lim));
lim.shretry = BWI_SHRETRY;
lim.shretry_fb = BWI_SHRETRY_FB;
lim.lgretry = BWI_LGRETRY;
lim.lgretry_fb = BWI_LGRETRY_FB;
bwi_mac_set_retry_lim(mac, &lim);
/*
* Implicitly prevent firmware from sending probe response
* by setting its "probe response timeout" to 1us.
*/
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_PROBE_RESP_TO, 1);
/*
* XXX MAC level acknowledge and CW min/max should depend
* on the char rateset of the IBSS/BSS to join.
* XXX this is all wrong; should be done on channel change
*/
if (phy->phy_mode == IEEE80211_MODE_11B) {
rt = ieee80211_get_ratetable(
ieee80211_find_channel(ic, 2412, IEEE80211_CHAN_B));
bwi_mac_set_ackrates(mac, rt,
&ic->ic_sup_rates[IEEE80211_MODE_11B]);
} else {
rt = ieee80211_get_ratetable(
ieee80211_find_channel(ic, 2412, IEEE80211_CHAN_G));
bwi_mac_set_ackrates(mac, rt,
&ic->ic_sup_rates[IEEE80211_MODE_11G]);
}
/*
* Set CW min
*/
if (phy->phy_mode == IEEE80211_MODE_11B)
cw_min = IEEE80211_CW_MIN_0;
else
cw_min = IEEE80211_CW_MIN_1;
MOBJ_WRITE_2(mac, BWI_80211_MOBJ, BWI_80211_MOBJ_CWMIN, cw_min);
/*
* Set CW max
*/
MOBJ_WRITE_2(mac, BWI_80211_MOBJ, BWI_80211_MOBJ_CWMAX,
IEEE80211_CW_MAX);
}
static void
bwi_mac_set_retry_lim(struct bwi_mac *mac, const struct bwi_retry_lim *lim)
{
/* Short/Long retry limit */
MOBJ_WRITE_2(mac, BWI_80211_MOBJ, BWI_80211_MOBJ_SHRETRY,
lim->shretry);
MOBJ_WRITE_2(mac, BWI_80211_MOBJ, BWI_80211_MOBJ_LGRETRY,
lim->lgretry);
/* Short/Long retry fallback limit */
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_SHRETRY_FB,
lim->shretry_fb);
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_LGRETEY_FB,
lim->lgretry_fb);
}
static void
bwi_mac_set_ackrates(struct bwi_mac *mac, const struct ieee80211_rate_table *rt,
const struct ieee80211_rateset *rs)
{
int i;
/* XXX not standard conforming */
for (i = 0; i < rs->rs_nrates; ++i) {
enum ieee80211_phytype modtype;
uint16_t ofs;
modtype = ieee80211_rate2phytype(rt,
rs->rs_rates[i] & IEEE80211_RATE_VAL);
switch (modtype) {
case IEEE80211_T_DS:
ofs = 0x4c0;
break;
case IEEE80211_T_OFDM:
ofs = 0x480;
break;
default:
panic("unsupported modtype %u\n", modtype);
}
ofs += 2*(ieee80211_rate2plcp(
rs->rs_rates[i] & IEEE80211_RATE_VAL,
modtype) & 0xf);
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ, ofs + 0x20,
MOBJ_READ_2(mac, BWI_COMM_MOBJ, ofs));
}
}
int
bwi_mac_start(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
CSR_SETBITS_4(sc, BWI_MAC_STATUS, BWI_MAC_STATUS_ENABLE);
CSR_WRITE_4(sc, BWI_MAC_INTR_STATUS, BWI_INTR_READY);
/* Flush pending bus writes */
CSR_READ_4(sc, BWI_MAC_STATUS);
CSR_READ_4(sc, BWI_MAC_INTR_STATUS);
return bwi_mac_config_ps(mac);
}
int
bwi_mac_stop(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
int error, i;
error = bwi_mac_config_ps(mac);
if (error)
return error;
CSR_CLRBITS_4(sc, BWI_MAC_STATUS, BWI_MAC_STATUS_ENABLE);
/* Flush pending bus write */
CSR_READ_4(sc, BWI_MAC_STATUS);
#define NRETRY 10000
for (i = 0; i < NRETRY; ++i) {
if (CSR_READ_4(sc, BWI_MAC_INTR_STATUS) & BWI_INTR_READY)
break;
DELAY(1);
}
if (i == NRETRY) {
device_printf(sc->sc_dev, "can't stop MAC\n");
return ETIMEDOUT;
}
#undef NRETRY
return 0;
}
int
bwi_mac_config_ps(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
uint32_t status;
status = CSR_READ_4(sc, BWI_MAC_STATUS);
status &= ~BWI_MAC_STATUS_HW_PS;
status |= BWI_MAC_STATUS_WAKEUP;
CSR_WRITE_4(sc, BWI_MAC_STATUS, status);
/* Flush pending bus write */
CSR_READ_4(sc, BWI_MAC_STATUS);
if (mac->mac_rev >= 5) {
int i;
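/*
* Rev 5+ firmware reflects its power-save state in shared
* memory; poll until the ucode state is no longer PS.
*/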
#define NRETRY 100
for (i = 0; i < NRETRY; ++i) {
if (MOBJ_READ_2(mac, BWI_COMM_MOBJ,
BWI_COMM_MOBJ_UCODE_STATE) != BWI_UCODE_STATE_PS)
break;
DELAY(10);
}
if (i == NRETRY) {
device_printf(sc->sc_dev, "config PS failed\n");
return ETIMEDOUT;
}
#undef NRETRY
}
return 0;
}
void
bwi_mac_reset_hwkeys(struct bwi_mac *mac)
{
/* TODO: firmware crypto */
MOBJ_READ_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_KEYTABLE_OFS);
}
void
bwi_mac_shutdown(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
int i;
if (mac->mac_flags & BWI_MAC_F_HAS_TXSTATS)
sc->sc_free_txstats(sc);
sc->sc_free_rx_ring(sc);
for (i = 0; i < BWI_TX_NRING; ++i)
sc->sc_free_tx_ring(sc, i);
bwi_rf_off(mac);
/* TODO:LED */
bwi_mac_gpio_fini(mac);
bwi_rf_off(mac); /* XXX again */
CSR_WRITE_2(sc, BWI_BBP_ATTEN, BWI_BBP_ATTEN_MAGIC);
bwi_regwin_disable(sc, &mac->mac_regwin, 0);
mac->mac_flags &= ~BWI_MAC_F_INITED;
}
static int
bwi_mac_get_property(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
enum bwi_bus_space old_bus_space;
uint32_t val;
/*
* Byte swap
*/
val = CSR_READ_4(sc, BWI_MAC_STATUS);
if (val & BWI_MAC_STATUS_BSWAP) {
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_ATTACH, "%s\n",
"need byte swap");
mac->mac_flags |= BWI_MAC_F_BSWAP;
}
/*
* DMA address space
*/
old_bus_space = sc->sc_bus_space;
val = CSR_READ_4(sc, BWI_STATE_HI);
if (__SHIFTOUT(val, BWI_STATE_HI_FLAGS_MASK) &
BWI_STATE_HI_FLAG_64BIT) {
/* 64bit address */
sc->sc_bus_space = BWI_BUS_SPACE_64BIT;
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_ATTACH, "%s\n",
"64bit bus space");
} else {
uint32_t txrx_reg = BWI_TXRX_CTRL_BASE + BWI_TX32_CTRL;
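/*
* Probe whether the high DMA address bits are implemented: if
* the ADDRHI bits read back after being set, 32-bit addressing
* is used, otherwise fall back to the older 30-bit scheme.
*/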
CSR_WRITE_4(sc, txrx_reg, BWI_TXRX32_CTRL_ADDRHI_MASK);
if (CSR_READ_4(sc, txrx_reg) & BWI_TXRX32_CTRL_ADDRHI_MASK) {
/* 32bit address */
sc->sc_bus_space = BWI_BUS_SPACE_32BIT;
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_ATTACH, "%s\n",
"32bit bus space");
} else {
/* 30bit address */
sc->sc_bus_space = BWI_BUS_SPACE_30BIT;
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_ATTACH, "%s\n",
"30bit bus space");
}
}
if (old_bus_space != 0 && old_bus_space != sc->sc_bus_space) {
device_printf(sc->sc_dev, "MACs bus space mismatch!\n");
return ENXIO;
}
return 0;
}
void
bwi_mac_updateslot(struct bwi_mac *mac, int shslot)
{
uint16_t slot_time;
if (mac->mac_phy.phy_mode == IEEE80211_MODE_11B)
return;
if (shslot)
slot_time = IEEE80211_DUR_SHSLOT;
else
slot_time = IEEE80211_DUR_SLOT;
CSR_WRITE_2(mac->mac_sc, BWI_MAC_SLOTTIME,
slot_time + BWI_MAC_SLOTTIME_ADJUST);
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_SLOTTIME, slot_time);
}
int
bwi_mac_attach(struct bwi_softc *sc, int id, uint8_t rev)
{
struct bwi_mac *mac;
int i;
KASSERT(sc->sc_nmac <= BWI_MAC_MAX && sc->sc_nmac >= 0,
("sc_nmac %d", sc->sc_nmac));
if (sc->sc_nmac == BWI_MAC_MAX) {
device_printf(sc->sc_dev, "too many MACs\n");
return 0;
}
/*
* More than one MAC is only supported by BCM4309
*/
if (sc->sc_nmac != 0 &&
sc->sc_pci_did != PCI_PRODUCT_BROADCOM_BCM4309) {
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_ATTACH, "%s\n",
"ignore second MAC");
return 0;
}
mac = &sc->sc_mac[sc->sc_nmac];
/* XXX will this happen? */
if (BWI_REGWIN_EXIST(&mac->mac_regwin)) {
device_printf(sc->sc_dev, "%dth MAC already attached\n",
sc->sc_nmac);
return 0;
}
/*
* Test whether the revision of this MAC is supported
*/
#define N(arr) (int)(sizeof(arr) / sizeof(arr[0]))
for (i = 0; i < N(bwi_sup_macrev); ++i) {
if (bwi_sup_macrev[i] == rev)
break;
}
if (i == N(bwi_sup_macrev)) {
device_printf(sc->sc_dev, "MAC rev %u is "
"not supported\n", rev);
return ENXIO;
}
#undef N
BWI_CREATE_MAC(mac, sc, id, rev);
sc->sc_nmac++;
if (mac->mac_rev < 5) {
mac->mac_flags |= BWI_MAC_F_HAS_TXSTATS;
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_ATTACH, "%s\n",
"has TX stats");
} else {
mac->mac_flags |= BWI_MAC_F_PHYE_RESET;
}
device_printf(sc->sc_dev, "MAC: rev %u\n", rev);
return 0;
}
static __inline void
bwi_mac_balance_atten(int *bbp_atten0, int *rf_atten0)
{
int bbp_atten, rf_atten, rf_atten_lim = -1;
bbp_atten = *bbp_atten0;
rf_atten = *rf_atten0;
/*
* RF attenuation affects TX power BWI_RF_ATTEN_FACTOR times
* as much as BBP attenuation, so we try our best to keep RF
* attenuation within range. BBP attenuation will be clamped
* later if it is out of range during balancing.
*
* BWI_RF_ATTEN_MAX0 is used as RF attenuation upper limit.
*/
/*
* Use BBP attenuation to balance RF attenuation
*/
if (rf_atten < 0)
rf_atten_lim = 0;
else if (rf_atten > BWI_RF_ATTEN_MAX0)
rf_atten_lim = BWI_RF_ATTEN_MAX0;
if (rf_atten_lim >= 0) {
bbp_atten += (BWI_RF_ATTEN_FACTOR * (rf_atten - rf_atten_lim));
rf_atten = rf_atten_lim;
}
/*
* If possible, use RF attenuation to balance BBP attenuation
* NOTE: RF attenuation is still kept within range.
*/
while (rf_atten < BWI_RF_ATTEN_MAX0 && bbp_atten > BWI_BBP_ATTEN_MAX) {
bbp_atten -= BWI_RF_ATTEN_FACTOR;
++rf_atten;
}
while (rf_atten > 0 && bbp_atten < 0) {
bbp_atten += BWI_RF_ATTEN_FACTOR;
--rf_atten;
}
/* RF attenuation MUST be within range */
KASSERT(rf_atten >= 0 && rf_atten <= BWI_RF_ATTEN_MAX0,
("rf_atten %d", rf_atten));
/*
* Clamp BBP attenuation
*/
if (bbp_atten < 0)
bbp_atten = 0;
else if (bbp_atten > BWI_BBP_ATTEN_MAX)
bbp_atten = BWI_BBP_ATTEN_MAX;
*rf_atten0 = rf_atten;
*bbp_atten0 = bbp_atten;
}
static void
bwi_mac_adjust_tpctl(struct bwi_mac *mac, int rf_atten_adj, int bbp_atten_adj)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_rf *rf = &mac->mac_rf;
struct bwi_tpctl tpctl;
int bbp_atten, rf_atten, tp_ctrl1;
bcopy(&mac->mac_tpctl, &tpctl, sizeof(tpctl));
/* NOTE: Use signed values to do the calculation */
bbp_atten = tpctl.bbp_atten;
rf_atten = tpctl.rf_atten;
tp_ctrl1 = tpctl.tp_ctrl1;
bbp_atten += bbp_atten_adj;
rf_atten += rf_atten_adj;
bwi_mac_balance_atten(&bbp_atten, &rf_atten);
if (rf->rf_type == BWI_RF_T_BCM2050 && rf->rf_rev == 2) {
if (rf_atten <= 1) {
if (tp_ctrl1 == 0) {
tp_ctrl1 = 3;
bbp_atten += 2;
rf_atten += 2;
} else if (sc->sc_card_flags & BWI_CARD_F_PA_GPIO9) {
bbp_atten +=
(BWI_RF_ATTEN_FACTOR * (rf_atten - 2));
rf_atten = 2;
}
} else if (rf_atten > 4 && tp_ctrl1 != 0) {
tp_ctrl1 = 0;
if (bbp_atten < 3) {
bbp_atten += 2;
rf_atten -= 3;
} else {
bbp_atten -= 2;
rf_atten -= 2;
}
}
bwi_mac_balance_atten(&bbp_atten, &rf_atten);
}
tpctl.bbp_atten = bbp_atten;
tpctl.rf_atten = rf_atten;
tpctl.tp_ctrl1 = tp_ctrl1;
bwi_mac_lock(mac);
bwi_mac_set_tpctl_11bg(mac, &tpctl);
bwi_mac_unlock(mac);
}
/*
* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower
*/
void
bwi_mac_calibrate_txpower(struct bwi_mac *mac, enum bwi_txpwrcb_type type)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_rf *rf = &mac->mac_rf;
int8_t tssi[4], tssi_avg, cur_txpwr;
int error, i, ofdm_tssi;
int txpwr_diff, rf_atten_adj, bbp_atten_adj;
if (!sc->sc_txpwr_calib)
return;
if (mac->mac_flags & BWI_MAC_F_TPCTL_ERROR) {
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_TXPOWER, "%s\n",
"tpctl error happened, can't set txpower");
return;
}
if (BWI_IS_BRCM_BU4306(sc)) {
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_TXPOWER, "%s\n",
"BU4306, can't set txpower");
return;
}
/*
* Save latest TSSI and reset the related memory objects
*/
ofdm_tssi = 0;
error = bwi_rf_get_latest_tssi(mac, tssi, BWI_COMM_MOBJ_TSSI_DS);
if (error) {
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_TXPOWER, "%s\n",
"no DS tssi");
if (mac->mac_phy.phy_mode == IEEE80211_MODE_11B) {
if (type == BWI_TXPWR_FORCE) {
rf_atten_adj = 0;
bbp_atten_adj = 1;
goto calib;
} else {
return;
}
}
error = bwi_rf_get_latest_tssi(mac, tssi,
BWI_COMM_MOBJ_TSSI_OFDM);
if (error) {
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_TXPOWER, "%s\n",
"no OFDM tssi");
if (type == BWI_TXPWR_FORCE) {
rf_atten_adj = 0;
bbp_atten_adj = 1;
goto calib;
} else {
return;
}
}
for (i = 0; i < 4; ++i) {
tssi[i] += 0x20;
tssi[i] &= 0x3f;
}
ofdm_tssi = 1;
}
bwi_rf_clear_tssi(mac);
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_TXPOWER,
"tssi0 %d, tssi1 %d, tssi2 %d, tssi3 %d\n",
tssi[0], tssi[1], tssi[2], tssi[3]);
/*
* Calculate RF/BBP attenuation adjustment based on
* the difference between desired TX power and sampled
* TX power.
*/
/* +8 == "each incremented by 1/2" */
tssi_avg = (tssi[0] + tssi[1] + tssi[2] + tssi[3] + 8) / 4;
if (ofdm_tssi && (HFLAGS_READ(mac) & BWI_HFLAG_PWR_BOOST_DS))
tssi_avg -= 13;
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_TXPOWER, "tssi avg %d\n", tssi_avg);
error = bwi_rf_tssi2dbm(mac, tssi_avg, &cur_txpwr);
if (error)
return;
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_TXPOWER, "current txpower %d\n",
cur_txpwr);
txpwr_diff = rf->rf_txpower_max - cur_txpwr; /* XXX ni_txpower */
rf_atten_adj = -howmany(txpwr_diff, 8);
if (type == BWI_TXPWR_INIT) {
/*
* Move toward EEPROM max TX power as fast as we can
*/
bbp_atten_adj = -txpwr_diff;
} else {
bbp_atten_adj = -(txpwr_diff / 2);
}
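/*
* One RF attenuation step is worth BWI_RF_ATTEN_FACTOR BBP steps,
* so compensate the BBP adjustment for the RF adjustment made
* above.
*/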
bbp_atten_adj -= (BWI_RF_ATTEN_FACTOR * rf_atten_adj);
if (rf_atten_adj == 0 && bbp_atten_adj == 0) {
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_TXPOWER, "%s\n",
"no need to adjust RF/BBP attenuation");
/* TODO: LO */
return;
}
calib:
DPRINTF(sc, BWI_DBG_MAC | BWI_DBG_TXPOWER,
"rf atten adjust %d, bbp atten adjust %d\n",
rf_atten_adj, bbp_atten_adj);
bwi_mac_adjust_tpctl(mac, rf_atten_adj, bbp_atten_adj);
/* TODO: LO */
}
static void
bwi_mac_lock(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
KASSERT((mac->mac_flags & BWI_MAC_F_LOCKED) == 0,
("mac_flags 0x%x", mac->mac_flags));
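/*
* NB: before asserting RFLOCK, MACs older than rev 3 are stopped
* outright; newer ones (except in hostap mode) are merely kept
* awake via the power-save configuration.
*/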
if (mac->mac_rev < 3)
bwi_mac_stop(mac);
else if (ic->ic_opmode != IEEE80211_M_HOSTAP)
bwi_mac_config_ps(mac);
CSR_SETBITS_4(sc, BWI_MAC_STATUS, BWI_MAC_STATUS_RFLOCK);
/* Flush pending bus write */
CSR_READ_4(sc, BWI_MAC_STATUS);
DELAY(10);
mac->mac_flags |= BWI_MAC_F_LOCKED;
}
static void
bwi_mac_unlock(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
KASSERT(mac->mac_flags & BWI_MAC_F_LOCKED,
("mac_flags 0x%x", mac->mac_flags));
CSR_READ_2(sc, BWI_PHYINFO); /* dummy read */
CSR_CLRBITS_4(sc, BWI_MAC_STATUS, BWI_MAC_STATUS_RFLOCK);
if (mac->mac_rev < 3)
bwi_mac_start(mac);
else if (ic->ic_opmode != IEEE80211_M_HOSTAP)
bwi_mac_config_ps(mac);
mac->mac_flags &= ~BWI_MAC_F_LOCKED;
}
void
bwi_mac_set_promisc(struct bwi_mac *mac, int promisc)
{
struct bwi_softc *sc = mac->mac_sc;
if (mac->mac_rev < 5) /* Promisc is always on */
return;
if (promisc)
CSR_SETBITS_4(sc, BWI_MAC_STATUS, BWI_MAC_STATUS_PROMISC);
else
CSR_CLRBITS_4(sc, BWI_MAC_STATUS, BWI_MAC_STATUS_PROMISC);
}
Index: head/sys/dev/bwi/bwiphy.c
===================================================================
--- head/sys/dev/bwi/bwiphy.c (revision 287196)
+++ head/sys/dev/bwi/bwiphy.c (revision 287197)
@@ -1,1023 +1,1023 @@
/*
* Copyright (c) 2007 The DragonFly Project. All rights reserved.
*
* This code is derived from software contributed to The DragonFly Project
* by Sepherosa Ziehau <sepherosa@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of The DragonFly Project nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific, prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $DragonFly: src/sys/dev/netif/bwi/bwiphy.c,v 1.5 2008/01/15 09:01:13 sephe Exp $
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_amrr.h>
#include <machine/bus.h>
#include <dev/bwi/bitops.h>
#include <dev/bwi/if_bwireg.h>
#include <dev/bwi/if_bwivar.h>
#include <dev/bwi/bwimac.h>
#include <dev/bwi/bwirf.h>
#include <dev/bwi/bwiphy.h>
static void bwi_phy_init_11a(struct bwi_mac *);
static void bwi_phy_init_11g(struct bwi_mac *);
static void bwi_phy_init_11b_rev2(struct bwi_mac *);
static void bwi_phy_init_11b_rev4(struct bwi_mac *);
static void bwi_phy_init_11b_rev5(struct bwi_mac *);
static void bwi_phy_init_11b_rev6(struct bwi_mac *);
static void bwi_phy_config_11g(struct bwi_mac *);
static void bwi_phy_config_agc(struct bwi_mac *);
static void bwi_tbl_write_2(struct bwi_mac *mac, uint16_t, uint16_t);
static void bwi_tbl_write_4(struct bwi_mac *mac, uint16_t, uint32_t);
#define SUP_BPHY(num) { .rev = num, .init = bwi_phy_init_11b_rev##num }
static const struct {
uint8_t rev;
void (*init)(struct bwi_mac *);
} bwi_sup_bphy[] = {
SUP_BPHY(2),
SUP_BPHY(4),
SUP_BPHY(5),
SUP_BPHY(6)
};
#undef SUP_BPHY
#define BWI_PHYTBL_WRSSI 0x1000
#define BWI_PHYTBL_NOISE_SCALE 0x1400
#define BWI_PHYTBL_NOISE 0x1800
#define BWI_PHYTBL_ROTOR 0x2000
#define BWI_PHYTBL_DELAY 0x2400
#define BWI_PHYTBL_RSSI 0x4000
#define BWI_PHYTBL_SIGMA_SQ 0x5000
#define BWI_PHYTBL_WRSSI_REV1 0x5400
#define BWI_PHYTBL_FREQ 0x5800
static const uint16_t bwi_phy_freq_11g_rev1[] =
{ BWI_PHY_FREQ_11G_REV1 };
static const uint16_t bwi_phy_noise_11g_rev1[] =
{ BWI_PHY_NOISE_11G_REV1 };
static const uint16_t bwi_phy_noise_11g[] =
{ BWI_PHY_NOISE_11G };
static const uint32_t bwi_phy_rotor_11g_rev1[] =
{ BWI_PHY_ROTOR_11G_REV1 };
static const uint16_t bwi_phy_noise_scale_11g_rev2[] =
{ BWI_PHY_NOISE_SCALE_11G_REV2 };
static const uint16_t bwi_phy_noise_scale_11g_rev7[] =
{ BWI_PHY_NOISE_SCALE_11G_REV7 };
static const uint16_t bwi_phy_noise_scale_11g[] =
{ BWI_PHY_NOISE_SCALE_11G };
static const uint16_t bwi_phy_sigma_sq_11g_rev2[] =
{ BWI_PHY_SIGMA_SQ_11G_REV2 };
static const uint16_t bwi_phy_sigma_sq_11g_rev7[] =
{ BWI_PHY_SIGMA_SQ_11G_REV7 };
static const uint32_t bwi_phy_delay_11g_rev1[] =
{ BWI_PHY_DELAY_11G_REV1 };
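/*
* PHY registers are accessed indirectly: the register number goes
* into the PHY control register, then the value is read from or
* written to the PHY data register.
*/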
void
bwi_phy_write(struct bwi_mac *mac, uint16_t ctrl, uint16_t data)
{
struct bwi_softc *sc = mac->mac_sc;
CSR_WRITE_2(sc, BWI_PHY_CTRL, ctrl);
CSR_WRITE_2(sc, BWI_PHY_DATA, data);
}
uint16_t
bwi_phy_read(struct bwi_mac *mac, uint16_t ctrl)
{
struct bwi_softc *sc = mac->mac_sc;
CSR_WRITE_2(sc, BWI_PHY_CTRL, ctrl);
return CSR_READ_2(sc, BWI_PHY_DATA);
}
int
bwi_phy_attach(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_phy *phy = &mac->mac_phy;
uint8_t phyrev, phytype, phyver;
uint16_t val;
int i;
/* Get PHY type/revision/version */
val = CSR_READ_2(sc, BWI_PHYINFO);
phyrev = __SHIFTOUT(val, BWI_PHYINFO_REV_MASK);
phytype = __SHIFTOUT(val, BWI_PHYINFO_TYPE_MASK);
phyver = __SHIFTOUT(val, BWI_PHYINFO_VER_MASK);
device_printf(sc->sc_dev, "PHY: type %d, rev %d, ver %d\n",
phytype, phyrev, phyver);
/*
* Verify whether the revision of the PHY type is supported
* Convert PHY type to ieee80211_phymode
*/
switch (phytype) {
case BWI_PHYINFO_TYPE_11A:
if (phyrev >= 4) {
device_printf(sc->sc_dev, "unsupported 11A PHY, "
"rev %u\n", phyrev);
return ENXIO;
}
phy->phy_init = bwi_phy_init_11a;
phy->phy_mode = IEEE80211_MODE_11A;
phy->phy_tbl_ctrl = BWI_PHYR_TBL_CTRL_11A;
phy->phy_tbl_data_lo = BWI_PHYR_TBL_DATA_LO_11A;
phy->phy_tbl_data_hi = BWI_PHYR_TBL_DATA_HI_11A;
break;
case BWI_PHYINFO_TYPE_11B:
#define N(arr) (int)(sizeof(arr) / sizeof(arr[0]))
for (i = 0; i < N(bwi_sup_bphy); ++i) {
if (phyrev == bwi_sup_bphy[i].rev) {
phy->phy_init = bwi_sup_bphy[i].init;
break;
}
}
if (i == N(bwi_sup_bphy)) {
device_printf(sc->sc_dev, "unsupported 11B PHY, "
"rev %u\n", phyrev);
return ENXIO;
}
#undef N
phy->phy_mode = IEEE80211_MODE_11B;
break;
case BWI_PHYINFO_TYPE_11G:
if (phyrev > 8) {
device_printf(sc->sc_dev, "unsupported 11G PHY, "
"rev %u\n", phyrev);
return ENXIO;
}
phy->phy_init = bwi_phy_init_11g;
phy->phy_mode = IEEE80211_MODE_11G;
phy->phy_tbl_ctrl = BWI_PHYR_TBL_CTRL_11G;
phy->phy_tbl_data_lo = BWI_PHYR_TBL_DATA_LO_11G;
phy->phy_tbl_data_hi = BWI_PHYR_TBL_DATA_HI_11G;
break;
default:
device_printf(sc->sc_dev, "unsupported PHY type %d\n",
phytype);
return ENXIO;
}
phy->phy_rev = phyrev;
phy->phy_version = phyver;
return 0;
}
void
bwi_phy_set_bbp_atten(struct bwi_mac *mac, uint16_t bbp_atten)
{
struct bwi_phy *phy = &mac->mac_phy;
uint16_t mask = __BITS(3, 0);
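/*
* The BBP attenuation field lives in the BBP_ATTEN CSR on version 0
* PHYs; later versions keep it in a PHY register, at a bit offset
* that depends on the PHY version.
*/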
if (phy->phy_version == 0) {
CSR_FILT_SETBITS_2(mac->mac_sc, BWI_BBP_ATTEN, ~mask,
__SHIFTIN(bbp_atten, mask));
} else {
if (phy->phy_version > 1)
mask <<= 2;
else
mask <<= 3;
PHY_FILT_SETBITS(mac, BWI_PHYR_BBP_ATTEN, ~mask,
__SHIFTIN(bbp_atten, mask));
}
}
int
bwi_phy_calibrate(struct bwi_mac *mac)
{
struct bwi_phy *phy = &mac->mac_phy;
/* Dummy read */
CSR_READ_4(mac->mac_sc, BWI_MAC_STATUS);
/* Don't re-init */
if (phy->phy_flags & BWI_PHY_F_CALIBRATED)
return 0;
if (phy->phy_mode == IEEE80211_MODE_11G && phy->phy_rev == 1) {
bwi_mac_reset(mac, 0);
bwi_phy_init_11g(mac);
bwi_mac_reset(mac, 1);
}
phy->phy_flags |= BWI_PHY_F_CALIBRATED;
return 0;
}
static void
bwi_tbl_write_2(struct bwi_mac *mac, uint16_t ofs, uint16_t data)
{
struct bwi_phy *phy = &mac->mac_phy;
KASSERT(phy->phy_tbl_ctrl != 0 && phy->phy_tbl_data_lo != 0,
("phy_tbl_ctrl %d phy_tbl_data_lo %d",
phy->phy_tbl_ctrl, phy->phy_tbl_data_lo));
PHY_WRITE(mac, phy->phy_tbl_ctrl, ofs);
PHY_WRITE(mac, phy->phy_tbl_data_lo, data);
}
static void
bwi_tbl_write_4(struct bwi_mac *mac, uint16_t ofs, uint32_t data)
{
struct bwi_phy *phy = &mac->mac_phy;
KASSERT(phy->phy_tbl_data_lo != 0 && phy->phy_tbl_data_hi != 0 &&
phy->phy_tbl_ctrl != 0,
("phy_tbl_data_lo %d phy_tbl_data_hi %d phy_tbl_ctrl %d",
phy->phy_tbl_data_lo, phy->phy_tbl_data_hi, phy->phy_tbl_ctrl));
PHY_WRITE(mac, phy->phy_tbl_ctrl, ofs);
PHY_WRITE(mac, phy->phy_tbl_data_hi, data >> 16);
PHY_WRITE(mac, phy->phy_tbl_data_lo, data & 0xffff);
}
void
bwi_nrssi_write(struct bwi_mac *mac, uint16_t ofs, int16_t data)
{
PHY_WRITE(mac, BWI_PHYR_NRSSI_CTRL, ofs);
PHY_WRITE(mac, BWI_PHYR_NRSSI_DATA, (uint16_t)data);
}
int16_t
bwi_nrssi_read(struct bwi_mac *mac, uint16_t ofs)
{
PHY_WRITE(mac, BWI_PHYR_NRSSI_CTRL, ofs);
return (int16_t)PHY_READ(mac, BWI_PHYR_NRSSI_DATA);
}
static void
bwi_phy_init_11a(struct bwi_mac *mac)
{
/* TODO:11A */
}
static void
bwi_phy_init_11g(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_phy *phy = &mac->mac_phy;
struct bwi_rf *rf = &mac->mac_rf;
const struct bwi_tpctl *tpctl = &mac->mac_tpctl;
if (phy->phy_rev == 1)
bwi_phy_init_11b_rev5(mac);
else
bwi_phy_init_11b_rev6(mac);
if (phy->phy_rev >= 2 || (phy->phy_flags & BWI_PHY_F_LINKED))
bwi_phy_config_11g(mac);
if (phy->phy_rev >= 2) {
PHY_WRITE(mac, 0x814, 0);
PHY_WRITE(mac, 0x815, 0);
if (phy->phy_rev == 2) {
PHY_WRITE(mac, 0x811, 0);
PHY_WRITE(mac, 0x15, 0xc0);
} else if (phy->phy_rev > 5) {
PHY_WRITE(mac, 0x811, 0x400);
PHY_WRITE(mac, 0x15, 0xc0);
}
}
if (phy->phy_rev >= 2 || (phy->phy_flags & BWI_PHY_F_LINKED)) {
uint16_t val;
val = PHY_READ(mac, 0x400) & 0xff;
if (val == 3 || val == 5) {
PHY_WRITE(mac, 0x4c2, 0x1816);
PHY_WRITE(mac, 0x4c3, 0x8006);
if (val == 5) {
PHY_FILT_SETBITS(mac, 0x4cc,
0xff, 0x1f00);
}
}
}
if ((phy->phy_rev <= 2 && (phy->phy_flags & BWI_PHY_F_LINKED)) ||
phy->phy_rev >= 2)
PHY_WRITE(mac, 0x47e, 0x78);
if (rf->rf_rev == 8) {
PHY_SETBITS(mac, 0x801, 0x80);
PHY_SETBITS(mac, 0x43e, 0x4);
}
if (phy->phy_rev >= 2 && (phy->phy_flags & BWI_PHY_F_LINKED))
bwi_rf_get_gains(mac);
if (rf->rf_rev != 8)
bwi_rf_init(mac);
if (tpctl->tp_ctrl2 == 0xffff) {
bwi_rf_lo_update(mac);
} else {
if (rf->rf_type == BWI_RF_T_BCM2050 && rf->rf_rev == 8) {
RF_WRITE(mac, 0x52,
(tpctl->tp_ctrl1 << 4) | tpctl->tp_ctrl2);
} else {
RF_FILT_SETBITS(mac, 0x52, 0xfff0, tpctl->tp_ctrl2);
}
if (phy->phy_rev >= 6) {
PHY_FILT_SETBITS(mac, 0x36, 0xfff,
tpctl->tp_ctrl2 << 12);
}
if (sc->sc_card_flags & BWI_CARD_F_PA_GPIO9)
PHY_WRITE(mac, 0x2e, 0x8075);
else
PHY_WRITE(mac, 0x2e, 0x807f);
if (phy->phy_rev < 2)
PHY_WRITE(mac, 0x2f, 0x101);
else
PHY_WRITE(mac, 0x2f, 0x202);
}
if ((phy->phy_flags & BWI_PHY_F_LINKED) || phy->phy_rev >= 2) {
bwi_rf_lo_adjust(mac, tpctl);
PHY_WRITE(mac, 0x80f, 0x8078);
}
if ((sc->sc_card_flags & BWI_CARD_F_SW_NRSSI) == 0) {
bwi_rf_init_hw_nrssi_table(mac, 0xffff /* XXX */);
bwi_rf_set_nrssi_thr(mac);
} else if ((phy->phy_flags & BWI_PHY_F_LINKED) || phy->phy_rev >= 2) {
if (rf->rf_nrssi[0] == BWI_INVALID_NRSSI) {
KASSERT(rf->rf_nrssi[1] == BWI_INVALID_NRSSI,
("rf_nrssi[1] %d", rf->rf_nrssi[1]));
bwi_rf_calc_nrssi_slope(mac);
} else {
KASSERT(rf->rf_nrssi[1] != BWI_INVALID_NRSSI,
("rf_nrssi[1] %d", rf->rf_nrssi[1]));
bwi_rf_set_nrssi_thr(mac);
}
}
if (rf->rf_rev == 8)
PHY_WRITE(mac, 0x805, 0x3230);
bwi_mac_init_tpctl_11bg(mac);
if (sc->sc_bbp_id == BWI_BBPID_BCM4306 && sc->sc_bbp_pkg == 2) {
PHY_CLRBITS(mac, 0x429, 0x4000);
PHY_CLRBITS(mac, 0x4c3, 0x8000);
}
}
static void
bwi_phy_init_11b_rev2(struct bwi_mac *mac)
{
/* TODO:11B */
- if_printf(mac->mac_sc->sc_ifp,
+ device_printf(mac->mac_sc->sc_dev,
"%s is not implemented yet\n", __func__);
}
static void
bwi_phy_init_11b_rev4(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_rf *rf = &mac->mac_rf;
uint16_t val, ofs;
u_int chan;
CSR_WRITE_2(sc, BWI_BPHY_CTRL, BWI_BPHY_CTRL_INIT);
PHY_WRITE(mac, 0x20, 0x301c);
PHY_WRITE(mac, 0x26, 0);
PHY_WRITE(mac, 0x30, 0xc6);
PHY_WRITE(mac, 0x88, 0x3e00);
for (ofs = 0, val = 0x3c3d; ofs < 30; ++ofs, val -= 0x202)
PHY_WRITE(mac, 0x89 + ofs, val);
CSR_WRITE_2(sc, BWI_PHY_MAGIC_REG1, BWI_PHY_MAGIC_REG1_VAL1);
chan = rf->rf_curchan;
if (chan == IEEE80211_CHAN_ANY)
chan = 6; /* Force to channel 6 */
bwi_rf_set_chan(mac, chan, 0);
if (rf->rf_type != BWI_RF_T_BCM2050) {
RF_WRITE(mac, 0x75, 0x80);
RF_WRITE(mac, 0x79, 0x81);
}
RF_WRITE(mac, 0x50, 0x20);
RF_WRITE(mac, 0x50, 0x23);
if (rf->rf_type == BWI_RF_T_BCM2050) {
RF_WRITE(mac, 0x50, 0x20);
RF_WRITE(mac, 0x5a, 0x70);
RF_WRITE(mac, 0x5b, 0x7b);
RF_WRITE(mac, 0x5c, 0xb0);
RF_WRITE(mac, 0x7a, 0xf);
PHY_WRITE(mac, 0x38, 0x677);
bwi_rf_init_bcm2050(mac);
}
PHY_WRITE(mac, 0x14, 0x80);
PHY_WRITE(mac, 0x32, 0xca);
if (rf->rf_type == BWI_RF_T_BCM2050)
PHY_WRITE(mac, 0x32, 0xe0);
PHY_WRITE(mac, 0x35, 0x7c2);
bwi_rf_lo_update(mac);
PHY_WRITE(mac, 0x26, 0xcc00);
if (rf->rf_type == BWI_RF_T_BCM2050)
PHY_WRITE(mac, 0x26, 0xce00);
CSR_WRITE_2(sc, BWI_RF_CHAN_EX, 0x1100);
PHY_WRITE(mac, 0x2a, 0x88a3);
if (rf->rf_type == BWI_RF_T_BCM2050)
PHY_WRITE(mac, 0x2a, 0x88c2);
bwi_mac_set_tpctl_11bg(mac, NULL);
if (sc->sc_card_flags & BWI_CARD_F_SW_NRSSI) {
bwi_rf_calc_nrssi_slope(mac);
bwi_rf_set_nrssi_thr(mac);
}
bwi_mac_init_tpctl_11bg(mac);
}
static void
bwi_phy_init_11b_rev5(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_rf *rf = &mac->mac_rf;
struct bwi_phy *phy = &mac->mac_phy;
u_int orig_chan;
if (phy->phy_version == 1)
RF_SETBITS(mac, 0x7a, 0x50);
if (sc->sc_pci_subvid != PCI_VENDOR_BROADCOM &&
sc->sc_pci_subdid != BWI_PCI_SUBDEVICE_BU4306) {
uint16_t ofs, val;
val = 0x2120;
for (ofs = 0xa8; ofs < 0xc7; ++ofs) {
PHY_WRITE(mac, ofs, val);
val += 0x202;
}
}
PHY_FILT_SETBITS(mac, 0x35, 0xf0ff, 0x700);
if (rf->rf_type == BWI_RF_T_BCM2050)
PHY_WRITE(mac, 0x38, 0x667);
if ((phy->phy_flags & BWI_PHY_F_LINKED) || phy->phy_rev >= 2) {
if (rf->rf_type == BWI_RF_T_BCM2050) {
RF_SETBITS(mac, 0x7a, 0x20);
RF_SETBITS(mac, 0x51, 0x4);
}
CSR_WRITE_2(sc, BWI_RF_ANTDIV, 0);
PHY_SETBITS(mac, 0x802, 0x100);
PHY_SETBITS(mac, 0x42b, 0x2000);
PHY_WRITE(mac, 0x1c, 0x186a);
PHY_FILT_SETBITS(mac, 0x13, 0xff, 0x1900);
PHY_FILT_SETBITS(mac, 0x35, 0xffc0, 0x64);
PHY_FILT_SETBITS(mac, 0x5d, 0xff80, 0xa);
}
/* TODO: bad_frame_preempt? */
if (phy->phy_version == 1) {
PHY_WRITE(mac, 0x26, 0xce00);
PHY_WRITE(mac, 0x21, 0x3763);
PHY_WRITE(mac, 0x22, 0x1bc3);
PHY_WRITE(mac, 0x23, 0x6f9);
PHY_WRITE(mac, 0x24, 0x37e);
} else {
PHY_WRITE(mac, 0x26, 0xcc00);
}
PHY_WRITE(mac, 0x30, 0xc6);
CSR_WRITE_2(sc, BWI_BPHY_CTRL, BWI_BPHY_CTRL_INIT);
if (phy->phy_version == 1)
PHY_WRITE(mac, 0x20, 0x3e1c);
else
PHY_WRITE(mac, 0x20, 0x301c);
if (phy->phy_version == 0)
CSR_WRITE_2(sc, BWI_PHY_MAGIC_REG1, BWI_PHY_MAGIC_REG1_VAL1);
/* Force to channel 7 */
orig_chan = rf->rf_curchan;
bwi_rf_set_chan(mac, 7, 0);
if (rf->rf_type != BWI_RF_T_BCM2050) {
RF_WRITE(mac, 0x75, 0x80);
RF_WRITE(mac, 0x79, 0x81);
}
RF_WRITE(mac, 0x50, 0x20);
RF_WRITE(mac, 0x50, 0x23);
if (rf->rf_type == BWI_RF_T_BCM2050) {
RF_WRITE(mac, 0x50, 0x20);
RF_WRITE(mac, 0x5a, 0x70);
}
RF_WRITE(mac, 0x5b, 0x7b);
RF_WRITE(mac, 0x5c, 0xb0);
RF_SETBITS(mac, 0x7a, 0x7);
bwi_rf_set_chan(mac, orig_chan, 0);
PHY_WRITE(mac, 0x14, 0x80);
PHY_WRITE(mac, 0x32, 0xca);
PHY_WRITE(mac, 0x2a, 0x88a3);
bwi_mac_set_tpctl_11bg(mac, NULL);
if (rf->rf_type == BWI_RF_T_BCM2050)
RF_WRITE(mac, 0x5d, 0xd);
CSR_FILT_SETBITS_2(sc, BWI_PHY_MAGIC_REG1, 0xffc0, 0x4);
}
static void
bwi_phy_init_11b_rev6(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_rf *rf = &mac->mac_rf;
struct bwi_phy *phy = &mac->mac_phy;
uint16_t val, ofs;
u_int orig_chan;
PHY_WRITE(mac, 0x3e, 0x817a);
RF_SETBITS(mac, 0x7a, 0x58);
if (rf->rf_rev == 4 || rf->rf_rev == 5) {
RF_WRITE(mac, 0x51, 0x37);
RF_WRITE(mac, 0x52, 0x70);
RF_WRITE(mac, 0x53, 0xb3);
RF_WRITE(mac, 0x54, 0x9b);
RF_WRITE(mac, 0x5a, 0x88);
RF_WRITE(mac, 0x5b, 0x88);
RF_WRITE(mac, 0x5d, 0x88);
RF_WRITE(mac, 0x5e, 0x88);
RF_WRITE(mac, 0x7d, 0x88);
HFLAGS_SETBITS(mac, BWI_HFLAG_MAGIC1);
} else if (rf->rf_rev == 8) {
RF_WRITE(mac, 0x51, 0);
RF_WRITE(mac, 0x52, 0x40);
RF_WRITE(mac, 0x53, 0xb7);
RF_WRITE(mac, 0x54, 0x98);
RF_WRITE(mac, 0x5a, 0x88);
RF_WRITE(mac, 0x5b, 0x6b);
RF_WRITE(mac, 0x5c, 0xf);
if (sc->sc_card_flags & BWI_CARD_F_ALT_IQ) {
RF_WRITE(mac, 0x5d, 0xfa);
RF_WRITE(mac, 0x5e, 0xd8);
} else {
RF_WRITE(mac, 0x5d, 0xf5);
RF_WRITE(mac, 0x5e, 0xb8);
}
RF_WRITE(mac, 0x73, 0x3);
RF_WRITE(mac, 0x7d, 0xa8);
RF_WRITE(mac, 0x7c, 0x1);
RF_WRITE(mac, 0x7e, 0x8);
}
val = 0x1e1f;
for (ofs = 0x88; ofs < 0x98; ++ofs) {
PHY_WRITE(mac, ofs, val);
val -= 0x202;
}
val = 0x3e3f;
for (ofs = 0x98; ofs < 0xa8; ++ofs) {
PHY_WRITE(mac, ofs, val);
val -= 0x202;
}
val = 0x2120;
for (ofs = 0xa8; ofs < 0xc8; ++ofs) {
PHY_WRITE(mac, ofs, (val & 0x3f3f));
val += 0x202;
/* XXX: delay 10 us to avoid PCI parity errors with BCM4318 */
DELAY(10);
}
if (phy->phy_mode == IEEE80211_MODE_11G) {
RF_SETBITS(mac, 0x7a, 0x20);
RF_SETBITS(mac, 0x51, 0x4);
PHY_SETBITS(mac, 0x802, 0x100);
PHY_SETBITS(mac, 0x42b, 0x2000);
PHY_WRITE(mac, 0x5b, 0);
PHY_WRITE(mac, 0x5c, 0);
}
/* Force to channel 7 */
orig_chan = rf->rf_curchan;
if (orig_chan >= 8)
bwi_rf_set_chan(mac, 1, 0);
else
bwi_rf_set_chan(mac, 13, 0);
RF_WRITE(mac, 0x50, 0x20);
RF_WRITE(mac, 0x50, 0x23);
DELAY(40);
if (rf->rf_rev < 6 || rf->rf_rev == 8) {
RF_SETBITS(mac, 0x7c, 0x2);
RF_WRITE(mac, 0x50, 0x20);
}
if (rf->rf_rev <= 2) {
RF_WRITE(mac, 0x7c, 0x20);
RF_WRITE(mac, 0x5a, 0x70);
RF_WRITE(mac, 0x5b, 0x7b);
RF_WRITE(mac, 0x5c, 0xb0);
}
RF_FILT_SETBITS(mac, 0x7a, 0xf8, 0x7);
bwi_rf_set_chan(mac, orig_chan, 0);
PHY_WRITE(mac, 0x14, 0x200);
if (rf->rf_rev >= 6)
PHY_WRITE(mac, 0x2a, 0x88c2);
else
PHY_WRITE(mac, 0x2a, 0x8ac0);
PHY_WRITE(mac, 0x38, 0x668);
bwi_mac_set_tpctl_11bg(mac, NULL);
if (rf->rf_rev <= 5) {
PHY_FILT_SETBITS(mac, 0x5d, 0xff80, 0x3);
if (rf->rf_rev <= 2)
RF_WRITE(mac, 0x5d, 0xd);
}
if (phy->phy_version == 4) {
CSR_WRITE_2(sc, BWI_PHY_MAGIC_REG1, BWI_PHY_MAGIC_REG1_VAL2);
PHY_CLRBITS(mac, 0x61, 0xf000);
} else {
PHY_FILT_SETBITS(mac, 0x2, 0xffc0, 0x4);
}
if (phy->phy_mode == IEEE80211_MODE_11B) {
CSR_WRITE_2(sc, BWI_BBP_ATTEN, BWI_BBP_ATTEN_MAGIC2);
PHY_WRITE(mac, 0x16, 0x410);
PHY_WRITE(mac, 0x17, 0x820);
PHY_WRITE(mac, 0x62, 0x7);
bwi_rf_init_bcm2050(mac);
bwi_rf_lo_update(mac);
if (sc->sc_card_flags & BWI_CARD_F_SW_NRSSI) {
bwi_rf_calc_nrssi_slope(mac);
bwi_rf_set_nrssi_thr(mac);
}
bwi_mac_init_tpctl_11bg(mac);
} else {
CSR_WRITE_2(sc, BWI_BBP_ATTEN, 0);
}
}
#define N(arr) (int)(sizeof(arr) / sizeof(arr[0]))
static void
bwi_phy_config_11g(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_phy *phy = &mac->mac_phy;
const uint16_t *tbl;
uint16_t wrd_ofs1, wrd_ofs2;
int i, n;
if (phy->phy_rev == 1) {
PHY_WRITE(mac, 0x406, 0x4f19);
PHY_FILT_SETBITS(mac, 0x429, 0xfc3f, 0x340);
PHY_WRITE(mac, 0x42c, 0x5a);
PHY_WRITE(mac, 0x427, 0x1a);
/* Fill frequency table */
for (i = 0; i < N(bwi_phy_freq_11g_rev1); ++i) {
bwi_tbl_write_2(mac, BWI_PHYTBL_FREQ + i,
bwi_phy_freq_11g_rev1[i]);
}
/* Fill noise table */
for (i = 0; i < N(bwi_phy_noise_11g_rev1); ++i) {
bwi_tbl_write_2(mac, BWI_PHYTBL_NOISE + i,
bwi_phy_noise_11g_rev1[i]);
}
/* Fill rotor table */
for (i = 0; i < N(bwi_phy_rotor_11g_rev1); ++i) {
/* NB: data length is 4 bytes */
bwi_tbl_write_4(mac, BWI_PHYTBL_ROTOR + i,
bwi_phy_rotor_11g_rev1[i]);
}
} else {
bwi_nrssi_write(mac, 0xba98, (int16_t)0x7654); /* XXX */
if (phy->phy_rev == 2) {
PHY_WRITE(mac, 0x4c0, 0x1861);
PHY_WRITE(mac, 0x4c1, 0x271);
} else if (phy->phy_rev > 2) {
PHY_WRITE(mac, 0x4c0, 0x98);
PHY_WRITE(mac, 0x4c1, 0x70);
PHY_WRITE(mac, 0x4c9, 0x80);
}
PHY_SETBITS(mac, 0x42b, 0x800);
/* Fill RSSI table */
for (i = 0; i < 64; ++i)
bwi_tbl_write_2(mac, BWI_PHYTBL_RSSI + i, i);
/* Fill noise table */
for (i = 0; i < N(bwi_phy_noise_11g); ++i) {
bwi_tbl_write_2(mac, BWI_PHYTBL_NOISE + i,
bwi_phy_noise_11g[i]);
}
}
/*
* Fill noise scale table
*/
if (phy->phy_rev <= 2) {
tbl = bwi_phy_noise_scale_11g_rev2;
n = N(bwi_phy_noise_scale_11g_rev2);
} else if (phy->phy_rev >= 7 && (PHY_READ(mac, 0x449) & 0x200)) {
tbl = bwi_phy_noise_scale_11g_rev7;
n = N(bwi_phy_noise_scale_11g_rev7);
} else {
tbl = bwi_phy_noise_scale_11g;
n = N(bwi_phy_noise_scale_11g);
}
for (i = 0; i < n; ++i)
bwi_tbl_write_2(mac, BWI_PHYTBL_NOISE_SCALE + i, tbl[i]);
/*
* Fill sigma square table
*/
if (phy->phy_rev == 2) {
tbl = bwi_phy_sigma_sq_11g_rev2;
n = N(bwi_phy_sigma_sq_11g_rev2);
} else if (phy->phy_rev > 2 && phy->phy_rev <= 8) {
tbl = bwi_phy_sigma_sq_11g_rev7;
n = N(bwi_phy_sigma_sq_11g_rev7);
} else {
tbl = NULL;
n = 0;
}
for (i = 0; i < n; ++i)
bwi_tbl_write_2(mac, BWI_PHYTBL_SIGMA_SQ + i, tbl[i]);
if (phy->phy_rev == 1) {
/* Fill delay table */
for (i = 0; i < N(bwi_phy_delay_11g_rev1); ++i) {
bwi_tbl_write_4(mac, BWI_PHYTBL_DELAY + i,
bwi_phy_delay_11g_rev1[i]);
}
/* Fill WRSSI (Wide-Band RSSI) table */
for (i = 4; i < 20; ++i)
bwi_tbl_write_2(mac, BWI_PHYTBL_WRSSI_REV1 + i, 0x20);
bwi_phy_config_agc(mac);
wrd_ofs1 = 0x5001;
wrd_ofs2 = 0x5002;
} else {
/* Fill WRSSI (Wide-Band RSSI) table */
for (i = 0; i < 0x20; ++i)
bwi_tbl_write_2(mac, BWI_PHYTBL_WRSSI + i, 0x820);
bwi_phy_config_agc(mac);
PHY_READ(mac, 0x400); /* Dummy read */
PHY_WRITE(mac, 0x403, 0x1000);
bwi_tbl_write_2(mac, 0x3c02, 0xf);
bwi_tbl_write_2(mac, 0x3c03, 0x14);
wrd_ofs1 = 0x401;
wrd_ofs2 = 0x402;
}
if (!(BWI_IS_BRCM_BU4306(sc) && sc->sc_pci_revid == 0x17)) {
bwi_tbl_write_2(mac, wrd_ofs1, 0x2);
bwi_tbl_write_2(mac, wrd_ofs2, 0x1);
}
/* phy->phy_flags & BWI_PHY_F_LINKED ? */
if (sc->sc_card_flags & BWI_CARD_F_PA_GPIO9)
PHY_WRITE(mac, 0x46e, 0x3cf);
}
#undef N
/*
* Configure Automatic Gain Controller
*/
static void
bwi_phy_config_agc(struct bwi_mac *mac)
{
struct bwi_phy *phy = &mac->mac_phy;
uint16_t ofs;
ofs = phy->phy_rev == 1 ? 0x4c00 : 0;
bwi_tbl_write_2(mac, ofs, 0xfe);
bwi_tbl_write_2(mac, ofs + 1, 0xd);
bwi_tbl_write_2(mac, ofs + 2, 0x13);
bwi_tbl_write_2(mac, ofs + 3, 0x19);
if (phy->phy_rev == 1) {
bwi_tbl_write_2(mac, 0x1800, 0x2710);
bwi_tbl_write_2(mac, 0x1801, 0x9b83);
bwi_tbl_write_2(mac, 0x1802, 0x9b83);
bwi_tbl_write_2(mac, 0x1803, 0xf8d);
PHY_WRITE(mac, 0x455, 0x4);
}
PHY_FILT_SETBITS(mac, 0x4a5, 0xff, 0x5700);
PHY_FILT_SETBITS(mac, 0x41a, 0xff80, 0xf);
PHY_FILT_SETBITS(mac, 0x41a, 0xc07f, 0x2b80);
PHY_FILT_SETBITS(mac, 0x48c, 0xf0ff, 0x300);
RF_SETBITS(mac, 0x7a, 0x8);
PHY_FILT_SETBITS(mac, 0x4a0, 0xfff0, 0x8);
PHY_FILT_SETBITS(mac, 0x4a1, 0xf0ff, 0x600);
PHY_FILT_SETBITS(mac, 0x4a2, 0xf0ff, 0x700);
PHY_FILT_SETBITS(mac, 0x4a0, 0xf0ff, 0x100);
if (phy->phy_rev == 1)
PHY_FILT_SETBITS(mac, 0x4a2, 0xfff0, 0x7);
PHY_FILT_SETBITS(mac, 0x488, 0xff00, 0x1c);
PHY_FILT_SETBITS(mac, 0x488, 0xc0ff, 0x200);
PHY_FILT_SETBITS(mac, 0x496, 0xff00, 0x1c);
PHY_FILT_SETBITS(mac, 0x489, 0xff00, 0x20);
PHY_FILT_SETBITS(mac, 0x489, 0xc0ff, 0x200);
PHY_FILT_SETBITS(mac, 0x482, 0xff00, 0x2e);
PHY_FILT_SETBITS(mac, 0x496, 0xff, 0x1a00);
PHY_FILT_SETBITS(mac, 0x481, 0xff00, 0x28);
PHY_FILT_SETBITS(mac, 0x481, 0xff, 0x2c00);
if (phy->phy_rev == 1) {
PHY_WRITE(mac, 0x430, 0x92b);
PHY_FILT_SETBITS(mac, 0x41b, 0xffe1, 0x2);
} else {
PHY_CLRBITS(mac, 0x41b, 0x1e);
PHY_WRITE(mac, 0x41f, 0x287a);
PHY_FILT_SETBITS(mac, 0x420, 0xfff0, 0x4);
if (phy->phy_rev >= 6) {
PHY_WRITE(mac, 0x422, 0x287a);
PHY_FILT_SETBITS(mac, 0x420, 0xfff, 0x3000);
}
}
PHY_FILT_SETBITS(mac, 0x4a8, 0x8080, 0x7874);
PHY_WRITE(mac, 0x48e, 0x1c00);
if (phy->phy_rev == 1) {
PHY_FILT_SETBITS(mac, 0x4ab, 0xf0ff, 0x600);
PHY_WRITE(mac, 0x48b, 0x5e);
PHY_FILT_SETBITS(mac, 0x48c, 0xff00, 0x1e);
PHY_WRITE(mac, 0x48d, 0x2);
}
bwi_tbl_write_2(mac, ofs + 0x800, 0);
bwi_tbl_write_2(mac, ofs + 0x801, 7);
bwi_tbl_write_2(mac, ofs + 0x802, 16);
bwi_tbl_write_2(mac, ofs + 0x803, 28);
if (phy->phy_rev >= 6) {
PHY_CLRBITS(mac, 0x426, 0x3);
PHY_CLRBITS(mac, 0x426, 0x1000);
}
}
void
bwi_set_gains(struct bwi_mac *mac, const struct bwi_gains *gains)
{
struct bwi_phy *phy = &mac->mac_phy;
uint16_t tbl_gain_ofs1, tbl_gain_ofs2, tbl_gain;
int i;
if (phy->phy_rev <= 1) {
tbl_gain_ofs1 = 0x5000;
tbl_gain_ofs2 = tbl_gain_ofs1 + 16;
} else {
tbl_gain_ofs1 = 0x400;
tbl_gain_ofs2 = tbl_gain_ofs1 + 8;
}
for (i = 0; i < 4; ++i) {
if (gains != NULL) {
tbl_gain = gains->tbl_gain1;
} else {
/* Bit swap */
tbl_gain = (i & 0x1) << 1;
tbl_gain |= (i & 0x2) >> 1;
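/* i.e. gain indices 0, 1, 2, 3 map to 0, 2, 1, 3 */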
}
bwi_tbl_write_2(mac, tbl_gain_ofs1 + i, tbl_gain);
}
for (i = 0; i < 16; ++i) {
if (gains != NULL)
tbl_gain = gains->tbl_gain2;
else
tbl_gain = i;
bwi_tbl_write_2(mac, tbl_gain_ofs2 + i, tbl_gain);
}
if (gains == NULL || (gains != NULL && gains->phy_gain != -1)) {
uint16_t phy_gain1, phy_gain2;
if (gains != NULL) {
phy_gain1 =
((uint16_t)gains->phy_gain << 14) |
((uint16_t)gains->phy_gain << 6);
phy_gain2 = phy_gain1;
} else {
phy_gain1 = 0x4040;
phy_gain2 = 0x4000;
}
PHY_FILT_SETBITS(mac, 0x4a0, 0xbfbf, phy_gain1);
PHY_FILT_SETBITS(mac, 0x4a1, 0xbfbf, phy_gain1);
PHY_FILT_SETBITS(mac, 0x4a2, 0xbfbf, phy_gain2);
}
bwi_mac_dummy_xmit(mac);
}
void
bwi_phy_clear_state(struct bwi_phy *phy)
{
phy->phy_flags &= ~BWI_CLEAR_PHY_FLAGS;
}
Index: head/sys/dev/bwi/bwirf.c
===================================================================
--- head/sys/dev/bwi/bwirf.c (revision 287196)
+++ head/sys/dev/bwi/bwirf.c (revision 287197)
@@ -1,2692 +1,2691 @@
/*
* Copyright (c) 2007 The DragonFly Project. All rights reserved.
*
* This code is derived from software contributed to The DragonFly Project
* by Sepherosa Ziehau <sepherosa@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of The DragonFly Project nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific, prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $DragonFly: src/sys/dev/netif/bwi/bwirf.c,v 1.9 2008/08/21 12:19:33 swildner Exp $
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_bwi.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_amrr.h>
#include <machine/bus.h>
#include <dev/bwi/bitops.h>
#include <dev/bwi/if_bwireg.h>
#include <dev/bwi/if_bwivar.h>
#include <dev/bwi/bwimac.h>
#include <dev/bwi/bwirf.h>
#include <dev/bwi/bwiphy.h>
#define RF_LO_WRITE(mac, lo) bwi_rf_lo_write((mac), (lo))
#define BWI_RF_2GHZ_CHAN(chan) \
(ieee80211_ieee2mhz((chan), IEEE80211_CHAN_2GHZ) - 2400)
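/*
 * For illustration: ieee80211_ieee2mhz() maps 2 GHz channel 1 to
 * 2412 MHz and channel 14 to 2484 MHz, so BWI_RF_2GHZ_CHAN() yields
 * 12 and 84 for those channels.
 */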
#define BWI_DEFAULT_IDLE_TSSI 52
struct rf_saveregs {
uint16_t phy_01;
uint16_t phy_03;
uint16_t phy_0a;
uint16_t phy_15;
uint16_t phy_2a;
uint16_t phy_30;
uint16_t phy_35;
uint16_t phy_60;
uint16_t phy_429;
uint16_t phy_802;
uint16_t phy_811;
uint16_t phy_812;
uint16_t phy_814;
uint16_t phy_815;
uint16_t rf_43;
uint16_t rf_52;
uint16_t rf_7a;
};
#define SAVE_RF_REG(mac, regs, n) (regs)->rf_##n = RF_READ((mac), 0x##n)
#define RESTORE_RF_REG(mac, regs, n) RF_WRITE((mac), 0x##n, (regs)->rf_##n)
#define SAVE_PHY_REG(mac, regs, n) (regs)->phy_##n = PHY_READ((mac), 0x##n)
#define RESTORE_PHY_REG(mac, regs, n) PHY_WRITE((mac), 0x##n, (regs)->phy_##n)
static int bwi_rf_calc_txpower(int8_t *, uint8_t, const int16_t[]);
static void bwi_rf_work_around(struct bwi_mac *, u_int);
static int bwi_rf_gain_max_reached(struct bwi_mac *, int);
static uint16_t bwi_rf_calibval(struct bwi_mac *);
static uint16_t bwi_rf_get_tp_ctrl2(struct bwi_mac *);
static void bwi_rf_lo_update_11b(struct bwi_mac *);
static uint16_t bwi_rf_lo_measure_11b(struct bwi_mac *);
static void bwi_rf_lo_update_11g(struct bwi_mac *);
static uint32_t bwi_rf_lo_devi_measure(struct bwi_mac *, uint16_t);
static void bwi_rf_lo_measure_11g(struct bwi_mac *,
const struct bwi_rf_lo *, struct bwi_rf_lo *, uint8_t);
static uint8_t _bwi_rf_lo_update_11g(struct bwi_mac *, uint16_t);
static void bwi_rf_lo_write(struct bwi_mac *, const struct bwi_rf_lo *);
static void bwi_rf_set_nrssi_ofs_11g(struct bwi_mac *);
static void bwi_rf_calc_nrssi_slope_11b(struct bwi_mac *);
static void bwi_rf_calc_nrssi_slope_11g(struct bwi_mac *);
static void bwi_rf_set_nrssi_thr_11b(struct bwi_mac *);
static void bwi_rf_set_nrssi_thr_11g(struct bwi_mac *);
static void bwi_rf_init_sw_nrssi_table(struct bwi_mac *);
static int bwi_rf_calc_rssi_bcm2050(struct bwi_mac *,
const struct bwi_rxbuf_hdr *);
static int bwi_rf_calc_rssi_bcm2053(struct bwi_mac *,
const struct bwi_rxbuf_hdr *);
static int bwi_rf_calc_rssi_bcm2060(struct bwi_mac *,
const struct bwi_rxbuf_hdr *);
static int bwi_rf_calc_noise_bcm2050(struct bwi_mac *);
static int bwi_rf_calc_noise_bcm2053(struct bwi_mac *);
static int bwi_rf_calc_noise_bcm2060(struct bwi_mac *);
static void bwi_rf_on_11a(struct bwi_mac *);
static void bwi_rf_on_11bg(struct bwi_mac *);
static void bwi_rf_off_11a(struct bwi_mac *);
static void bwi_rf_off_11bg(struct bwi_mac *);
static void bwi_rf_off_11g_rev5(struct bwi_mac *);
static const int8_t bwi_txpower_map_11b[BWI_TSSI_MAX] =
{ BWI_TXPOWER_MAP_11B };
static const int8_t bwi_txpower_map_11g[BWI_TSSI_MAX] =
{ BWI_TXPOWER_MAP_11G };
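/*
 * Narrow RSSI for 11g PHYs: bits 13:8 of PHY register 0x47f hold a
 * 6-bit two's complement value, e.g. a raw field of 0x3f decodes to -1
 * and 0x1f decodes to 31.
 */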
static __inline int16_t
bwi_nrssi_11g(struct bwi_mac *mac)
{
int16_t val;
#define NRSSI_11G_MASK __BITS(13, 8)
val = (int16_t)__SHIFTOUT(PHY_READ(mac, 0x47f), NRSSI_11G_MASK);
if (val >= 32)
val -= 64;
return val;
#undef NRSSI_11G_MASK
}
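/*
 * The RF LO table is indexed as rf_atten + 14 * (bbp_atten / 2); for
 * example, rf_atten 3 with bbp_atten 4 selects entry 3 + 14 * 2 = 31.
 */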
static __inline struct bwi_rf_lo *
bwi_get_rf_lo(struct bwi_mac *mac, uint16_t rf_atten, uint16_t bbp_atten)
{
int n;
n = rf_atten + (14 * (bbp_atten / 2));
KASSERT(n < BWI_RFLO_MAX, ("n %d", n));
return &mac->mac_rf.rf_lo[n];
}
static __inline int
bwi_rf_lo_isused(struct bwi_mac *mac, const struct bwi_rf_lo *lo)
{
struct bwi_rf *rf = &mac->mac_rf;
int idx;
idx = lo - rf->rf_lo;
KASSERT(idx >= 0 && idx < BWI_RFLO_MAX, ("idx %d", idx));
return isset(rf->rf_lo_used, idx);
}
void
bwi_rf_write(struct bwi_mac *mac, uint16_t ctrl, uint16_t data)
{
struct bwi_softc *sc = mac->mac_sc;
CSR_WRITE_2(sc, BWI_RF_CTRL, ctrl);
CSR_WRITE_2(sc, BWI_RF_DATA_LO, data);
}
uint16_t
bwi_rf_read(struct bwi_mac *mac, uint16_t ctrl)
{
struct bwi_rf *rf = &mac->mac_rf;
struct bwi_softc *sc = mac->mac_sc;
ctrl |= rf->rf_ctrl_rd;
if (rf->rf_ctrl_adj) {
/* XXX */
if (ctrl < 0x70)
ctrl += 0x80;
else if (ctrl < 0x80)
ctrl += 0x70;
}
CSR_WRITE_2(sc, BWI_RF_CTRL, ctrl);
return CSR_READ_2(sc, BWI_RF_DATA_LO);
}
int
bwi_rf_attach(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_phy *phy = &mac->mac_phy;
struct bwi_rf *rf = &mac->mac_rf;
uint16_t type, manu;
uint8_t rev;
/*
* Get RF manufacturer/type/revision
*/
if (sc->sc_bbp_id == BWI_BBPID_BCM4317) {
/*
* Fake a BCM2050 RF
*/
manu = BWI_RF_MANUFACT_BCM;
type = BWI_RF_T_BCM2050;
if (sc->sc_bbp_rev == 0)
rev = 3;
else if (sc->sc_bbp_rev == 1)
rev = 4;
else
rev = 5;
} else {
uint32_t val;
CSR_WRITE_2(sc, BWI_RF_CTRL, BWI_RF_CTRL_RFINFO);
val = CSR_READ_2(sc, BWI_RF_DATA_HI);
val <<= 16;
CSR_WRITE_2(sc, BWI_RF_CTRL, BWI_RF_CTRL_RFINFO);
val |= CSR_READ_2(sc, BWI_RF_DATA_LO);
manu = __SHIFTOUT(val, BWI_RFINFO_MANUFACT_MASK);
type = __SHIFTOUT(val, BWI_RFINFO_TYPE_MASK);
rev = __SHIFTOUT(val, BWI_RFINFO_REV_MASK);
}
device_printf(sc->sc_dev, "RF: manu 0x%03x, type 0x%04x, rev %u\n",
manu, type, rev);
/*
* Verify whether the RF is supported
*/
rf->rf_ctrl_rd = 0;
rf->rf_ctrl_adj = 0;
switch (phy->phy_mode) {
case IEEE80211_MODE_11A:
if (manu != BWI_RF_MANUFACT_BCM ||
type != BWI_RF_T_BCM2060 ||
rev != 1) {
device_printf(sc->sc_dev, "only BCM2060 rev 1 RF "
"is supported for 11A PHY\n");
return ENXIO;
}
rf->rf_ctrl_rd = BWI_RF_CTRL_RD_11A;
rf->rf_on = bwi_rf_on_11a;
rf->rf_off = bwi_rf_off_11a;
rf->rf_calc_rssi = bwi_rf_calc_rssi_bcm2060;
rf->rf_calc_noise = bwi_rf_calc_noise_bcm2060;
break;
case IEEE80211_MODE_11B:
if (type == BWI_RF_T_BCM2050) {
rf->rf_ctrl_rd = BWI_RF_CTRL_RD_11BG;
rf->rf_calc_rssi = bwi_rf_calc_rssi_bcm2050;
rf->rf_calc_noise = bwi_rf_calc_noise_bcm2050;
} else if (type == BWI_RF_T_BCM2053) {
rf->rf_ctrl_adj = 1;
rf->rf_calc_rssi = bwi_rf_calc_rssi_bcm2053;
rf->rf_calc_noise = bwi_rf_calc_noise_bcm2053;
} else {
device_printf(sc->sc_dev, "only BCM2050/BCM2053 RF "
"is supported for 11B PHY\n");
return ENXIO;
}
rf->rf_on = bwi_rf_on_11bg;
rf->rf_off = bwi_rf_off_11bg;
rf->rf_calc_nrssi_slope = bwi_rf_calc_nrssi_slope_11b;
rf->rf_set_nrssi_thr = bwi_rf_set_nrssi_thr_11b;
if (phy->phy_rev == 6)
rf->rf_lo_update = bwi_rf_lo_update_11g;
else
rf->rf_lo_update = bwi_rf_lo_update_11b;
break;
case IEEE80211_MODE_11G:
if (type != BWI_RF_T_BCM2050) {
device_printf(sc->sc_dev, "only BCM2050 RF "
"is supported for 11G PHY\n");
return ENXIO;
}
rf->rf_ctrl_rd = BWI_RF_CTRL_RD_11BG;
rf->rf_on = bwi_rf_on_11bg;
if (mac->mac_rev >= 5)
rf->rf_off = bwi_rf_off_11g_rev5;
else
rf->rf_off = bwi_rf_off_11bg;
rf->rf_calc_nrssi_slope = bwi_rf_calc_nrssi_slope_11g;
rf->rf_set_nrssi_thr = bwi_rf_set_nrssi_thr_11g;
rf->rf_calc_rssi = bwi_rf_calc_rssi_bcm2050;
rf->rf_calc_noise = bwi_rf_calc_noise_bcm2050;
rf->rf_lo_update = bwi_rf_lo_update_11g;
break;
default:
device_printf(sc->sc_dev, "unsupported PHY mode\n");
return ENXIO;
}
rf->rf_type = type;
rf->rf_rev = rev;
rf->rf_manu = manu;
rf->rf_curchan = IEEE80211_CHAN_ANY;
rf->rf_ant_mode = BWI_ANT_MODE_AUTO;
return 0;
}
void
bwi_rf_set_chan(struct bwi_mac *mac, u_int chan, int work_around)
{
struct bwi_softc *sc = mac->mac_sc;
if (chan == IEEE80211_CHAN_ANY)
return;
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_CHAN, chan);
/* TODO: 11A */
if (work_around)
bwi_rf_work_around(mac, chan);
CSR_WRITE_2(sc, BWI_RF_CHAN, BWI_RF_2GHZ_CHAN(chan));
if (chan == 14) {
if (sc->sc_locale == BWI_SPROM_LOCALE_JAPAN)
HFLAGS_CLRBITS(mac, BWI_HFLAG_NOT_JAPAN);
else
HFLAGS_SETBITS(mac, BWI_HFLAG_NOT_JAPAN);
CSR_SETBITS_2(sc, BWI_RF_CHAN_EX, (1 << 11)); /* XXX */
} else {
CSR_CLRBITS_2(sc, BWI_RF_CHAN_EX, 0x840); /* XXX */
}
DELAY(8000); /* DELAY(2000); */
mac->mac_rf.rf_curchan = chan;
}
void
bwi_rf_get_gains(struct bwi_mac *mac)
{
#define SAVE_PHY_MAX 15
#define SAVE_RF_MAX 3
static const uint16_t save_rf_regs[SAVE_RF_MAX] =
{ 0x52, 0x43, 0x7a };
static const uint16_t save_phy_regs[SAVE_PHY_MAX] = {
0x0429, 0x0001, 0x0811, 0x0812,
0x0814, 0x0815, 0x005a, 0x0059,
0x0058, 0x000a, 0x0003, 0x080f,
0x0810, 0x002b, 0x0015
};
struct bwi_softc *sc = mac->mac_sc;
struct bwi_phy *phy = &mac->mac_phy;
struct bwi_rf *rf = &mac->mac_rf;
uint16_t save_phy[SAVE_PHY_MAX];
uint16_t save_rf[SAVE_RF_MAX];
uint16_t trsw;
int i, j, loop1_max, loop1, loop2;
/*
* Save PHY/RF registers for later restoration
*/
for (i = 0; i < SAVE_PHY_MAX; ++i)
save_phy[i] = PHY_READ(mac, save_phy_regs[i]);
PHY_READ(mac, 0x2d); /* dummy read */
for (i = 0; i < SAVE_RF_MAX; ++i)
save_rf[i] = RF_READ(mac, save_rf_regs[i]);
PHY_CLRBITS(mac, 0x429, 0xc000);
PHY_SETBITS(mac, 0x1, 0x8000);
PHY_SETBITS(mac, 0x811, 0x2);
PHY_CLRBITS(mac, 0x812, 0x2);
PHY_SETBITS(mac, 0x811, 0x1);
PHY_CLRBITS(mac, 0x812, 0x1);
PHY_SETBITS(mac, 0x814, 0x1);
PHY_CLRBITS(mac, 0x815, 0x1);
PHY_SETBITS(mac, 0x814, 0x2);
PHY_CLRBITS(mac, 0x815, 0x2);
PHY_SETBITS(mac, 0x811, 0xc);
PHY_SETBITS(mac, 0x812, 0xc);
PHY_SETBITS(mac, 0x811, 0x30);
PHY_FILT_SETBITS(mac, 0x812, 0xffcf, 0x10);
PHY_WRITE(mac, 0x5a, 0x780);
PHY_WRITE(mac, 0x59, 0xc810);
PHY_WRITE(mac, 0x58, 0xd);
PHY_SETBITS(mac, 0xa, 0x2000);
PHY_SETBITS(mac, 0x814, 0x4);
PHY_CLRBITS(mac, 0x815, 0x4);
PHY_FILT_SETBITS(mac, 0x3, 0xff9f, 0x40);
if (rf->rf_rev == 8) {
loop1_max = 15;
RF_WRITE(mac, 0x43, loop1_max);
} else {
loop1_max = 9;
RF_WRITE(mac, 0x52, 0x0);
RF_FILT_SETBITS(mac, 0x43, 0xfff0, loop1_max);
}
bwi_phy_set_bbp_atten(mac, 11);
if (phy->phy_rev >= 3)
PHY_WRITE(mac, 0x80f, 0xc020);
else
PHY_WRITE(mac, 0x80f, 0x8020);
PHY_WRITE(mac, 0x810, 0);
PHY_FILT_SETBITS(mac, 0x2b, 0xffc0, 0x1);
PHY_FILT_SETBITS(mac, 0x2b, 0xc0ff, 0x800);
PHY_SETBITS(mac, 0x811, 0x100);
PHY_CLRBITS(mac, 0x812, 0x3000);
if ((sc->sc_card_flags & BWI_CARD_F_EXT_LNA) &&
phy->phy_rev >= 7) {
PHY_SETBITS(mac, 0x811, 0x800);
PHY_SETBITS(mac, 0x812, 0x8000);
}
RF_CLRBITS(mac, 0x7a, 0xff08);
/*
* Find out 'loop1/loop2', which will be used to calculate
* max loopback gain later
*/
j = 0;
for (i = 0; i < loop1_max; ++i) {
for (j = 0; j < 16; ++j) {
RF_WRITE(mac, 0x43, i);
if (bwi_rf_gain_max_reached(mac, j))
goto loop1_exit;
}
}
loop1_exit:
loop1 = i;
loop2 = j;
/*
* Find out 'trsw', which will be used to calculate
* TRSW(TX/RX switch) RX gain later
*/
if (loop2 >= 8) {
PHY_SETBITS(mac, 0x812, 0x30);
trsw = 0x1b;
for (i = loop2 - 8; i < 16; ++i) {
trsw -= 3;
if (bwi_rf_gain_max_reached(mac, i))
break;
}
} else {
trsw = 0x18;
}
/*
* Restore saved PHY/RF registers
*/
/* First 4 saved PHY registers need special processing */
for (i = 4; i < SAVE_PHY_MAX; ++i)
PHY_WRITE(mac, save_phy_regs[i], save_phy[i]);
bwi_phy_set_bbp_atten(mac, mac->mac_tpctl.bbp_atten);
for (i = 0; i < SAVE_RF_MAX; ++i)
RF_WRITE(mac, save_rf_regs[i], save_rf[i]);
PHY_WRITE(mac, save_phy_regs[2], save_phy[2] | 0x3);
DELAY(10);
PHY_WRITE(mac, save_phy_regs[2], save_phy[2]);
PHY_WRITE(mac, save_phy_regs[3], save_phy[3]);
PHY_WRITE(mac, save_phy_regs[0], save_phy[0]);
PHY_WRITE(mac, save_phy_regs[1], save_phy[1]);
/*
* Calculate gains
*/
rf->rf_lo_gain = (loop2 * 6) - (loop1 * 4) - 11;
rf->rf_rx_gain = trsw * 2;
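/*
 * Worked example with hypothetical loop values: loop1 = 9, loop2 = 8
 * and trsw = 0x18 would give rf_lo_gain = 8 * 6 - 9 * 4 - 11 = 1 and
 * rf_rx_gain = 0x18 * 2 = 48.
 */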
DPRINTF(mac->mac_sc, BWI_DBG_RF | BWI_DBG_INIT,
"lo gain: %u, rx gain: %u\n",
rf->rf_lo_gain, rf->rf_rx_gain);
#undef SAVE_RF_MAX
#undef SAVE_PHY_MAX
}
void
bwi_rf_init(struct bwi_mac *mac)
{
struct bwi_rf *rf = &mac->mac_rf;
if (rf->rf_type == BWI_RF_T_BCM2060) {
/* TODO: 11A */
} else {
if (rf->rf_flags & BWI_RF_F_INITED)
RF_WRITE(mac, 0x78, rf->rf_calib);
else
bwi_rf_init_bcm2050(mac);
}
}
static void
bwi_rf_off_11a(struct bwi_mac *mac)
{
RF_WRITE(mac, 0x4, 0xff);
RF_WRITE(mac, 0x5, 0xfb);
PHY_SETBITS(mac, 0x10, 0x8);
PHY_SETBITS(mac, 0x11, 0x8);
PHY_WRITE(mac, 0x15, 0xaa00);
}
static void
bwi_rf_off_11bg(struct bwi_mac *mac)
{
PHY_WRITE(mac, 0x15, 0xaa00);
}
static void
bwi_rf_off_11g_rev5(struct bwi_mac *mac)
{
PHY_SETBITS(mac, 0x811, 0x8c);
PHY_CLRBITS(mac, 0x812, 0x8c);
}
static void
bwi_rf_work_around(struct bwi_mac *mac, u_int chan)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_rf *rf = &mac->mac_rf;
if (chan == IEEE80211_CHAN_ANY) {
device_printf(sc->sc_dev, "%s invalid channel!!\n", __func__);
return;
}
if (rf->rf_type != BWI_RF_T_BCM2050 || rf->rf_rev >= 6)
return;
if (chan <= 10)
CSR_WRITE_2(sc, BWI_RF_CHAN, BWI_RF_2GHZ_CHAN(chan + 4));
else
CSR_WRITE_2(sc, BWI_RF_CHAN, BWI_RF_2GHZ_CHAN(1));
DELAY(1000);
CSR_WRITE_2(sc, BWI_RF_CHAN, BWI_RF_2GHZ_CHAN(chan));
}
static __inline struct bwi_rf_lo *
bwi_rf_lo_find(struct bwi_mac *mac, const struct bwi_tpctl *tpctl)
{
uint16_t rf_atten, bbp_atten;
int remap_rf_atten;
remap_rf_atten = 1;
if (tpctl == NULL) {
bbp_atten = 2;
rf_atten = 3;
} else {
if (tpctl->tp_ctrl1 == 3)
remap_rf_atten = 0;
bbp_atten = tpctl->bbp_atten;
rf_atten = tpctl->rf_atten;
if (bbp_atten > 6)
bbp_atten = 6;
}
if (remap_rf_atten) {
#define MAP_MAX 10
static const uint16_t map[MAP_MAX] =
{ 11, 10, 11, 12, 13, 12, 13, 12, 13, 12 };
#if 0
KASSERT(rf_atten < MAP_MAX, ("rf_atten %d", rf_atten));
rf_atten = map[rf_atten];
#else
if (rf_atten >= MAP_MAX) {
rf_atten = 0; /* XXX */
} else {
rf_atten = map[rf_atten];
}
#endif
#undef MAP_MAX
}
return bwi_get_rf_lo(mac, rf_atten, bbp_atten);
}
void
bwi_rf_lo_adjust(struct bwi_mac *mac, const struct bwi_tpctl *tpctl)
{
const struct bwi_rf_lo *lo;
lo = bwi_rf_lo_find(mac, tpctl);
RF_LO_WRITE(mac, lo);
}
static void
bwi_rf_lo_write(struct bwi_mac *mac, const struct bwi_rf_lo *lo)
{
uint16_t val;
val = (uint8_t)lo->ctrl_lo;
val |= ((uint8_t)lo->ctrl_hi) << 8;
PHY_WRITE(mac, BWI_PHYR_RF_LO, val);
}
static int
bwi_rf_gain_max_reached(struct bwi_mac *mac, int idx)
{
PHY_FILT_SETBITS(mac, 0x812, 0xf0ff, idx << 8);
PHY_FILT_SETBITS(mac, 0x15, 0xfff, 0xa000);
PHY_SETBITS(mac, 0x15, 0xf000);
DELAY(20);
return (PHY_READ(mac, 0x2d) >= 0xdfc);
}
/* XXX use bitmap array */
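/*
 * bitswap4() reverses the low 4 bits of its argument, e.g.
 * bitswap4(0x1) == 0x8 and bitswap4(0xc) == 0x3; only the low nibble
 * of the input contributes to the result.
 */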
static __inline uint16_t
bitswap4(uint16_t val)
{
uint16_t ret;
ret = (val & 0x8) >> 3;
ret |= (val & 0x4) >> 1;
ret |= (val & 0x2) << 1;
ret |= (val & 0x1) << 3;
return ret;
}
static __inline uint16_t
bwi_phy812_value(struct bwi_mac *mac, uint16_t lpd)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_phy *phy = &mac->mac_phy;
struct bwi_rf *rf = &mac->mac_rf;
uint16_t lo_gain, ext_lna, loop;
if ((phy->phy_flags & BWI_PHY_F_LINKED) == 0)
return 0;
lo_gain = rf->rf_lo_gain;
if (rf->rf_rev == 8)
lo_gain += 0x3e;
else
lo_gain += 0x26;
if (lo_gain >= 0x46) {
lo_gain -= 0x46;
ext_lna = 0x3000;
} else if (lo_gain >= 0x3a) {
lo_gain -= 0x3a;
ext_lna = 0x1000;
} else if (lo_gain >= 0x2e) {
lo_gain -= 0x2e;
ext_lna = 0x2000;
} else {
lo_gain -= 0x10;
ext_lna = 0;
}
for (loop = 0; loop < 16; ++loop) {
lo_gain -= (6 * loop);
if (lo_gain < 6)
break;
}
if (phy->phy_rev >= 7 && (sc->sc_card_flags & BWI_CARD_F_EXT_LNA)) {
if (ext_lna)
ext_lna |= 0x8000;
ext_lna |= (loop << 8);
switch (lpd) {
case 0x011:
return 0x8f92;
case 0x001:
return (0x8092 | ext_lna);
case 0x101:
return (0x2092 | ext_lna);
case 0x100:
return (0x2093 | ext_lna);
default:
panic("unsupported lpd\n");
}
} else {
ext_lna |= (loop << 8);
switch (lpd) {
case 0x011:
return 0xf92;
case 0x001:
case 0x101:
return (0x92 | ext_lna);
case 0x100:
return (0x93 | ext_lna);
default:
panic("unsupported lpd\n");
}
}
panic("never reached\n");
return 0;
}
void
bwi_rf_init_bcm2050(struct bwi_mac *mac)
{
#define SAVE_RF_MAX 3
#define SAVE_PHY_COMM_MAX 4
#define SAVE_PHY_11G_MAX 6
static const uint16_t save_rf_regs[SAVE_RF_MAX] =
{ 0x0043, 0x0051, 0x0052 };
static const uint16_t save_phy_regs_comm[SAVE_PHY_COMM_MAX] =
{ 0x0015, 0x005a, 0x0059, 0x0058 };
static const uint16_t save_phy_regs_11g[SAVE_PHY_11G_MAX] =
{ 0x0811, 0x0812, 0x0814, 0x0815, 0x0429, 0x0802 };
uint16_t save_rf[SAVE_RF_MAX];
uint16_t save_phy_comm[SAVE_PHY_COMM_MAX];
uint16_t save_phy_11g[SAVE_PHY_11G_MAX];
uint16_t phyr_35, phyr_30 = 0, rfr_78, phyr_80f = 0, phyr_810 = 0;
uint16_t bphy_ctrl = 0, bbp_atten, rf_chan_ex;
uint16_t phy812_val;
uint16_t calib;
uint32_t test_lim, test;
struct bwi_softc *sc = mac->mac_sc;
struct bwi_phy *phy = &mac->mac_phy;
struct bwi_rf *rf = &mac->mac_rf;
int i;
/*
* Save registers for later restoration
*/
for (i = 0; i < SAVE_RF_MAX; ++i)
save_rf[i] = RF_READ(mac, save_rf_regs[i]);
for (i = 0; i < SAVE_PHY_COMM_MAX; ++i)
save_phy_comm[i] = PHY_READ(mac, save_phy_regs_comm[i]);
if (phy->phy_mode == IEEE80211_MODE_11B) {
phyr_30 = PHY_READ(mac, 0x30);
bphy_ctrl = CSR_READ_2(sc, BWI_BPHY_CTRL);
PHY_WRITE(mac, 0x30, 0xff);
CSR_WRITE_2(sc, BWI_BPHY_CTRL, 0x3f3f);
} else if ((phy->phy_flags & BWI_PHY_F_LINKED) || phy->phy_rev >= 2) {
for (i = 0; i < SAVE_PHY_11G_MAX; ++i) {
save_phy_11g[i] =
PHY_READ(mac, save_phy_regs_11g[i]);
}
PHY_SETBITS(mac, 0x814, 0x3);
PHY_CLRBITS(mac, 0x815, 0x3);
PHY_CLRBITS(mac, 0x429, 0x8000);
PHY_CLRBITS(mac, 0x802, 0x3);
phyr_80f = PHY_READ(mac, 0x80f);
phyr_810 = PHY_READ(mac, 0x810);
if (phy->phy_rev >= 3)
PHY_WRITE(mac, 0x80f, 0xc020);
else
PHY_WRITE(mac, 0x80f, 0x8020);
PHY_WRITE(mac, 0x810, 0);
phy812_val = bwi_phy812_value(mac, 0x011);
PHY_WRITE(mac, 0x812, phy812_val);
if (phy->phy_rev < 7 ||
(sc->sc_card_flags & BWI_CARD_F_EXT_LNA) == 0)
PHY_WRITE(mac, 0x811, 0x1b3);
else
PHY_WRITE(mac, 0x811, 0x9b3);
}
CSR_SETBITS_2(sc, BWI_RF_ANTDIV, 0x8000);
phyr_35 = PHY_READ(mac, 0x35);
PHY_CLRBITS(mac, 0x35, 0x80);
bbp_atten = CSR_READ_2(sc, BWI_BBP_ATTEN);
rf_chan_ex = CSR_READ_2(sc, BWI_RF_CHAN_EX);
if (phy->phy_version == 0) {
CSR_WRITE_2(sc, BWI_BBP_ATTEN, 0x122);
} else {
if (phy->phy_version >= 2)
PHY_FILT_SETBITS(mac, 0x3, 0xffbf, 0x40);
CSR_SETBITS_2(sc, BWI_RF_CHAN_EX, 0x2000);
}
calib = bwi_rf_calibval(mac);
if (phy->phy_mode == IEEE80211_MODE_11B)
RF_WRITE(mac, 0x78, 0x26);
if ((phy->phy_flags & BWI_PHY_F_LINKED) || phy->phy_rev >= 2) {
phy812_val = bwi_phy812_value(mac, 0x011);
PHY_WRITE(mac, 0x812, phy812_val);
}
PHY_WRITE(mac, 0x15, 0xbfaf);
PHY_WRITE(mac, 0x2b, 0x1403);
if ((phy->phy_flags & BWI_PHY_F_LINKED) || phy->phy_rev >= 2) {
phy812_val = bwi_phy812_value(mac, 0x001);
PHY_WRITE(mac, 0x812, phy812_val);
}
PHY_WRITE(mac, 0x15, 0xbfa0);
RF_SETBITS(mac, 0x51, 0x4);
if (rf->rf_rev == 8) {
RF_WRITE(mac, 0x43, 0x1f);
} else {
RF_WRITE(mac, 0x52, 0);
RF_FILT_SETBITS(mac, 0x43, 0xfff0, 0x9);
}
test_lim = 0;
PHY_WRITE(mac, 0x58, 0);
for (i = 0; i < 16; ++i) {
PHY_WRITE(mac, 0x5a, 0x480);
PHY_WRITE(mac, 0x59, 0xc810);
PHY_WRITE(mac, 0x58, 0xd);
if ((phy->phy_flags & BWI_PHY_F_LINKED) || phy->phy_rev >= 2) {
phy812_val = bwi_phy812_value(mac, 0x101);
PHY_WRITE(mac, 0x812, phy812_val);
}
PHY_WRITE(mac, 0x15, 0xafb0);
DELAY(10);
if ((phy->phy_flags & BWI_PHY_F_LINKED) || phy->phy_rev >= 2) {
phy812_val = bwi_phy812_value(mac, 0x101);
PHY_WRITE(mac, 0x812, phy812_val);
}
PHY_WRITE(mac, 0x15, 0xefb0);
DELAY(10);
if ((phy->phy_flags & BWI_PHY_F_LINKED) || phy->phy_rev >= 2) {
phy812_val = bwi_phy812_value(mac, 0x100);
PHY_WRITE(mac, 0x812, phy812_val);
}
PHY_WRITE(mac, 0x15, 0xfff0);
DELAY(20);
test_lim += PHY_READ(mac, 0x2d);
PHY_WRITE(mac, 0x58, 0);
if ((phy->phy_flags & BWI_PHY_F_LINKED) || phy->phy_rev >= 2) {
phy812_val = bwi_phy812_value(mac, 0x101);
PHY_WRITE(mac, 0x812, phy812_val);
}
PHY_WRITE(mac, 0x15, 0xafb0);
}
++test_lim;
test_lim >>= 9;
DELAY(10);
test = 0;
PHY_WRITE(mac, 0x58, 0);
for (i = 0; i < 16; ++i) {
int j;
rfr_78 = (bitswap4(i) << 1) | 0x20;
RF_WRITE(mac, 0x78, rfr_78);
DELAY(10);
/* NB: This block is slightly different from the one above */
for (j = 0; j < 16; ++j) {
PHY_WRITE(mac, 0x5a, 0xd80);
PHY_WRITE(mac, 0x59, 0xc810);
PHY_WRITE(mac, 0x58, 0xd);
if ((phy->phy_flags & BWI_PHY_F_LINKED) ||
phy->phy_rev >= 2) {
phy812_val = bwi_phy812_value(mac, 0x101);
PHY_WRITE(mac, 0x812, phy812_val);
}
PHY_WRITE(mac, 0x15, 0xafb0);
DELAY(10);
if ((phy->phy_flags & BWI_PHY_F_LINKED) ||
phy->phy_rev >= 2) {
phy812_val = bwi_phy812_value(mac, 0x101);
PHY_WRITE(mac, 0x812, phy812_val);
}
PHY_WRITE(mac, 0x15, 0xefb0);
DELAY(10);
if ((phy->phy_flags & BWI_PHY_F_LINKED) ||
phy->phy_rev >= 2) {
phy812_val = bwi_phy812_value(mac, 0x100);
PHY_WRITE(mac, 0x812, phy812_val);
}
PHY_WRITE(mac, 0x15, 0xfff0);
DELAY(10);
test += PHY_READ(mac, 0x2d);
PHY_WRITE(mac, 0x58, 0);
if ((phy->phy_flags & BWI_PHY_F_LINKED) ||
phy->phy_rev >= 2) {
phy812_val = bwi_phy812_value(mac, 0x101);
PHY_WRITE(mac, 0x812, phy812_val);
}
PHY_WRITE(mac, 0x15, 0xafb0);
}
++test;
test >>= 8;
if (test > test_lim)
break;
}
if (i > 15)
rf->rf_calib = rfr_78;
else
rf->rf_calib = calib;
if (rf->rf_calib != 0xffff) {
DPRINTF(sc, BWI_DBG_RF | BWI_DBG_INIT,
"RF calibration value: 0x%04x\n", rf->rf_calib);
rf->rf_flags |= BWI_RF_F_INITED;
}
/*
* Restore trashed registers
*/
PHY_WRITE(mac, save_phy_regs_comm[0], save_phy_comm[0]);
for (i = 0; i < SAVE_RF_MAX; ++i) {
int pos = (i + 1) % SAVE_RF_MAX;
RF_WRITE(mac, save_rf_regs[pos], save_rf[pos]);
}
for (i = 1; i < SAVE_PHY_COMM_MAX; ++i)
PHY_WRITE(mac, save_phy_regs_comm[i], save_phy_comm[i]);
CSR_WRITE_2(sc, BWI_BBP_ATTEN, bbp_atten);
if (phy->phy_version != 0)
CSR_WRITE_2(sc, BWI_RF_CHAN_EX, rf_chan_ex);
PHY_WRITE(mac, 0x35, phyr_35);
bwi_rf_work_around(mac, rf->rf_curchan);
if (phy->phy_mode == IEEE80211_MODE_11B) {
PHY_WRITE(mac, 0x30, phyr_30);
CSR_WRITE_2(sc, BWI_BPHY_CTRL, bphy_ctrl);
} else if ((phy->phy_flags & BWI_PHY_F_LINKED) || phy->phy_rev >= 2) {
/* XXX Spec only says when PHY is linked (gmode) */
CSR_CLRBITS_2(sc, BWI_RF_ANTDIV, 0x8000);
for (i = 0; i < SAVE_PHY_11G_MAX; ++i) {
PHY_WRITE(mac, save_phy_regs_11g[i],
save_phy_11g[i]);
}
PHY_WRITE(mac, 0x80f, phyr_80f);
PHY_WRITE(mac, 0x810, phyr_810);
}
#undef SAVE_PHY_11G_MAX
#undef SAVE_PHY_COMM_MAX
#undef SAVE_RF_MAX
}
static uint16_t
bwi_rf_calibval(struct bwi_mac *mac)
{
/* http://bcm-specs.sipsolutions.net/RCCTable */
static const uint16_t rf_calibvals[] = {
0x2, 0x3, 0x1, 0xf, 0x6, 0x7, 0x5, 0xf,
0xa, 0xb, 0x9, 0xf, 0xe, 0xf, 0xd, 0xf
};
uint16_t val, calib;
int idx;
val = RF_READ(mac, BWI_RFR_BBP_ATTEN);
idx = __SHIFTOUT(val, BWI_RFR_BBP_ATTEN_CALIB_IDX);
KASSERT(idx < (int)(sizeof(rf_calibvals) / sizeof(rf_calibvals[0])),
("idx %d", idx));
calib = rf_calibvals[idx] << 1;
if (val & BWI_RFR_BBP_ATTEN_CALIB_BIT)
calib |= 0x1;
calib |= 0x20;
return calib;
}
static __inline int32_t
_bwi_adjust_devide(int32_t num, int32_t den)
{
if (num < 0)
return (num / den);
else
return (num + den / 2) / den;
}
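/*
 * The helper above rounds to nearest for non-negative numerators and
 * truncates toward zero for negative ones, e.g.
 * _bwi_adjust_devide(7, 2) == 4 while _bwi_adjust_devide(-7, 2) == -3.
 */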
/*
* http://bcm-specs.sipsolutions.net/TSSI_to_DBM_Table
* "calculating table entries"
*/
static int
bwi_rf_calc_txpower(int8_t *txpwr, uint8_t idx, const int16_t pa_params[])
{
int32_t m1, m2, f, dbm;
int i;
m1 = _bwi_adjust_devide(16 * pa_params[0] + idx * pa_params[1], 32);
m2 = imax(_bwi_adjust_devide(32768 + idx * pa_params[2], 256), 1);
#define ITER_MAX 16
f = 256;
for (i = 0; i < ITER_MAX; ++i) {
int32_t q, d;
q = _bwi_adjust_devide(
f * 4096 - _bwi_adjust_devide(m2 * f, 16) * f, 2048);
d = abs(q - f);
f = q;
if (d < 2)
break;
}
if (i == ITER_MAX)
return EINVAL;
#undef ITER_MAX
dbm = _bwi_adjust_devide(m1 * f, 8192);
if (dbm < -127)
dbm = -127;
else if (dbm > 128)
dbm = 128;
*txpwr = dbm;
return 0;
}
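/*
 * Minimal usage sketch (see bwi_rf_map_txpower() below): the TSSI to
 * dBm table is built by calling this helper once per TSSI index, e.g.
 *
 *	int8_t txpwr;
 *	if (bwi_rf_calc_txpower(&txpwr, tssi, pa_params) == 0)
 *		map[tssi] = txpwr;
 *
 * where 'tssi' and 'map' are illustrative names, not identifiers from
 * this file.
 */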
int
bwi_rf_map_txpower(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_rf *rf = &mac->mac_rf;
struct bwi_phy *phy = &mac->mac_phy;
uint16_t sprom_ofs, val, mask;
int16_t pa_params[3];
int error = 0, i, ant_gain, reg_txpower_max;
/*
* Find out max TX power
*/
val = bwi_read_sprom(sc, BWI_SPROM_MAX_TXPWR);
if (phy->phy_mode == IEEE80211_MODE_11A) {
rf->rf_txpower_max = __SHIFTOUT(val,
BWI_SPROM_MAX_TXPWR_MASK_11A);
} else {
rf->rf_txpower_max = __SHIFTOUT(val,
BWI_SPROM_MAX_TXPWR_MASK_11BG);
if ((sc->sc_card_flags & BWI_CARD_F_PA_GPIO9) &&
phy->phy_mode == IEEE80211_MODE_11G)
rf->rf_txpower_max -= 3;
}
if (rf->rf_txpower_max <= 0) {
device_printf(sc->sc_dev, "invalid max txpower in sprom\n");
rf->rf_txpower_max = 74;
}
DPRINTF(sc, BWI_DBG_RF | BWI_DBG_TXPOWER | BWI_DBG_ATTACH,
"max txpower from sprom: %d dBm\n", rf->rf_txpower_max);
/*
* Find out region/domain max TX power, which is adjusted
* by antenna gain and 1.5 dBm fluctuation as mentioned
* in the v3 spec.
*/
val = bwi_read_sprom(sc, BWI_SPROM_ANT_GAIN);
if (phy->phy_mode == IEEE80211_MODE_11A)
ant_gain = __SHIFTOUT(val, BWI_SPROM_ANT_GAIN_MASK_11A);
else
ant_gain = __SHIFTOUT(val, BWI_SPROM_ANT_GAIN_MASK_11BG);
if (ant_gain == 0xff) {
device_printf(sc->sc_dev, "invalid antenna gain in sprom\n");
ant_gain = 2;
}
ant_gain *= 4;
DPRINTF(sc, BWI_DBG_RF | BWI_DBG_TXPOWER | BWI_DBG_ATTACH,
"ant gain %d dBm\n", ant_gain);
reg_txpower_max = 90 - ant_gain - 6; /* XXX magic number */
DPRINTF(sc, BWI_DBG_RF | BWI_DBG_TXPOWER | BWI_DBG_ATTACH,
"region/domain max txpower %d dBm\n", reg_txpower_max);
/*
* Force max TX power within region/domain TX power limit
*/
if (rf->rf_txpower_max > reg_txpower_max)
rf->rf_txpower_max = reg_txpower_max;
DPRINTF(sc, BWI_DBG_RF | BWI_DBG_TXPOWER | BWI_DBG_ATTACH,
"max txpower %d dBm\n", rf->rf_txpower_max);
/*
* Create TSSI to TX power mapping
*/
if (sc->sc_bbp_id == BWI_BBPID_BCM4301 &&
rf->rf_type != BWI_RF_T_BCM2050) {
rf->rf_idle_tssi0 = BWI_DEFAULT_IDLE_TSSI;
bcopy(bwi_txpower_map_11b, rf->rf_txpower_map0,
sizeof(rf->rf_txpower_map0));
goto back;
}
#define IS_VALID_PA_PARAM(p) ((p) != 0 && (p) != -1)
#define N(arr) (int)(sizeof(arr) / sizeof(arr[0]))
/*
* Extract PA parameters
*/
if (phy->phy_mode == IEEE80211_MODE_11A)
sprom_ofs = BWI_SPROM_PA_PARAM_11A;
else
sprom_ofs = BWI_SPROM_PA_PARAM_11BG;
for (i = 0; i < N(pa_params); ++i)
pa_params[i] = (int16_t)bwi_read_sprom(sc, sprom_ofs + (i * 2));
for (i = 0; i < N(pa_params); ++i) {
/*
* If one of the PA parameters from SPROM is not valid,
* fall back to the default values, if there are any.
*/
if (!IS_VALID_PA_PARAM(pa_params[i])) {
const int8_t *txpower_map;
if (phy->phy_mode == IEEE80211_MODE_11A) {
device_printf(sc->sc_dev,
"no tssi2dbm table for 11a PHY\n");
return ENXIO;
}
if (phy->phy_mode == IEEE80211_MODE_11G) {
DPRINTF(sc,
BWI_DBG_RF | BWI_DBG_TXPOWER | BWI_DBG_ATTACH,
"%s\n", "use default 11g TSSI map");
txpower_map = bwi_txpower_map_11g;
} else {
DPRINTF(sc,
BWI_DBG_RF | BWI_DBG_TXPOWER | BWI_DBG_ATTACH,
"%s\n", "use default 11b TSSI map");
txpower_map = bwi_txpower_map_11b;
}
rf->rf_idle_tssi0 = BWI_DEFAULT_IDLE_TSSI;
bcopy(txpower_map, rf->rf_txpower_map0,
sizeof(rf->rf_txpower_map0));
goto back;
}
}
#undef N
/*
* All of the PA parameters from SPROM are valid.
*/
/*
* Extract idle TSSI from SPROM.
*/
val = bwi_read_sprom(sc, BWI_SPROM_IDLE_TSSI);
DPRINTF(sc, BWI_DBG_RF | BWI_DBG_TXPOWER | BWI_DBG_ATTACH,
"sprom idle tssi: 0x%04x\n", val);
if (phy->phy_mode == IEEE80211_MODE_11A)
mask = BWI_SPROM_IDLE_TSSI_MASK_11A;
else
mask = BWI_SPROM_IDLE_TSSI_MASK_11BG;
rf->rf_idle_tssi0 = (int)__SHIFTOUT(val, mask);
if (!IS_VALID_PA_PARAM(rf->rf_idle_tssi0))
rf->rf_idle_tssi0 = 62;
#undef IS_VALID_PA_PARAM
/*
* Calculate TX power map, which is indexed by TSSI
*/
DPRINTF(sc, BWI_DBG_RF | BWI_DBG_ATTACH | BWI_DBG_TXPOWER,
"%s\n", "TSSI-TX power map:");
for (i = 0; i < BWI_TSSI_MAX; ++i) {
error = bwi_rf_calc_txpower(&rf->rf_txpower_map0[i], i,
pa_params);
if (error) {
device_printf(sc->sc_dev,
"bwi_rf_calc_txpower failed\n");
break;
}
#ifdef BWI_DEBUG
if (i != 0 && i % 8 == 0) {
_DPRINTF(sc,
BWI_DBG_RF | BWI_DBG_ATTACH | BWI_DBG_TXPOWER,
"%s\n", "");
}
#endif
_DPRINTF(sc, BWI_DBG_RF | BWI_DBG_ATTACH | BWI_DBG_TXPOWER,
"%d ", rf->rf_txpower_map0[i]);
}
_DPRINTF(sc, BWI_DBG_RF | BWI_DBG_ATTACH | BWI_DBG_TXPOWER,
"%s\n", "");
back:
DPRINTF(sc, BWI_DBG_RF | BWI_DBG_TXPOWER | BWI_DBG_ATTACH,
"idle tssi0: %d\n", rf->rf_idle_tssi0);
return error;
}
static void
bwi_rf_lo_update_11g(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
struct bwi_rf *rf = &mac->mac_rf;
struct bwi_phy *phy = &mac->mac_phy;
struct bwi_tpctl *tpctl = &mac->mac_tpctl;
struct rf_saveregs regs;
uint16_t ant_div, chan_ex;
uint8_t devi_ctrl;
u_int orig_chan;
/*
* Save RF/PHY registers for later restoration
*/
orig_chan = rf->rf_curchan;
bzero(&regs, sizeof(regs));
if (phy->phy_flags & BWI_PHY_F_LINKED) {
SAVE_PHY_REG(mac, &regs, 429);
SAVE_PHY_REG(mac, &regs, 802);
PHY_WRITE(mac, 0x429, regs.phy_429 & 0x7fff);
PHY_WRITE(mac, 0x802, regs.phy_802 & 0xfffc);
}
ant_div = CSR_READ_2(sc, BWI_RF_ANTDIV);
CSR_WRITE_2(sc, BWI_RF_ANTDIV, ant_div | 0x8000);
chan_ex = CSR_READ_2(sc, BWI_RF_CHAN_EX);
SAVE_PHY_REG(mac, &regs, 15);
SAVE_PHY_REG(mac, &regs, 2a);
SAVE_PHY_REG(mac, &regs, 35);
SAVE_PHY_REG(mac, &regs, 60);
SAVE_RF_REG(mac, &regs, 43);
SAVE_RF_REG(mac, &regs, 7a);
SAVE_RF_REG(mac, &regs, 52);
if (phy->phy_flags & BWI_PHY_F_LINKED) {
SAVE_PHY_REG(mac, &regs, 811);
SAVE_PHY_REG(mac, &regs, 812);
SAVE_PHY_REG(mac, &regs, 814);
SAVE_PHY_REG(mac, &regs, 815);
}
/* Force to channel 6 */
bwi_rf_set_chan(mac, 6, 0);
if (phy->phy_flags & BWI_PHY_F_LINKED) {
PHY_WRITE(mac, 0x429, regs.phy_429 & 0x7fff);
PHY_WRITE(mac, 0x802, regs.phy_802 & 0xfffc);
bwi_mac_dummy_xmit(mac);
}
RF_WRITE(mac, 0x43, 0x6);
bwi_phy_set_bbp_atten(mac, 2);
CSR_WRITE_2(sc, BWI_RF_CHAN_EX, 0);
PHY_WRITE(mac, 0x2e, 0x7f);
PHY_WRITE(mac, 0x80f, 0x78);
PHY_WRITE(mac, 0x35, regs.phy_35 & 0xff7f);
RF_WRITE(mac, 0x7a, regs.rf_7a & 0xfff0);
PHY_WRITE(mac, 0x2b, 0x203);
PHY_WRITE(mac, 0x2a, 0x8a3);
if (phy->phy_flags & BWI_PHY_F_LINKED) {
PHY_WRITE(mac, 0x814, regs.phy_814 | 0x3);
PHY_WRITE(mac, 0x815, regs.phy_815 & 0xfffc);
PHY_WRITE(mac, 0x811, 0x1b3);
PHY_WRITE(mac, 0x812, 0xb2);
}
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ if ((sc->sc_flags & BWI_F_RUNNING) == 0)
tpctl->tp_ctrl2 = bwi_rf_get_tp_ctrl2(mac);
PHY_WRITE(mac, 0x80f, 0x8078);
/*
* Measure all RF LO
*/
devi_ctrl = _bwi_rf_lo_update_11g(mac, regs.rf_7a);
/*
* Restore saved RF/PHY registers
*/
if (phy->phy_flags & BWI_PHY_F_LINKED) {
PHY_WRITE(mac, 0x15, 0xe300);
PHY_WRITE(mac, 0x812, (devi_ctrl << 8) | 0xa0);
DELAY(5);
PHY_WRITE(mac, 0x812, (devi_ctrl << 8) | 0xa2);
DELAY(2);
PHY_WRITE(mac, 0x812, (devi_ctrl << 8) | 0xa3);
} else {
PHY_WRITE(mac, 0x15, devi_ctrl | 0xefa0);
}
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ if ((sc->sc_flags & BWI_F_RUNNING) == 0)
tpctl = NULL;
bwi_rf_lo_adjust(mac, tpctl);
PHY_WRITE(mac, 0x2e, 0x807f);
if (phy->phy_flags & BWI_PHY_F_LINKED)
PHY_WRITE(mac, 0x2f, 0x202);
else
PHY_WRITE(mac, 0x2f, 0x101);
CSR_WRITE_2(sc, BWI_RF_CHAN_EX, chan_ex);
RESTORE_PHY_REG(mac, &regs, 15);
RESTORE_PHY_REG(mac, &regs, 2a);
RESTORE_PHY_REG(mac, &regs, 35);
RESTORE_PHY_REG(mac, &regs, 60);
RESTORE_RF_REG(mac, &regs, 43);
RESTORE_RF_REG(mac, &regs, 7a);
regs.rf_52 &= 0xf0;
regs.rf_52 |= (RF_READ(mac, 0x52) & 0xf);
RF_WRITE(mac, 0x52, regs.rf_52);
CSR_WRITE_2(sc, BWI_RF_ANTDIV, ant_div);
if (phy->phy_flags & BWI_PHY_F_LINKED) {
RESTORE_PHY_REG(mac, &regs, 811);
RESTORE_PHY_REG(mac, &regs, 812);
RESTORE_PHY_REG(mac, &regs, 814);
RESTORE_PHY_REG(mac, &regs, 815);
RESTORE_PHY_REG(mac, &regs, 429);
RESTORE_PHY_REG(mac, &regs, 802);
}
bwi_rf_set_chan(mac, orig_chan, 1);
}
static uint32_t
bwi_rf_lo_devi_measure(struct bwi_mac *mac, uint16_t ctrl)
{
struct bwi_phy *phy = &mac->mac_phy;
uint32_t devi = 0;
int i;
if (phy->phy_flags & BWI_PHY_F_LINKED)
ctrl <<= 8;
for (i = 0; i < 8; ++i) {
if (phy->phy_flags & BWI_PHY_F_LINKED) {
PHY_WRITE(mac, 0x15, 0xe300);
PHY_WRITE(mac, 0x812, ctrl | 0xb0);
DELAY(5);
PHY_WRITE(mac, 0x812, ctrl | 0xb2);
DELAY(2);
PHY_WRITE(mac, 0x812, ctrl | 0xb3);
DELAY(4);
PHY_WRITE(mac, 0x15, 0xf300);
} else {
PHY_WRITE(mac, 0x15, ctrl | 0xefa0);
DELAY(2);
PHY_WRITE(mac, 0x15, ctrl | 0xefe0);
DELAY(4);
PHY_WRITE(mac, 0x15, ctrl | 0xffe0);
}
DELAY(8);
devi += PHY_READ(mac, 0x2d);
}
return devi;
}
static uint16_t
bwi_rf_get_tp_ctrl2(struct bwi_mac *mac)
{
uint32_t devi_min;
uint16_t tp_ctrl2 = 0;
int i;
RF_WRITE(mac, 0x52, 0);
DELAY(10);
devi_min = bwi_rf_lo_devi_measure(mac, 0);
for (i = 0; i < 16; ++i) {
uint32_t devi;
RF_WRITE(mac, 0x52, i);
DELAY(10);
devi = bwi_rf_lo_devi_measure(mac, 0);
if (devi < devi_min) {
devi_min = devi;
tp_ctrl2 = i;
}
}
return tp_ctrl2;
}
static uint8_t
_bwi_rf_lo_update_11g(struct bwi_mac *mac, uint16_t orig_rf7a)
{
#define RF_ATTEN_LISTSZ 14
#define BBP_ATTEN_MAX 4 /* half */
static const int rf_atten_list[RF_ATTEN_LISTSZ] =
{ 3, 1, 5, 7, 9, 2, 0, 4, 6, 8, 1, 2, 3, 4 };
static const int rf_atten_init_list[RF_ATTEN_LISTSZ] =
{ 0, 3, 1, 5, 7, 3, 2, 0, 4, 6, -1, -1, -1, -1 };
static const int rf_lo_measure_order[RF_ATTEN_LISTSZ] =
{ 3, 1, 5, 7, 9, 2, 0, 4, 6, 8, 10, 11, 12, 13 };
- struct ifnet *ifp = mac->mac_sc->sc_ifp;
+ struct bwi_softc *sc = mac->mac_sc;
struct bwi_rf_lo lo_save, *lo;
uint8_t devi_ctrl = 0;
int idx, adj_rf7a = 0;
bzero(&lo_save, sizeof(lo_save));
for (idx = 0; idx < RF_ATTEN_LISTSZ; ++idx) {
int init_rf_atten = rf_atten_init_list[idx];
int rf_atten = rf_atten_list[idx];
int bbp_atten;
for (bbp_atten = 0; bbp_atten < BBP_ATTEN_MAX; ++bbp_atten) {
uint16_t tp_ctrl2, rf7a;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ if ((sc->sc_flags & BWI_F_RUNNING) == 0) {
if (idx == 0) {
bzero(&lo_save, sizeof(lo_save));
} else if (init_rf_atten < 0) {
lo = bwi_get_rf_lo(mac,
rf_atten, 2 * bbp_atten);
bcopy(lo, &lo_save, sizeof(lo_save));
} else {
lo = bwi_get_rf_lo(mac,
init_rf_atten, 0);
bcopy(lo, &lo_save, sizeof(lo_save));
}
devi_ctrl = 0;
adj_rf7a = 0;
/*
* XXX
* Linux driver overflows 'val'
*/
if (init_rf_atten >= 0) {
int val;
val = rf_atten * 2 + bbp_atten;
if (val > 14) {
adj_rf7a = 1;
if (val > 17)
devi_ctrl = 1;
if (val > 19)
devi_ctrl = 2;
}
}
} else {
lo = bwi_get_rf_lo(mac,
rf_atten, 2 * bbp_atten);
if (!bwi_rf_lo_isused(mac, lo))
continue;
bcopy(lo, &lo_save, sizeof(lo_save));
devi_ctrl = 3;
adj_rf7a = 0;
}
RF_WRITE(mac, BWI_RFR_ATTEN, rf_atten);
tp_ctrl2 = mac->mac_tpctl.tp_ctrl2;
if (init_rf_atten < 0)
tp_ctrl2 |= (3 << 4);
RF_WRITE(mac, BWI_RFR_TXPWR, tp_ctrl2);
DELAY(10);
bwi_phy_set_bbp_atten(mac, bbp_atten * 2);
rf7a = orig_rf7a & 0xfff0;
if (adj_rf7a)
rf7a |= 0x8;
RF_WRITE(mac, 0x7a, rf7a);
lo = bwi_get_rf_lo(mac,
rf_lo_measure_order[idx], bbp_atten * 2);
bwi_rf_lo_measure_11g(mac, &lo_save, lo, devi_ctrl);
}
}
return devi_ctrl;
#undef RF_ATTEN_LISTSZ
#undef BBP_ATTEN_MAX
}
static void
bwi_rf_lo_measure_11g(struct bwi_mac *mac, const struct bwi_rf_lo *src_lo,
struct bwi_rf_lo *dst_lo, uint8_t devi_ctrl)
{
#define LO_ADJUST_MIN 1
#define LO_ADJUST_MAX 8
#define LO_ADJUST(hi, lo) { .ctrl_hi = hi, .ctrl_lo = lo }
static const struct bwi_rf_lo rf_lo_adjust[LO_ADJUST_MAX] = {
LO_ADJUST(1, 1),
LO_ADJUST(1, 0),
LO_ADJUST(1, -1),
LO_ADJUST(0, -1),
LO_ADJUST(-1, -1),
LO_ADJUST(-1, 0),
LO_ADJUST(-1, 1),
LO_ADJUST(0, 1)
};
#undef LO_ADJUST
struct bwi_rf_lo lo_min;
uint32_t devi_min;
int found, loop_count, adjust_state;
bcopy(src_lo, &lo_min, sizeof(lo_min));
RF_LO_WRITE(mac, &lo_min);
devi_min = bwi_rf_lo_devi_measure(mac, devi_ctrl);
loop_count = 12; /* XXX */
adjust_state = 0;
do {
struct bwi_rf_lo lo_base;
int i, fin;
found = 0;
if (adjust_state == 0) {
i = LO_ADJUST_MIN;
fin = LO_ADJUST_MAX;
} else if (adjust_state % 2 == 0) {
i = adjust_state - 1;
fin = adjust_state + 1;
} else {
i = adjust_state - 2;
fin = adjust_state + 2;
}
if (i < LO_ADJUST_MIN)
i += LO_ADJUST_MAX;
KASSERT(i <= LO_ADJUST_MAX && i >= LO_ADJUST_MIN, ("i %d", i));
if (fin > LO_ADJUST_MAX)
fin -= LO_ADJUST_MAX;
KASSERT(fin <= LO_ADJUST_MAX && fin >= LO_ADJUST_MIN,
("fin %d", fin));
bcopy(&lo_min, &lo_base, sizeof(lo_base));
for (;;) {
struct bwi_rf_lo lo;
lo.ctrl_hi = lo_base.ctrl_hi +
rf_lo_adjust[i - 1].ctrl_hi;
lo.ctrl_lo = lo_base.ctrl_lo +
rf_lo_adjust[i - 1].ctrl_lo;
if (abs(lo.ctrl_lo) < 9 && abs(lo.ctrl_hi) < 9) {
uint32_t devi;
RF_LO_WRITE(mac, &lo);
devi = bwi_rf_lo_devi_measure(mac, devi_ctrl);
if (devi < devi_min) {
devi_min = devi;
adjust_state = i;
found = 1;
bcopy(&lo, &lo_min, sizeof(lo_min));
}
}
if (i == fin)
break;
if (i == LO_ADJUST_MAX)
i = LO_ADJUST_MIN;
else
++i;
}
} while (loop_count-- && found);
bcopy(&lo_min, dst_lo, sizeof(*dst_lo));
#undef LO_ADJUST_MIN
#undef LO_ADJUST_MAX
}
static void
bwi_rf_calc_nrssi_slope_11b(struct bwi_mac *mac)
{
#define SAVE_RF_MAX 3
#define SAVE_PHY_MAX 8
static const uint16_t save_rf_regs[SAVE_RF_MAX] =
{ 0x7a, 0x52, 0x43 };
static const uint16_t save_phy_regs[SAVE_PHY_MAX] =
{ 0x30, 0x26, 0x15, 0x2a, 0x20, 0x5a, 0x59, 0x58 };
struct bwi_softc *sc = mac->mac_sc;
struct bwi_rf *rf = &mac->mac_rf;
struct bwi_phy *phy = &mac->mac_phy;
uint16_t save_rf[SAVE_RF_MAX];
uint16_t save_phy[SAVE_PHY_MAX];
uint16_t ant_div, bbp_atten, chan_ex;
int16_t nrssi[2];
int i;
/*
* Save RF/PHY registers for later restoration
*/
for (i = 0; i < SAVE_RF_MAX; ++i)
save_rf[i] = RF_READ(mac, save_rf_regs[i]);
for (i = 0; i < SAVE_PHY_MAX; ++i)
save_phy[i] = PHY_READ(mac, save_phy_regs[i]);
ant_div = CSR_READ_2(sc, BWI_RF_ANTDIV);
bbp_atten = CSR_READ_2(sc, BWI_BBP_ATTEN);
chan_ex = CSR_READ_2(sc, BWI_RF_CHAN_EX);
/*
* Calculate nrssi0
*/
if (phy->phy_rev >= 5)
RF_CLRBITS(mac, 0x7a, 0xff80);
else
RF_CLRBITS(mac, 0x7a, 0xfff0);
PHY_WRITE(mac, 0x30, 0xff);
CSR_WRITE_2(sc, BWI_BPHY_CTRL, 0x7f7f);
PHY_WRITE(mac, 0x26, 0);
PHY_SETBITS(mac, 0x15, 0x20);
PHY_WRITE(mac, 0x2a, 0x8a3);
RF_SETBITS(mac, 0x7a, 0x80);
nrssi[0] = (int16_t)PHY_READ(mac, 0x27);
/*
* Calculate nrssi1
*/
RF_CLRBITS(mac, 0x7a, 0xff80);
if (phy->phy_version >= 2)
CSR_WRITE_2(sc, BWI_BBP_ATTEN, 0x40);
else if (phy->phy_version == 0)
CSR_WRITE_2(sc, BWI_BBP_ATTEN, 0x122);
else
CSR_CLRBITS_2(sc, BWI_RF_CHAN_EX, 0xdfff);
PHY_WRITE(mac, 0x20, 0x3f3f);
PHY_WRITE(mac, 0x15, 0xf330);
RF_WRITE(mac, 0x5a, 0x60);
RF_CLRBITS(mac, 0x43, 0xff0f);
PHY_WRITE(mac, 0x5a, 0x480);
PHY_WRITE(mac, 0x59, 0x810);
PHY_WRITE(mac, 0x58, 0xd);
DELAY(20);
nrssi[1] = (int16_t)PHY_READ(mac, 0x27);
/*
* Restore saved RF/PHY registers
*/
PHY_WRITE(mac, save_phy_regs[0], save_phy[0]);
RF_WRITE(mac, save_rf_regs[0], save_rf[0]);
CSR_WRITE_2(sc, BWI_RF_ANTDIV, ant_div);
for (i = 1; i < 4; ++i)
PHY_WRITE(mac, save_phy_regs[i], save_phy[i]);
bwi_rf_work_around(mac, rf->rf_curchan);
if (phy->phy_version != 0)
CSR_WRITE_2(sc, BWI_RF_CHAN_EX, chan_ex);
for (; i < SAVE_PHY_MAX; ++i)
PHY_WRITE(mac, save_phy_regs[i], save_phy[i]);
for (i = 1; i < SAVE_RF_MAX; ++i)
RF_WRITE(mac, save_rf_regs[i], save_rf[i]);
/*
* Install calculated narrow RSSI values
*/
if (nrssi[0] == nrssi[1])
rf->rf_nrssi_slope = 0x10000;
else
rf->rf_nrssi_slope = 0x400000 / (nrssi[0] - nrssi[1]);
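/*
 * Example with hypothetical readings: nrssi[0] = -10 and
 * nrssi[1] = -26 give rf_nrssi_slope = 0x400000 / 16 = 0x40000.
 */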
if (nrssi[0] <= -4) {
rf->rf_nrssi[0] = nrssi[0];
rf->rf_nrssi[1] = nrssi[1];
}
#undef SAVE_RF_MAX
#undef SAVE_PHY_MAX
}
static void
bwi_rf_set_nrssi_ofs_11g(struct bwi_mac *mac)
{
#define SAVE_RF_MAX 2
#define SAVE_PHY_COMM_MAX 10
#define SAVE_PHY6_MAX 8
static const uint16_t save_rf_regs[SAVE_RF_MAX] =
{ 0x7a, 0x43 };
static const uint16_t save_phy_comm_regs[SAVE_PHY_COMM_MAX] = {
0x0001, 0x0811, 0x0812, 0x0814,
0x0815, 0x005a, 0x0059, 0x0058,
0x000a, 0x0003
};
static const uint16_t save_phy6_regs[SAVE_PHY6_MAX] = {
0x002e, 0x002f, 0x080f, 0x0810,
0x0801, 0x0060, 0x0014, 0x0478
};
struct bwi_phy *phy = &mac->mac_phy;
uint16_t save_rf[SAVE_RF_MAX];
uint16_t save_phy_comm[SAVE_PHY_COMM_MAX];
uint16_t save_phy6[SAVE_PHY6_MAX];
uint16_t rf7b = 0xffff;
int16_t nrssi;
int i, phy6_idx = 0;
for (i = 0; i < SAVE_PHY_COMM_MAX; ++i)
save_phy_comm[i] = PHY_READ(mac, save_phy_comm_regs[i]);
for (i = 0; i < SAVE_RF_MAX; ++i)
save_rf[i] = RF_READ(mac, save_rf_regs[i]);
PHY_CLRBITS(mac, 0x429, 0x8000);
PHY_FILT_SETBITS(mac, 0x1, 0x3fff, 0x4000);
PHY_SETBITS(mac, 0x811, 0xc);
PHY_FILT_SETBITS(mac, 0x812, 0xfff3, 0x4);
PHY_CLRBITS(mac, 0x802, 0x3);
if (phy->phy_rev >= 6) {
for (i = 0; i < SAVE_PHY6_MAX; ++i)
save_phy6[i] = PHY_READ(mac, save_phy6_regs[i]);
PHY_WRITE(mac, 0x2e, 0);
PHY_WRITE(mac, 0x2f, 0);
PHY_WRITE(mac, 0x80f, 0);
PHY_WRITE(mac, 0x810, 0);
PHY_SETBITS(mac, 0x478, 0x100);
PHY_SETBITS(mac, 0x801, 0x40);
PHY_SETBITS(mac, 0x60, 0x40);
PHY_SETBITS(mac, 0x14, 0x200);
}
RF_SETBITS(mac, 0x7a, 0x70);
RF_SETBITS(mac, 0x7a, 0x80);
DELAY(30);
nrssi = bwi_nrssi_11g(mac);
if (nrssi == 31) {
for (i = 7; i >= 4; --i) {
RF_WRITE(mac, 0x7b, i);
DELAY(20);
nrssi = bwi_nrssi_11g(mac);
if (nrssi < 31 && rf7b == 0xffff)
rf7b = i;
}
if (rf7b == 0xffff)
rf7b = 4;
} else {
struct bwi_gains gains;
RF_CLRBITS(mac, 0x7a, 0xff80);
PHY_SETBITS(mac, 0x814, 0x1);
PHY_CLRBITS(mac, 0x815, 0x1);
PHY_SETBITS(mac, 0x811, 0xc);
PHY_SETBITS(mac, 0x812, 0xc);
PHY_SETBITS(mac, 0x811, 0x30);
PHY_SETBITS(mac, 0x812, 0x30);
PHY_WRITE(mac, 0x5a, 0x480);
PHY_WRITE(mac, 0x59, 0x810);
PHY_WRITE(mac, 0x58, 0xd);
if (phy->phy_version == 0)
PHY_WRITE(mac, 0x3, 0x122);
else
PHY_SETBITS(mac, 0xa, 0x2000);
PHY_SETBITS(mac, 0x814, 0x4);
PHY_CLRBITS(mac, 0x815, 0x4);
PHY_FILT_SETBITS(mac, 0x3, 0xff9f, 0x40);
RF_SETBITS(mac, 0x7a, 0xf);
bzero(&gains, sizeof(gains));
gains.tbl_gain1 = 3;
gains.tbl_gain2 = 0;
gains.phy_gain = 1;
bwi_set_gains(mac, &gains);
RF_FILT_SETBITS(mac, 0x43, 0xf0, 0xf);
DELAY(30);
nrssi = bwi_nrssi_11g(mac);
if (nrssi == -32) {
for (i = 0; i < 4; ++i) {
RF_WRITE(mac, 0x7b, i);
DELAY(20);
nrssi = bwi_nrssi_11g(mac);
if (nrssi > -31 && rf7b == 0xffff)
rf7b = i;
}
if (rf7b == 0xffff)
rf7b = 3;
} else {
rf7b = 0;
}
}
RF_WRITE(mac, 0x7b, rf7b);
/*
* Restore saved RF/PHY registers
*/
if (phy->phy_rev >= 6) {
for (phy6_idx = 0; phy6_idx < 4; ++phy6_idx) {
PHY_WRITE(mac, save_phy6_regs[phy6_idx],
save_phy6[phy6_idx]);
}
}
/* Saved PHY registers 0, 1, 2 are handled later */
for (i = 3; i < SAVE_PHY_COMM_MAX; ++i)
PHY_WRITE(mac, save_phy_comm_regs[i], save_phy_comm[i]);
for (i = SAVE_RF_MAX - 1; i >= 0; --i)
RF_WRITE(mac, save_rf_regs[i], save_rf[i]);
PHY_SETBITS(mac, 0x802, 0x3);
PHY_SETBITS(mac, 0x429, 0x8000);
bwi_set_gains(mac, NULL);
if (phy->phy_rev >= 6) {
for (; phy6_idx < SAVE_PHY6_MAX; ++phy6_idx) {
PHY_WRITE(mac, save_phy6_regs[phy6_idx],
save_phy6[phy6_idx]);
}
}
PHY_WRITE(mac, save_phy_comm_regs[0], save_phy_comm[0]);
PHY_WRITE(mac, save_phy_comm_regs[2], save_phy_comm[2]);
PHY_WRITE(mac, save_phy_comm_regs[1], save_phy_comm[1]);
#undef SAVE_RF_MAX
#undef SAVE_PHY_COMM_MAX
#undef SAVE_PHY6_MAX
}
static void
bwi_rf_calc_nrssi_slope_11g(struct bwi_mac *mac)
{
#define SAVE_RF_MAX 3
#define SAVE_PHY_COMM_MAX 4
#define SAVE_PHY3_MAX 8
static const uint16_t save_rf_regs[SAVE_RF_MAX] =
{ 0x7a, 0x52, 0x43 };
static const uint16_t save_phy_comm_regs[SAVE_PHY_COMM_MAX] =
{ 0x15, 0x5a, 0x59, 0x58 };
static const uint16_t save_phy3_regs[SAVE_PHY3_MAX] = {
0x002e, 0x002f, 0x080f, 0x0810,
0x0801, 0x0060, 0x0014, 0x0478
};
struct bwi_softc *sc = mac->mac_sc;
struct bwi_phy *phy = &mac->mac_phy;
struct bwi_rf *rf = &mac->mac_rf;
uint16_t save_rf[SAVE_RF_MAX];
uint16_t save_phy_comm[SAVE_PHY_COMM_MAX];
uint16_t save_phy3[SAVE_PHY3_MAX];
uint16_t ant_div, bbp_atten, chan_ex;
struct bwi_gains gains;
int16_t nrssi[2];
int i, phy3_idx = 0;
if (rf->rf_rev >= 9)
return;
else if (rf->rf_rev == 8)
bwi_rf_set_nrssi_ofs_11g(mac);
PHY_CLRBITS(mac, 0x429, 0x8000);
PHY_CLRBITS(mac, 0x802, 0x3);
/*
* Save RF/PHY registers for later restoration
*/
ant_div = CSR_READ_2(sc, BWI_RF_ANTDIV);
CSR_SETBITS_2(sc, BWI_RF_ANTDIV, 0x8000);
for (i = 0; i < SAVE_RF_MAX; ++i)
save_rf[i] = RF_READ(mac, save_rf_regs[i]);
for (i = 0; i < SAVE_PHY_COMM_MAX; ++i)
save_phy_comm[i] = PHY_READ(mac, save_phy_comm_regs[i]);
bbp_atten = CSR_READ_2(sc, BWI_BBP_ATTEN);
chan_ex = CSR_READ_2(sc, BWI_RF_CHAN_EX);
if (phy->phy_rev >= 3) {
for (i = 0; i < SAVE_PHY3_MAX; ++i)
save_phy3[i] = PHY_READ(mac, save_phy3_regs[i]);
PHY_WRITE(mac, 0x2e, 0);
PHY_WRITE(mac, 0x810, 0);
if (phy->phy_rev == 4 || phy->phy_rev == 6 ||
phy->phy_rev == 7) {
PHY_SETBITS(mac, 0x478, 0x100);
PHY_SETBITS(mac, 0x810, 0x40);
} else if (phy->phy_rev == 3 || phy->phy_rev == 5) {
PHY_CLRBITS(mac, 0x810, 0x40);
}
PHY_SETBITS(mac, 0x60, 0x40);
PHY_SETBITS(mac, 0x14, 0x200);
}
/*
* Calculate nrssi0
*/
RF_SETBITS(mac, 0x7a, 0x70);
bzero(&gains, sizeof(gains));
gains.tbl_gain1 = 0;
gains.tbl_gain2 = 8;
gains.phy_gain = 0;
bwi_set_gains(mac, &gains);
RF_CLRBITS(mac, 0x7a, 0xff08);
if (phy->phy_rev >= 2) {
PHY_FILT_SETBITS(mac, 0x811, 0xffcf, 0x30);
PHY_FILT_SETBITS(mac, 0x812, 0xffcf, 0x10);
}
RF_SETBITS(mac, 0x7a, 0x80);
DELAY(20);
nrssi[0] = bwi_nrssi_11g(mac);
/*
* Calculate nrssi1
*/
RF_CLRBITS(mac, 0x7a, 0xff80);
if (phy->phy_version >= 2)
PHY_FILT_SETBITS(mac, 0x3, 0xff9f, 0x40);
CSR_SETBITS_2(sc, BWI_RF_CHAN_EX, 0x2000);
RF_SETBITS(mac, 0x7a, 0xf);
PHY_WRITE(mac, 0x15, 0xf330);
if (phy->phy_rev >= 2) {
PHY_FILT_SETBITS(mac, 0x812, 0xffcf, 0x20);
PHY_FILT_SETBITS(mac, 0x811, 0xffcf, 0x20);
}
bzero(&gains, sizeof(gains));
gains.tbl_gain1 = 3;
gains.tbl_gain2 = 0;
gains.phy_gain = 1;
bwi_set_gains(mac, &gains);
if (rf->rf_rev == 8) {
RF_WRITE(mac, 0x43, 0x1f);
} else {
RF_FILT_SETBITS(mac, 0x52, 0xff0f, 0x60);
RF_FILT_SETBITS(mac, 0x43, 0xfff0, 0x9);
}
PHY_WRITE(mac, 0x5a, 0x480);
PHY_WRITE(mac, 0x59, 0x810);
PHY_WRITE(mac, 0x58, 0xd);
DELAY(20);
nrssi[1] = bwi_nrssi_11g(mac);
/*
* Install calculated narrow RSSI values
*/
if (nrssi[1] == nrssi[0])
rf->rf_nrssi_slope = 0x10000;
else
rf->rf_nrssi_slope = 0x400000 / (nrssi[0] - nrssi[1]);
if (nrssi[0] >= -4) {
rf->rf_nrssi[0] = nrssi[1];
rf->rf_nrssi[1] = nrssi[0];
}
/*
* Restore saved RF/PHY registers
*/
if (phy->phy_rev >= 3) {
for (phy3_idx = 0; phy3_idx < 4; ++phy3_idx) {
PHY_WRITE(mac, save_phy3_regs[phy3_idx],
save_phy3[phy3_idx]);
}
}
if (phy->phy_rev >= 2) {
PHY_CLRBITS(mac, 0x812, 0x30);
PHY_CLRBITS(mac, 0x811, 0x30);
}
for (i = 0; i < SAVE_RF_MAX; ++i)
RF_WRITE(mac, save_rf_regs[i], save_rf[i]);
CSR_WRITE_2(sc, BWI_RF_ANTDIV, ant_div);
CSR_WRITE_2(sc, BWI_BBP_ATTEN, bbp_atten);
CSR_WRITE_2(sc, BWI_RF_CHAN_EX, chan_ex);
for (i = 0; i < SAVE_PHY_COMM_MAX; ++i)
PHY_WRITE(mac, save_phy_comm_regs[i], save_phy_comm[i]);
bwi_rf_work_around(mac, rf->rf_curchan);
PHY_SETBITS(mac, 0x802, 0x3);
bwi_set_gains(mac, NULL);
PHY_SETBITS(mac, 0x429, 0x8000);
if (phy->phy_rev >= 3) {
for (; phy3_idx < SAVE_PHY3_MAX; ++phy3_idx) {
PHY_WRITE(mac, save_phy3_regs[phy3_idx],
save_phy3[phy3_idx]);
}
}
bwi_rf_init_sw_nrssi_table(mac);
bwi_rf_set_nrssi_thr_11g(mac);
#undef SAVE_RF_MAX
#undef SAVE_PHY_COMM_MAX
#undef SAVE_PHY3_MAX
}
static void
bwi_rf_init_sw_nrssi_table(struct bwi_mac *mac)
{
struct bwi_rf *rf = &mac->mac_rf;
int d, i;
d = 0x1f - rf->rf_nrssi[0];
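/*
 * Build the software NRSSI table: offset each index by d, scale by
 * the 16.16 fixed-point slope, add 0x3a and clamp to [0, 0x3f].
 */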
for (i = 0; i < BWI_NRSSI_TBLSZ; ++i) {
int val;
val = (((i - d) * rf->rf_nrssi_slope) / 0x10000) + 0x3a;
if (val < 0)
val = 0;
else if (val > 0x3f)
val = 0x3f;
rf->rf_nrssi_table[i] = val;
}
}
void
bwi_rf_init_hw_nrssi_table(struct bwi_mac *mac, uint16_t adjust)
{
int i;
for (i = 0; i < BWI_NRSSI_TBLSZ; ++i) {
int16_t val;
val = bwi_nrssi_read(mac, i);
val -= adjust;
if (val < -32)
val = -32;
else if (val > 31)
val = 31;
bwi_nrssi_write(mac, i, val);
}
}
static void
bwi_rf_set_nrssi_thr_11b(struct bwi_mac *mac)
{
struct bwi_rf *rf = &mac->mac_rf;
int32_t thr;
if (rf->rf_type != BWI_RF_T_BCM2050 ||
(mac->mac_sc->sc_card_flags & BWI_CARD_F_SW_NRSSI) == 0)
return;
/*
* Calculate nrssi threshold
*/
if (rf->rf_rev >= 6) {
thr = (rf->rf_nrssi[1] - rf->rf_nrssi[0]) * 32;
thr += 20 * (rf->rf_nrssi[0] + 1);
thr /= 40;
} else {
thr = rf->rf_nrssi[1] - 5;
}
if (thr < 0)
thr = 0;
else if (thr > 0x3e)
thr = 0x3e;
PHY_READ(mac, BWI_PHYR_NRSSI_THR_11B); /* dummy read */
PHY_WRITE(mac, BWI_PHYR_NRSSI_THR_11B, (((uint16_t)thr) << 8) | 0x1c);
if (rf->rf_rev >= 6) {
PHY_WRITE(mac, 0x87, 0xe0d);
PHY_WRITE(mac, 0x86, 0xc0b);
PHY_WRITE(mac, 0x85, 0xa09);
PHY_WRITE(mac, 0x84, 0x808);
PHY_WRITE(mac, 0x83, 0x808);
PHY_WRITE(mac, 0x82, 0x604);
PHY_WRITE(mac, 0x81, 0x302);
PHY_WRITE(mac, 0x80, 0x100);
}
}
static __inline int32_t
_nrssi_threshold(const struct bwi_rf *rf, int32_t val)
{
val *= (rf->rf_nrssi[1] - rf->rf_nrssi[0]);
val += (rf->rf_nrssi[0] << 6);
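/* Bias the value so the divide by 64 (>> 6) below rounds to nearest. */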
if (val < 32)
val += 31;
else
val += 32;
val >>= 6;
if (val < -31)
val = -31;
else if (val > 31)
val = 31;
return val;
}
static void
bwi_rf_set_nrssi_thr_11g(struct bwi_mac *mac)
{
int32_t thr1, thr2;
uint16_t thr;
/*
* Find the two nrssi thresholds
*/
if ((mac->mac_phy.phy_flags & BWI_PHY_F_LINKED) == 0 ||
(mac->mac_sc->sc_card_flags & BWI_CARD_F_SW_NRSSI) == 0) {
int16_t nrssi;
nrssi = bwi_nrssi_read(mac, 0x20);
if (nrssi >= 32)
nrssi -= 64;
if (nrssi < 3) {
thr1 = 0x2b;
thr2 = 0x27;
} else {
thr1 = 0x2d;
thr2 = 0x2b;
}
} else {
/* TODO: interference mode */
thr1 = _nrssi_threshold(&mac->mac_rf, 0x11);
thr2 = _nrssi_threshold(&mac->mac_rf, 0xe);
}
#define NRSSI_THR1_MASK __BITS(5, 0)
#define NRSSI_THR2_MASK __BITS(11, 6)
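/* Pack the two 6-bit thresholds into bits 5:0 and 11:6. */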
thr = __SHIFTIN((uint32_t)thr1, NRSSI_THR1_MASK) |
__SHIFTIN((uint32_t)thr2, NRSSI_THR2_MASK);
PHY_FILT_SETBITS(mac, BWI_PHYR_NRSSI_THR_11G, 0xf000, thr);
#undef NRSSI_THR1_MASK
#undef NRSSI_THR2_MASK
}
void
bwi_rf_clear_tssi(struct bwi_mac *mac)
{
/* XXX use function pointer */
if (mac->mac_phy.phy_mode == IEEE80211_MODE_11A) {
/* TODO:11A */
} else {
uint16_t val;
int i;
val = __SHIFTIN(BWI_INVALID_TSSI, BWI_LO_TSSI_MASK) |
__SHIFTIN(BWI_INVALID_TSSI, BWI_HI_TSSI_MASK);
for (i = 0; i < 2; ++i) {
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ,
BWI_COMM_MOBJ_TSSI_DS + (i * 2), val);
}
for (i = 0; i < 2; ++i) {
MOBJ_WRITE_2(mac, BWI_COMM_MOBJ,
BWI_COMM_MOBJ_TSSI_OFDM + (i * 2), val);
}
}
}
void
bwi_rf_clear_state(struct bwi_rf *rf)
{
int i;
rf->rf_flags &= ~BWI_RF_CLEAR_FLAGS;
bzero(rf->rf_lo, sizeof(rf->rf_lo));
bzero(rf->rf_lo_used, sizeof(rf->rf_lo_used));
rf->rf_nrssi_slope = 0;
rf->rf_nrssi[0] = BWI_INVALID_NRSSI;
rf->rf_nrssi[1] = BWI_INVALID_NRSSI;
for (i = 0; i < BWI_NRSSI_TBLSZ; ++i)
rf->rf_nrssi_table[i] = i;
rf->rf_lo_gain = 0;
rf->rf_rx_gain = 0;
bcopy(rf->rf_txpower_map0, rf->rf_txpower_map,
sizeof(rf->rf_txpower_map));
rf->rf_idle_tssi = rf->rf_idle_tssi0;
}
static void
bwi_rf_on_11a(struct bwi_mac *mac)
{
/* TODO:11A */
}
static void
bwi_rf_on_11bg(struct bwi_mac *mac)
{
struct bwi_phy *phy = &mac->mac_phy;
PHY_WRITE(mac, 0x15, 0x8000);
PHY_WRITE(mac, 0x15, 0xcc00);
if (phy->phy_flags & BWI_PHY_F_LINKED)
PHY_WRITE(mac, 0x15, 0xc0);
else
PHY_WRITE(mac, 0x15, 0);
bwi_rf_set_chan(mac, 6 /* XXX */, 1);
}
void
bwi_rf_set_ant_mode(struct bwi_mac *mac, int ant_mode)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_phy *phy = &mac->mac_phy;
uint16_t val;
KASSERT(ant_mode == BWI_ANT_MODE_0 ||
ant_mode == BWI_ANT_MODE_1 ||
ant_mode == BWI_ANT_MODE_AUTO, ("ant_mode %d", ant_mode));
HFLAGS_CLRBITS(mac, BWI_HFLAG_AUTO_ANTDIV);
if (phy->phy_mode == IEEE80211_MODE_11B) {
/* NOTE: v4/v3 conflicts, take v3 */
if (mac->mac_rev == 2)
val = BWI_ANT_MODE_AUTO;
else
val = ant_mode;
val <<= 7;
PHY_FILT_SETBITS(mac, 0x3e2, 0xfe7f, val);
} else { /* 11a/g */
/* XXX reg/value naming */
val = ant_mode << 7;
PHY_FILT_SETBITS(mac, 0x401, 0x7e7f, val);
if (ant_mode == BWI_ANT_MODE_AUTO)
PHY_CLRBITS(mac, 0x42b, 0x100);
if (phy->phy_mode == IEEE80211_MODE_11A) {
/* TODO:11A */
} else { /* 11g */
if (ant_mode == BWI_ANT_MODE_AUTO)
PHY_SETBITS(mac, 0x48c, 0x2000);
else
PHY_CLRBITS(mac, 0x48c, 0x2000);
if (phy->phy_rev >= 2) {
PHY_SETBITS(mac, 0x461, 0x10);
PHY_FILT_SETBITS(mac, 0x4ad, 0xff00, 0x15);
if (phy->phy_rev == 2) {
PHY_WRITE(mac, 0x427, 0x8);
} else {
PHY_FILT_SETBITS(mac, 0x427,
0xff00, 0x8);
}
if (phy->phy_rev >= 6)
PHY_WRITE(mac, 0x49b, 0xdc);
}
}
}
/* XXX v4 sets AUTO_ANTDIV unconditionally */
if (ant_mode == BWI_ANT_MODE_AUTO)
HFLAGS_SETBITS(mac, BWI_HFLAG_AUTO_ANTDIV);
val = ant_mode << 8;
MOBJ_FILT_SETBITS_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_TX_BEACON,
0xfc3f, val);
MOBJ_FILT_SETBITS_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_TX_ACK,
0xfc3f, val);
MOBJ_FILT_SETBITS_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_TX_PROBE_RESP,
0xfc3f, val);
/* XXX what are these? */
if (phy->phy_mode == IEEE80211_MODE_11B)
CSR_SETBITS_2(sc, 0x5e, 0x4);
CSR_WRITE_4(sc, 0x100, 0x1000000);
if (mac->mac_rev < 5)
CSR_WRITE_4(sc, 0x10c, 0x1000000);
mac->mac_rf.rf_ant_mode = ant_mode;
}
int
bwi_rf_get_latest_tssi(struct bwi_mac *mac, int8_t tssi[], uint16_t ofs)
{
int i;
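/* Each 16-bit word carries two TSSI samples, one in each half. */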
for (i = 0; i < 4; ) {
uint16_t val;
val = MOBJ_READ_2(mac, BWI_COMM_MOBJ, ofs + i);
tssi[i++] = (int8_t)__SHIFTOUT(val, BWI_LO_TSSI_MASK);
tssi[i++] = (int8_t)__SHIFTOUT(val, BWI_HI_TSSI_MASK);
}
for (i = 0; i < 4; ++i) {
if (tssi[i] == BWI_INVALID_TSSI)
return EINVAL;
}
return 0;
}
int
bwi_rf_tssi2dbm(struct bwi_mac *mac, int8_t tssi, int8_t *txpwr)
{
struct bwi_rf *rf = &mac->mac_rf;
int pwr_idx;
pwr_idx = rf->rf_idle_tssi + (int)tssi - rf->rf_base_tssi;
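/* Clamp the index into the TX power map; the #if 0 branch errors out. */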
#if 0
if (pwr_idx < 0 || pwr_idx >= BWI_TSSI_MAX)
return EINVAL;
#else
if (pwr_idx < 0)
pwr_idx = 0;
else if (pwr_idx >= BWI_TSSI_MAX)
pwr_idx = BWI_TSSI_MAX - 1;
#endif
*txpwr = rf->rf_txpower_map[pwr_idx];
return 0;
}
static int
bwi_rf_calc_rssi_bcm2050(struct bwi_mac *mac, const struct bwi_rxbuf_hdr *hdr)
{
uint16_t flags1, flags3;
int rssi, lna_gain;
rssi = hdr->rxh_rssi;
flags1 = le16toh(hdr->rxh_flags1);
flags3 = le16toh(hdr->rxh_flags3);
if (flags1 & BWI_RXH_F1_OFDM) {
if (rssi > 127)
rssi -= 256;
if (flags3 & BWI_RXH_F3_BCM2050_RSSI)
rssi += 17;
else
rssi -= 4;
return rssi;
}
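/* Non-OFDM path: go through the NRSSI table when software NRSSI is used. */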
if (mac->mac_sc->sc_card_flags & BWI_CARD_F_SW_NRSSI) {
struct bwi_rf *rf = &mac->mac_rf;
if (rssi >= BWI_NRSSI_TBLSZ)
rssi = BWI_NRSSI_TBLSZ - 1;
rssi = ((31 - (int)rf->rf_nrssi_table[rssi]) * -131) / 128;
rssi -= 67;
} else {
rssi = ((31 - rssi) * -149) / 128;
rssi -= 68;
}
if (mac->mac_phy.phy_mode != IEEE80211_MODE_11G)
return rssi;
if (flags3 & BWI_RXH_F3_BCM2050_RSSI)
rssi += 20;
lna_gain = __SHIFTOUT(le16toh(hdr->rxh_phyinfo),
BWI_RXH_PHYINFO_LNAGAIN);
DPRINTF(mac->mac_sc, BWI_DBG_RF | BWI_DBG_RX,
"lna_gain %d, phyinfo 0x%04x\n",
lna_gain, le16toh(hdr->rxh_phyinfo));
switch (lna_gain) {
case 0:
rssi += 27;
break;
case 1:
rssi += 6;
break;
case 2:
rssi += 12;
break;
case 3:
/*
* XXX
* According to the v3 spec, we should do _nothing_ here,
* but it seems that the resulting RSSI will be too low
* (relative to what ath(4) says). Raise it a little
* bit.
*/
rssi += 5;
break;
default:
panic("impossible lna gain %d", lna_gain);
}
return rssi;
}
static int
bwi_rf_calc_rssi_bcm2053(struct bwi_mac *mac, const struct bwi_rxbuf_hdr *hdr)
{
uint16_t flags1;
int rssi;
rssi = (((int)hdr->rxh_rssi - 11) * 103) / 64;
flags1 = le16toh(hdr->rxh_flags1);
if (flags1 & BWI_RXH_F1_BCM2053_RSSI)
rssi -= 109;
else
rssi -= 83;
return rssi;
}
static int
bwi_rf_calc_rssi_bcm2060(struct bwi_mac *mac, const struct bwi_rxbuf_hdr *hdr)
{
int rssi;
rssi = hdr->rxh_rssi;
if (rssi > 127)
rssi -= 256;
return rssi;
}
static int
bwi_rf_calc_noise_bcm2050(struct bwi_mac *mac)
{
uint16_t val;
int noise;
val = MOBJ_READ_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_RF_NOISE);
noise = (int)val; /* XXX check bounds? */
if (mac->mac_sc->sc_card_flags & BWI_CARD_F_SW_NRSSI) {
struct bwi_rf *rf = &mac->mac_rf;
if (noise >= BWI_NRSSI_TBLSZ)
noise = BWI_NRSSI_TBLSZ - 1;
noise = ((31 - (int)rf->rf_nrssi_table[noise]) * -131) / 128;
noise -= 67;
} else {
noise = ((31 - noise) * -149) / 128;
noise -= 68;
}
return noise;
}
static int
bwi_rf_calc_noise_bcm2053(struct bwi_mac *mac)
{
uint16_t val;
int noise;
val = MOBJ_READ_2(mac, BWI_COMM_MOBJ, BWI_COMM_MOBJ_RF_NOISE);
noise = (int)val; /* XXX check bounds? */
noise = ((noise - 11) * 103) / 64;
noise -= 109;
return noise;
}
static int
bwi_rf_calc_noise_bcm2060(struct bwi_mac *mac)
{
/* XXX Don't know how to calculate this */
return (BWI_NOISE_FLOOR);
}
static uint16_t
bwi_rf_lo_measure_11b(struct bwi_mac *mac)
{
uint16_t val;
int i;
val = 0;
for (i = 0; i < 10; ++i) {
PHY_WRITE(mac, 0x15, 0xafa0);
DELAY(1);
PHY_WRITE(mac, 0x15, 0xefa0);
DELAY(10);
PHY_WRITE(mac, 0x15, 0xffa0);
DELAY(40);
val += PHY_READ(mac, 0x2c);
}
return val;
}
static void
bwi_rf_lo_update_11b(struct bwi_mac *mac)
{
struct bwi_softc *sc = mac->mac_sc;
struct bwi_rf *rf = &mac->mac_rf;
struct rf_saveregs regs;
uint16_t rf_val, phy_val, min_val, val;
uint16_t rf52, bphy_ctrl;
int i;
DPRINTF(sc, BWI_DBG_RF | BWI_DBG_INIT, "%s enter\n", __func__);
bzero(&regs, sizeof(regs));
bphy_ctrl = 0;
/*
* Save RF/PHY registers for later restoration
*/
SAVE_PHY_REG(mac, &regs, 15);
rf52 = RF_READ(mac, 0x52) & 0xfff0;
if (rf->rf_type == BWI_RF_T_BCM2050) {
SAVE_PHY_REG(mac, &regs, 0a);
SAVE_PHY_REG(mac, &regs, 2a);
SAVE_PHY_REG(mac, &regs, 35);
SAVE_PHY_REG(mac, &regs, 03);
SAVE_PHY_REG(mac, &regs, 01);
SAVE_PHY_REG(mac, &regs, 30);
SAVE_RF_REG(mac, &regs, 43);
SAVE_RF_REG(mac, &regs, 7a);
bphy_ctrl = CSR_READ_2(sc, BWI_BPHY_CTRL);
SAVE_RF_REG(mac, &regs, 52);
regs.rf_52 &= 0xf0;
PHY_WRITE(mac, 0x30, 0xff);
CSR_WRITE_2(sc, BWI_PHY_CTRL, 0x3f3f);
PHY_WRITE(mac, 0x35, regs.phy_35 & 0xff7f);
RF_WRITE(mac, 0x7a, regs.rf_7a & 0xfff0);
}
PHY_WRITE(mac, 0x15, 0xb000);
if (rf->rf_type == BWI_RF_T_BCM2050) {
PHY_WRITE(mac, 0x2b, 0x203);
PHY_WRITE(mac, 0x2a, 0x8a3);
} else {
PHY_WRITE(mac, 0x2b, 0x1402);
}
/*
* Setup RF signal
*/
rf_val = 0;
min_val = UINT16_MAX;
for (i = 0; i < 4; ++i) {
RF_WRITE(mac, 0x52, rf52 | i);
bwi_rf_lo_measure_11b(mac); /* Ignore return value */
}
for (i = 0; i < 10; ++i) {
RF_WRITE(mac, 0x52, rf52 | i);
val = bwi_rf_lo_measure_11b(mac) / 10;
if (val < min_val) {
min_val = val;
rf_val = i;
}
}
RF_WRITE(mac, 0x52, rf52 | rf_val);
/*
* Setup PHY signal
*/
phy_val = 0;
min_val = UINT16_MAX;
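/*
 * Sweep PHY register 0x2f over a 2D grid: the high byte carries the
 * i step and the low byte the j step, each as an 8-bit two's
 * complement value; keep the setting with the lowest measurement.
 */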
for (i = -4; i < 5; i += 2) {
int j;
for (j = -4; j < 5; j += 2) {
uint16_t phy2f;
phy2f = (0x100 * i) + j;
if (j < 0)
phy2f += 0x100;
PHY_WRITE(mac, 0x2f, phy2f);
val = bwi_rf_lo_measure_11b(mac) / 10;
if (val < min_val) {
min_val = val;
phy_val = phy2f;
}
}
}
PHY_WRITE(mac, 0x2f, phy_val + 0x101);
/*
* Restore saved RF/PHY registers
*/
if (rf->rf_type == BWI_RF_T_BCM2050) {
RESTORE_PHY_REG(mac, &regs, 0a);
RESTORE_PHY_REG(mac, &regs, 2a);
RESTORE_PHY_REG(mac, &regs, 35);
RESTORE_PHY_REG(mac, &regs, 03);
RESTORE_PHY_REG(mac, &regs, 01);
RESTORE_PHY_REG(mac, &regs, 30);
RESTORE_RF_REG(mac, &regs, 43);
RESTORE_RF_REG(mac, &regs, 7a);
RF_FILT_SETBITS(mac, 0x52, 0xf, regs.rf_52);
CSR_WRITE_2(sc, BWI_BPHY_CTRL, bphy_ctrl);
}
RESTORE_PHY_REG(mac, &regs, 15);
bwi_rf_work_around(mac, rf->rf_curchan);
}
Index: head/sys/dev/bwi/if_bwi.c
===================================================================
--- head/sys/dev/bwi/if_bwi.c (revision 287196)
+++ head/sys/dev/bwi/if_bwi.c (revision 287197)
@@ -1,4061 +1,3984 @@
/*
* Copyright (c) 2007 The DragonFly Project. All rights reserved.
*
* This code is derived from software contributed to The DragonFly Project
* by Sepherosa Ziehau <sepherosa@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of The DragonFly Project nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific, prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $DragonFly: src/sys/dev/netif/bwi/if_bwi.c,v 1.19 2008/02/15 11:15:38 sephe Exp $
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_bwi.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_phy.h>
#include <net80211/ieee80211_ratectl.h>
#include <net/bpf.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#include <machine/bus.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/bwi/bitops.h>
#include <dev/bwi/if_bwireg.h>
#include <dev/bwi/if_bwivar.h>
#include <dev/bwi/bwimac.h>
#include <dev/bwi/bwirf.h>
struct bwi_clock_freq {
u_int clkfreq_min;
u_int clkfreq_max;
};
struct bwi_myaddr_bssid {
uint8_t myaddr[IEEE80211_ADDR_LEN];
uint8_t bssid[IEEE80211_ADDR_LEN];
} __packed;
static struct ieee80211vap *bwi_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void bwi_vap_delete(struct ieee80211vap *);
-static void bwi_init(void *);
-static int bwi_ioctl(struct ifnet *, u_long, caddr_t);
-static void bwi_start(struct ifnet *);
-static void bwi_start_locked(struct ifnet *);
+static void bwi_init(struct bwi_softc *);
+static void bwi_parent(struct ieee80211com *);
+static int bwi_transmit(struct ieee80211com *, struct mbuf *);
+static void bwi_start_locked(struct bwi_softc *);
static int bwi_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void bwi_watchdog(void *);
static void bwi_scan_start(struct ieee80211com *);
static void bwi_set_channel(struct ieee80211com *);
static void bwi_scan_end(struct ieee80211com *);
static int bwi_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void bwi_updateslot(struct ieee80211com *);
static int bwi_media_change(struct ifnet *);
static void bwi_calibrate(void *);
static int bwi_calc_rssi(struct bwi_softc *, const struct bwi_rxbuf_hdr *);
static int bwi_calc_noise(struct bwi_softc *);
static __inline uint8_t bwi_plcp2rate(uint32_t, enum ieee80211_phytype);
static void bwi_rx_radiotap(struct bwi_softc *, struct mbuf *,
struct bwi_rxbuf_hdr *, const void *, int, int, int);
static void bwi_restart(void *, int);
static void bwi_init_statechg(struct bwi_softc *, int);
static void bwi_stop(struct bwi_softc *, int);
static void bwi_stop_locked(struct bwi_softc *, int);
static int bwi_newbuf(struct bwi_softc *, int, int);
static int bwi_encap(struct bwi_softc *, int, struct mbuf *,
struct ieee80211_node *);
static int bwi_encap_raw(struct bwi_softc *, int, struct mbuf *,
struct ieee80211_node *,
const struct ieee80211_bpf_params *);
static void bwi_init_rxdesc_ring32(struct bwi_softc *, uint32_t,
bus_addr_t, int, int);
static void bwi_reset_rx_ring32(struct bwi_softc *, uint32_t);
static int bwi_init_tx_ring32(struct bwi_softc *, int);
static int bwi_init_rx_ring32(struct bwi_softc *);
static int bwi_init_txstats32(struct bwi_softc *);
static void bwi_free_tx_ring32(struct bwi_softc *, int);
static void bwi_free_rx_ring32(struct bwi_softc *);
static void bwi_free_txstats32(struct bwi_softc *);
static void bwi_setup_rx_desc32(struct bwi_softc *, int, bus_addr_t, int);
static void bwi_setup_tx_desc32(struct bwi_softc *, struct bwi_ring_data *,
int, bus_addr_t, int);
static int bwi_rxeof32(struct bwi_softc *);
static void bwi_start_tx32(struct bwi_softc *, uint32_t, int);
static void bwi_txeof_status32(struct bwi_softc *);
static int bwi_init_tx_ring64(struct bwi_softc *, int);
static int bwi_init_rx_ring64(struct bwi_softc *);
static int bwi_init_txstats64(struct bwi_softc *);
static void bwi_free_tx_ring64(struct bwi_softc *, int);
static void bwi_free_rx_ring64(struct bwi_softc *);
static void bwi_free_txstats64(struct bwi_softc *);
static void bwi_setup_rx_desc64(struct bwi_softc *, int, bus_addr_t, int);
static void bwi_setup_tx_desc64(struct bwi_softc *, struct bwi_ring_data *,
int, bus_addr_t, int);
static int bwi_rxeof64(struct bwi_softc *);
static void bwi_start_tx64(struct bwi_softc *, uint32_t, int);
static void bwi_txeof_status64(struct bwi_softc *);
static int bwi_rxeof(struct bwi_softc *, int);
static void _bwi_txeof(struct bwi_softc *, uint16_t, int, int);
static void bwi_txeof(struct bwi_softc *);
static void bwi_txeof_status(struct bwi_softc *, int);
static void bwi_enable_intrs(struct bwi_softc *, uint32_t);
static void bwi_disable_intrs(struct bwi_softc *, uint32_t);
static int bwi_dma_alloc(struct bwi_softc *);
static void bwi_dma_free(struct bwi_softc *);
static int bwi_dma_ring_alloc(struct bwi_softc *, bus_dma_tag_t,
struct bwi_ring_data *, bus_size_t,
uint32_t);
static int bwi_dma_mbuf_create(struct bwi_softc *);
static void bwi_dma_mbuf_destroy(struct bwi_softc *, int, int);
static int bwi_dma_txstats_alloc(struct bwi_softc *, uint32_t, bus_size_t);
static void bwi_dma_txstats_free(struct bwi_softc *);
static void bwi_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
static void bwi_dma_buf_addr(void *, bus_dma_segment_t *, int,
bus_size_t, int);
static void bwi_power_on(struct bwi_softc *, int);
static int bwi_power_off(struct bwi_softc *, int);
static int bwi_set_clock_mode(struct bwi_softc *, enum bwi_clock_mode);
static int bwi_set_clock_delay(struct bwi_softc *);
static void bwi_get_clock_freq(struct bwi_softc *, struct bwi_clock_freq *);
static int bwi_get_pwron_delay(struct bwi_softc *sc);
static void bwi_set_addr_filter(struct bwi_softc *, uint16_t,
const uint8_t *);
static void bwi_set_bssid(struct bwi_softc *, const uint8_t *);
static void bwi_get_card_flags(struct bwi_softc *);
static void bwi_get_eaddr(struct bwi_softc *, uint16_t, uint8_t *);
static int bwi_bus_attach(struct bwi_softc *);
static int bwi_bbp_attach(struct bwi_softc *);
static int bwi_bbp_power_on(struct bwi_softc *, enum bwi_clock_mode);
static void bwi_bbp_power_off(struct bwi_softc *);
static const char *bwi_regwin_name(const struct bwi_regwin *);
static uint32_t bwi_regwin_disable_bits(struct bwi_softc *);
static void bwi_regwin_info(struct bwi_softc *, uint16_t *, uint8_t *);
static int bwi_regwin_select(struct bwi_softc *, int);
static void bwi_led_attach(struct bwi_softc *);
static void bwi_led_newstate(struct bwi_softc *, enum ieee80211_state);
static void bwi_led_event(struct bwi_softc *, int);
static void bwi_led_blink_start(struct bwi_softc *, int, int);
static void bwi_led_blink_next(void *);
static void bwi_led_blink_end(void *);
static const struct {
uint16_t did_min;
uint16_t did_max;
uint16_t bbp_id;
} bwi_bbpid_map[] = {
{ 0x4301, 0x4301, 0x4301 },
{ 0x4305, 0x4307, 0x4307 },
{ 0x4402, 0x4403, 0x4402 },
{ 0x4610, 0x4615, 0x4610 },
{ 0x4710, 0x4715, 0x4710 },
{ 0x4720, 0x4725, 0x4309 }
};
static const struct {
uint16_t bbp_id;
int nregwin;
} bwi_regwin_count[] = {
{ 0x4301, 5 },
{ 0x4306, 6 },
{ 0x4307, 5 },
{ 0x4310, 8 },
{ 0x4401, 3 },
{ 0x4402, 3 },
{ 0x4610, 9 },
{ 0x4704, 9 },
{ 0x4710, 9 },
{ 0x5365, 7 }
};
#define CLKSRC(src) \
[BWI_CLKSRC_ ## src] = { \
.freq_min = BWI_CLKSRC_ ##src## _FMIN, \
.freq_max = BWI_CLKSRC_ ##src## _FMAX \
}
static const struct {
u_int freq_min;
u_int freq_max;
} bwi_clkfreq[BWI_CLKSRC_MAX] = {
CLKSRC(LP_OSC),
CLKSRC(CS_OSC),
CLKSRC(PCI)
};
#undef CLKSRC
#define VENDOR_LED_ACT(vendor) \
{ \
.vid = PCI_VENDOR_##vendor, \
.led_act = { BWI_VENDOR_LED_ACT_##vendor } \
}
static const struct {
#define PCI_VENDOR_COMPAQ 0x0e11
#define PCI_VENDOR_LINKSYS 0x1737
uint16_t vid;
uint8_t led_act[BWI_LED_MAX];
} bwi_vendor_led_act[] = {
VENDOR_LED_ACT(COMPAQ),
VENDOR_LED_ACT(LINKSYS)
#undef PCI_VENDOR_LINKSYS
#undef PCI_VENDOR_COMPAQ
};
static const uint8_t bwi_default_led_act[BWI_LED_MAX] =
{ BWI_VENDOR_LED_ACT_DEFAULT };
#undef VENDOR_LED_ACT
static const struct {
int on_dur;
int off_dur;
} bwi_led_duration[109] = {
[0] = { 400, 100 },
[2] = { 150, 75 },
[4] = { 90, 45 },
[11] = { 66, 34 },
[12] = { 53, 26 },
[18] = { 42, 21 },
[22] = { 35, 17 },
[24] = { 32, 16 },
[36] = { 21, 10 },
[48] = { 16, 8 },
[72] = { 11, 5 },
[96] = { 9, 4 },
[108] = { 7, 3 }
};
#ifdef BWI_DEBUG
#ifdef BWI_DEBUG_VERBOSE
static uint32_t bwi_debug = BWI_DBG_ATTACH | BWI_DBG_INIT | BWI_DBG_TXPOWER;
#else
static uint32_t bwi_debug;
#endif
TUNABLE_INT("hw.bwi.debug", (int *)&bwi_debug);
#endif /* BWI_DEBUG */
static const uint8_t bwi_zero_addr[IEEE80211_ADDR_LEN];
uint16_t
bwi_read_sprom(struct bwi_softc *sc, uint16_t ofs)
{
return CSR_READ_2(sc, ofs + BWI_SPROM_START);
}
static __inline void
bwi_setup_desc32(struct bwi_softc *sc, struct bwi_desc32 *desc_array,
int ndesc, int desc_idx, bus_addr_t paddr, int buf_len,
int tx)
{
struct bwi_desc32 *desc = &desc_array[desc_idx];
uint32_t ctrl, addr, addr_hi, addr_lo;
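/*
 * Split the DMA address between the descriptor address word and the
 * high bits folded into the control word; the last descriptor gets
 * EOR so the ring wraps.
 */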
addr_lo = __SHIFTOUT(paddr, BWI_DESC32_A_ADDR_MASK);
addr_hi = __SHIFTOUT(paddr, BWI_DESC32_A_FUNC_MASK);
addr = __SHIFTIN(addr_lo, BWI_DESC32_A_ADDR_MASK) |
__SHIFTIN(BWI_DESC32_A_FUNC_TXRX, BWI_DESC32_A_FUNC_MASK);
ctrl = __SHIFTIN(buf_len, BWI_DESC32_C_BUFLEN_MASK) |
__SHIFTIN(addr_hi, BWI_DESC32_C_ADDRHI_MASK);
if (desc_idx == ndesc - 1)
ctrl |= BWI_DESC32_C_EOR;
if (tx) {
/* XXX */
ctrl |= BWI_DESC32_C_FRAME_START |
BWI_DESC32_C_FRAME_END |
BWI_DESC32_C_INTR;
}
desc->addr = htole32(addr);
desc->ctrl = htole32(ctrl);
}
int
bwi_attach(struct bwi_softc *sc)
{
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
device_t dev = sc->sc_dev;
- struct ifnet *ifp;
struct bwi_mac *mac;
struct bwi_phy *phy;
int i, error;
uint8_t bands;
- uint8_t macaddr[IEEE80211_ADDR_LEN];
BWI_LOCK_INIT(sc);
/*
* Initialize taskq and various tasks
*/
sc->sc_tq = taskqueue_create("bwi_taskq", M_NOWAIT | M_ZERO,
taskqueue_thread_enqueue, &sc->sc_tq);
taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(dev));
TASK_INIT(&sc->sc_restart_task, 0, bwi_restart, sc);
-
callout_init_mtx(&sc->sc_calib_ch, &sc->sc_mtx, 0);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
/*
* Initialize sysctl variables
*/
sc->sc_fw_version = BWI_FW_VERSION3;
sc->sc_led_idle = (2350 * hz) / 1000;
sc->sc_led_blink = 1;
sc->sc_txpwr_calib = 1;
#ifdef BWI_DEBUG
sc->sc_debug = bwi_debug;
#endif
bwi_power_on(sc, 1);
error = bwi_bbp_attach(sc);
if (error)
goto fail;
error = bwi_bbp_power_on(sc, BWI_CLOCK_MODE_FAST);
if (error)
goto fail;
if (BWI_REGWIN_EXIST(&sc->sc_com_regwin)) {
error = bwi_set_clock_delay(sc);
if (error)
goto fail;
error = bwi_set_clock_mode(sc, BWI_CLOCK_MODE_FAST);
if (error)
goto fail;
error = bwi_get_pwron_delay(sc);
if (error)
goto fail;
}
error = bwi_bus_attach(sc);
if (error)
goto fail;
bwi_get_card_flags(sc);
bwi_led_attach(sc);
for (i = 0; i < sc->sc_nmac; ++i) {
struct bwi_regwin *old;
mac = &sc->sc_mac[i];
error = bwi_regwin_switch(sc, &mac->mac_regwin, &old);
if (error)
goto fail;
error = bwi_mac_lateattach(mac);
if (error)
goto fail;
error = bwi_regwin_switch(sc, old, NULL);
if (error)
goto fail;
}
/*
* XXX First MAC is known to exist
* TODO2
*/
mac = &sc->sc_mac[0];
phy = &mac->mac_phy;
bwi_bbp_power_off(sc);
error = bwi_dma_alloc(sc);
if (error)
goto fail;
error = bwi_mac_fw_alloc(mac);
if (error)
goto fail;
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
- ic = ifp->if_l2com;
-
- /* set these up early for if_printf use */
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
-
- ifp->if_softc = sc;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = bwi_init;
- ifp->if_ioctl = bwi_ioctl;
- ifp->if_start = bwi_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
callout_init_mtx(&sc->sc_watchdog_timer, &sc->sc_mtx, 0);
/*
* Setup ratesets, phytype, channels and get MAC address
*/
bands = 0;
if (phy->phy_mode == IEEE80211_MODE_11B ||
phy->phy_mode == IEEE80211_MODE_11G) {
setbit(&bands, IEEE80211_MODE_11B);
if (phy->phy_mode == IEEE80211_MODE_11B) {
ic->ic_phytype = IEEE80211_T_DS;
} else {
ic->ic_phytype = IEEE80211_T_OFDM;
setbit(&bands, IEEE80211_MODE_11G);
}
- bwi_get_eaddr(sc, BWI_SPROM_11BG_EADDR, macaddr);
- if (IEEE80211_IS_MULTICAST(macaddr)) {
- bwi_get_eaddr(sc, BWI_SPROM_11A_EADDR, macaddr);
- if (IEEE80211_IS_MULTICAST(macaddr)) {
+ bwi_get_eaddr(sc, BWI_SPROM_11BG_EADDR, ic->ic_macaddr);
+ if (IEEE80211_IS_MULTICAST(ic->ic_macaddr)) {
+ bwi_get_eaddr(sc, BWI_SPROM_11A_EADDR, ic->ic_macaddr);
+ if (IEEE80211_IS_MULTICAST(ic->ic_macaddr)) {
device_printf(dev,
"invalid MAC address: %6D\n",
- macaddr, ":");
+ ic->ic_macaddr, ":");
}
}
} else if (phy->phy_mode == IEEE80211_MODE_11A) {
/* TODO:11A */
setbit(&bands, IEEE80211_MODE_11A);
error = ENXIO;
goto fail;
} else {
panic("unknown phymode %d\n", phy->phy_mode);
}
/* Get locale */
sc->sc_locale = __SHIFTOUT(bwi_read_sprom(sc, BWI_SPROM_CARD_INFO),
BWI_SPROM_CARD_INFO_LOCALE);
DPRINTF(sc, BWI_DBG_ATTACH, "locale: %d\n", sc->sc_locale);
/* XXX use locale */
ieee80211_init_channels(ic, NULL, &bands);
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_caps = IEEE80211_C_STA |
IEEE80211_C_SHSLOT |
IEEE80211_C_SHPREAMBLE |
IEEE80211_C_WPA |
IEEE80211_C_BGSCAN |
IEEE80211_C_MONITOR;
ic->ic_opmode = IEEE80211_M_STA;
- ieee80211_ifattach(ic, macaddr);
+ ieee80211_ifattach(ic);
ic->ic_headroom = sizeof(struct bwi_txbuf_hdr);
/* override default methods */
ic->ic_vap_create = bwi_vap_create;
ic->ic_vap_delete = bwi_vap_delete;
ic->ic_raw_xmit = bwi_raw_xmit;
ic->ic_updateslot = bwi_updateslot;
ic->ic_scan_start = bwi_scan_start;
ic->ic_scan_end = bwi_scan_end;
ic->ic_set_channel = bwi_set_channel;
+ ic->ic_transmit = bwi_transmit;
+ ic->ic_parent = bwi_parent;
sc->sc_rates = ieee80211_get_ratetable(ic->ic_curchan);
ieee80211_radiotap_attach(ic,
&sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
BWI_TX_RADIOTAP_PRESENT,
&sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
BWI_RX_RADIOTAP_PRESENT);
/*
* Add sysctl nodes
*/
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"fw_version", CTLFLAG_RD, &sc->sc_fw_version, 0,
"Firmware version");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"led_idle", CTLFLAG_RW, &sc->sc_led_idle, 0,
"# ticks before LED enters idle state");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"led_blink", CTLFLAG_RW, &sc->sc_led_blink, 0,
"Allow LED to blink");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"txpwr_calib", CTLFLAG_RW, &sc->sc_txpwr_calib, 0,
"Enable software TX power calibration");
#ifdef BWI_DEBUG
SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"debug", CTLFLAG_RW, &sc->sc_debug, 0, "Debug flags");
#endif
if (bootverbose)
ieee80211_announce(ic);
return (0);
fail:
BWI_LOCK_DESTROY(sc);
return (error);
}
int
bwi_detach(struct bwi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
int i;
bwi_stop(sc, 1);
callout_drain(&sc->sc_led_blink_ch);
callout_drain(&sc->sc_calib_ch);
callout_drain(&sc->sc_watchdog_timer);
ieee80211_ifdetach(ic);
for (i = 0; i < sc->sc_nmac; ++i)
bwi_mac_detach(&sc->sc_mac[i]);
bwi_dma_free(sc);
- if_free(ifp);
taskqueue_free(sc->sc_tq);
+ mbufq_drain(&sc->sc_snd);
BWI_LOCK_DESTROY(sc);
return (0);
}
static struct ieee80211vap *
bwi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct bwi_vap *bvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return NULL;
- bvp = (struct bwi_vap *) malloc(sizeof(struct bwi_vap),
- M_80211_VAP, M_WAITOK | M_ZERO);
- if (bvp == NULL)
- return NULL;
+ bvp = malloc(sizeof(struct bwi_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &bvp->bv_vap;
/* enable s/w bmiss handling for sta mode */
ieee80211_vap_setup(ic, vap, name, unit, opmode,
- flags | IEEE80211_CLONE_NOBEACONS, bssid, mac);
+ flags | IEEE80211_CLONE_NOBEACONS, bssid);
/* override default methods */
bvp->bv_newstate = vap->iv_newstate;
vap->iv_newstate = bwi_newstate;
#if 0
vap->iv_update_beacon = bwi_beacon_update;
#endif
ieee80211_ratectl_init(vap);
/* complete setup */
- ieee80211_vap_attach(vap, bwi_media_change, ieee80211_media_status);
+ ieee80211_vap_attach(vap, bwi_media_change, ieee80211_media_status,
+ mac);
ic->ic_opmode = opmode;
return vap;
}
static void
bwi_vap_delete(struct ieee80211vap *vap)
{
struct bwi_vap *bvp = BWI_VAP(vap);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(bvp, M_80211_VAP);
}
void
bwi_suspend(struct bwi_softc *sc)
{
bwi_stop(sc, 1);
}
void
bwi_resume(struct bwi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- if (ifp->if_flags & IFF_UP)
+ if (sc->sc_ic.ic_nrunning > 0)
bwi_init(sc);
}
int
bwi_shutdown(struct bwi_softc *sc)
{
bwi_stop(sc, 1);
return 0;
}
static void
bwi_power_on(struct bwi_softc *sc, int with_pll)
{
uint32_t gpio_in, gpio_out, gpio_en;
uint16_t status;
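/* Power the chip up via the GPIO bits in PCI config space; skip if already on. */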
gpio_in = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_IN, 4);
if (gpio_in & BWI_PCIM_GPIO_PWR_ON)
goto back;
gpio_out = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4);
gpio_en = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, 4);
gpio_out |= BWI_PCIM_GPIO_PWR_ON;
gpio_en |= BWI_PCIM_GPIO_PWR_ON;
if (with_pll) {
/* Turn off PLL first */
gpio_out |= BWI_PCIM_GPIO_PLL_PWR_OFF;
gpio_en |= BWI_PCIM_GPIO_PLL_PWR_OFF;
}
pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4);
pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, gpio_en, 4);
DELAY(1000);
if (with_pll) {
/* Turn on PLL */
gpio_out &= ~BWI_PCIM_GPIO_PLL_PWR_OFF;
pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4);
DELAY(5000);
}
back:
/* Clear "Signaled Target Abort" */
status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2);
status &= ~PCIM_STATUS_STABORT;
pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2);
}
static int
bwi_power_off(struct bwi_softc *sc, int with_pll)
{
uint32_t gpio_out, gpio_en;
pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_IN, 4); /* dummy read */
gpio_out = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4);
gpio_en = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, 4);
gpio_out &= ~BWI_PCIM_GPIO_PWR_ON;
gpio_en |= BWI_PCIM_GPIO_PWR_ON;
if (with_pll) {
gpio_out |= BWI_PCIM_GPIO_PLL_PWR_OFF;
gpio_en |= BWI_PCIM_GPIO_PLL_PWR_OFF;
}
pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, gpio_out, 4);
pci_write_config(sc->sc_dev, BWI_PCIR_GPIO_ENABLE, gpio_en, 4);
return 0;
}
int
bwi_regwin_switch(struct bwi_softc *sc, struct bwi_regwin *rw,
struct bwi_regwin **old_rw)
{
int error;
if (old_rw != NULL)
*old_rw = NULL;
if (!BWI_REGWIN_EXIST(rw))
return EINVAL;
if (sc->sc_cur_regwin != rw) {
error = bwi_regwin_select(sc, rw->rw_id);
if (error) {
device_printf(sc->sc_dev, "can't select regwin %d\n",
rw->rw_id);
return error;
}
}
if (old_rw != NULL)
*old_rw = sc->sc_cur_regwin;
sc->sc_cur_regwin = rw;
return 0;
}
static int
bwi_regwin_select(struct bwi_softc *sc, int id)
{
uint32_t win = BWI_PCIM_REGWIN(id);
int i;
#define RETRY_MAX 50
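/* Write the window select and read it back until the value sticks. */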
for (i = 0; i < RETRY_MAX; ++i) {
pci_write_config(sc->sc_dev, BWI_PCIR_SEL_REGWIN, win, 4);
if (pci_read_config(sc->sc_dev, BWI_PCIR_SEL_REGWIN, 4) == win)
return 0;
DELAY(10);
}
#undef RETRY_MAX
return ENXIO;
}
static void
bwi_regwin_info(struct bwi_softc *sc, uint16_t *type, uint8_t *rev)
{
uint32_t val;
val = CSR_READ_4(sc, BWI_ID_HI);
*type = BWI_ID_HI_REGWIN_TYPE(val);
*rev = BWI_ID_HI_REGWIN_REV(val);
DPRINTF(sc, BWI_DBG_ATTACH, "regwin: type 0x%03x, rev %d, "
"vendor 0x%04x\n", *type, *rev,
__SHIFTOUT(val, BWI_ID_HI_REGWIN_VENDOR_MASK));
}
static int
bwi_bbp_attach(struct bwi_softc *sc)
{
#define N(arr) (int)(sizeof(arr) / sizeof(arr[0]))
uint16_t bbp_id, rw_type;
uint8_t rw_rev;
uint32_t info;
int error, nregwin, i;
/*
* Get 0th regwin information
* NOTE: 0th regwin should exist
*/
error = bwi_regwin_select(sc, 0);
if (error) {
device_printf(sc->sc_dev, "can't select regwin 0\n");
return error;
}
bwi_regwin_info(sc, &rw_type, &rw_rev);
/*
* Find out BBP id
*/
bbp_id = 0;
info = 0;
if (rw_type == BWI_REGWIN_T_COM) {
info = CSR_READ_4(sc, BWI_INFO);
bbp_id = __SHIFTOUT(info, BWI_INFO_BBPID_MASK);
BWI_CREATE_REGWIN(&sc->sc_com_regwin, 0, rw_type, rw_rev);
sc->sc_cap = CSR_READ_4(sc, BWI_CAPABILITY);
} else {
for (i = 0; i < N(bwi_bbpid_map); ++i) {
if (sc->sc_pci_did >= bwi_bbpid_map[i].did_min &&
sc->sc_pci_did <= bwi_bbpid_map[i].did_max) {
bbp_id = bwi_bbpid_map[i].bbp_id;
break;
}
}
if (bbp_id == 0) {
device_printf(sc->sc_dev, "no BBP id for device id "
"0x%04x\n", sc->sc_pci_did);
return ENXIO;
}
info = __SHIFTIN(sc->sc_pci_revid, BWI_INFO_BBPREV_MASK) |
__SHIFTIN(0, BWI_INFO_BBPPKG_MASK);
}
/*
* Find out number of regwins
*/
nregwin = 0;
if (rw_type == BWI_REGWIN_T_COM && rw_rev >= 4) {
nregwin = __SHIFTOUT(info, BWI_INFO_NREGWIN_MASK);
} else {
for (i = 0; i < N(bwi_regwin_count); ++i) {
if (bwi_regwin_count[i].bbp_id == bbp_id) {
nregwin = bwi_regwin_count[i].nregwin;
break;
}
}
if (nregwin == 0) {
device_printf(sc->sc_dev, "no number of win for "
"BBP id 0x%04x\n", bbp_id);
return ENXIO;
}
}
/* Record BBP id/rev for later use */
sc->sc_bbp_id = bbp_id;
sc->sc_bbp_rev = __SHIFTOUT(info, BWI_INFO_BBPREV_MASK);
sc->sc_bbp_pkg = __SHIFTOUT(info, BWI_INFO_BBPPKG_MASK);
device_printf(sc->sc_dev, "BBP: id 0x%04x, rev 0x%x, pkg %d\n",
sc->sc_bbp_id, sc->sc_bbp_rev, sc->sc_bbp_pkg);
DPRINTF(sc, BWI_DBG_ATTACH, "nregwin %d, cap 0x%08x\n",
nregwin, sc->sc_cap);
/*
* Create rest of the regwins
*/
/* Don't re-create the common regwin if it already exists */
i = BWI_REGWIN_EXIST(&sc->sc_com_regwin) ? 1 : 0;
for (; i < nregwin; ++i) {
/*
* Get regwin information
*/
error = bwi_regwin_select(sc, i);
if (error) {
device_printf(sc->sc_dev,
"can't select regwin %d\n", i);
return error;
}
bwi_regwin_info(sc, &rw_type, &rw_rev);
/*
* Try attach:
* 1) Bus (PCI/PCIE) regwin
* 2) MAC regwin
* Ignore all other regwin types
*/
if (rw_type == BWI_REGWIN_T_BUSPCI ||
rw_type == BWI_REGWIN_T_BUSPCIE) {
if (BWI_REGWIN_EXIST(&sc->sc_bus_regwin)) {
device_printf(sc->sc_dev,
"bus regwin already exists\n");
} else {
BWI_CREATE_REGWIN(&sc->sc_bus_regwin, i,
rw_type, rw_rev);
}
} else if (rw_type == BWI_REGWIN_T_MAC) {
/* XXX ignore return value */
bwi_mac_attach(sc, i, rw_rev);
}
}
/* At least one MAC should exist */
if (!BWI_REGWIN_EXIST(&sc->sc_mac[0].mac_regwin)) {
device_printf(sc->sc_dev, "no MAC was found\n");
return ENXIO;
}
KASSERT(sc->sc_nmac > 0, ("no mac's"));
/* Bus regwin must exist */
if (!BWI_REGWIN_EXIST(&sc->sc_bus_regwin)) {
device_printf(sc->sc_dev, "no bus regwin was found\n");
return ENXIO;
}
/* Start with first MAC */
error = bwi_regwin_switch(sc, &sc->sc_mac[0].mac_regwin, NULL);
if (error)
return error;
return 0;
#undef N
}
int
bwi_bus_init(struct bwi_softc *sc, struct bwi_mac *mac)
{
struct bwi_regwin *old, *bus;
uint32_t val;
int error;
bus = &sc->sc_bus_regwin;
KASSERT(sc->sc_cur_regwin == &mac->mac_regwin, ("not cur regwin"));
/*
* Tell bus to generate requested interrupts
*/
if (bus->rw_rev < 6 && bus->rw_type == BWI_REGWIN_T_BUSPCI) {
/*
* NOTE: Read BWI_FLAGS from MAC regwin
*/
val = CSR_READ_4(sc, BWI_FLAGS);
error = bwi_regwin_switch(sc, bus, &old);
if (error)
return error;
CSR_SETBITS_4(sc, BWI_INTRVEC, (val & BWI_FLAGS_INTR_MASK));
} else {
uint32_t mac_mask;
mac_mask = 1 << mac->mac_id;
error = bwi_regwin_switch(sc, bus, &old);
if (error)
return error;
val = pci_read_config(sc->sc_dev, BWI_PCIR_INTCTL, 4);
val |= mac_mask << 8;
pci_write_config(sc->sc_dev, BWI_PCIR_INTCTL, val, 4);
}
if (sc->sc_flags & BWI_F_BUS_INITED)
goto back;
if (bus->rw_type == BWI_REGWIN_T_BUSPCI) {
/*
* Enable prefetch and burst
*/
CSR_SETBITS_4(sc, BWI_BUS_CONFIG,
BWI_BUS_CONFIG_PREFETCH | BWI_BUS_CONFIG_BURST);
if (bus->rw_rev < 5) {
struct bwi_regwin *com = &sc->sc_com_regwin;
/*
* Configure timeouts for bus operation
*/
/*
* Set service timeout and request timeout
*/
CSR_SETBITS_4(sc, BWI_CONF_LO,
__SHIFTIN(BWI_CONF_LO_SERVTO, BWI_CONF_LO_SERVTO_MASK) |
__SHIFTIN(BWI_CONF_LO_REQTO, BWI_CONF_LO_REQTO_MASK));
/*
* If there is a common regwin, switch to it here
* and switch back to the bus regwin once we are done.
*/
if (BWI_REGWIN_EXIST(com)) {
error = bwi_regwin_switch(sc, com, NULL);
if (error)
return error;
}
/* Let bus know what we have changed */
CSR_WRITE_4(sc, BWI_BUS_ADDR, BWI_BUS_ADDR_MAGIC);
CSR_READ_4(sc, BWI_BUS_ADDR); /* Flush */
CSR_WRITE_4(sc, BWI_BUS_DATA, 0);
CSR_READ_4(sc, BWI_BUS_DATA); /* Flush */
if (BWI_REGWIN_EXIST(com)) {
error = bwi_regwin_switch(sc, bus, NULL);
if (error)
return error;
}
} else if (bus->rw_rev >= 11) {
/*
* Enable memory read multiple
*/
CSR_SETBITS_4(sc, BWI_BUS_CONFIG, BWI_BUS_CONFIG_MRM);
}
} else {
/* TODO:PCIE */
}
sc->sc_flags |= BWI_F_BUS_INITED;
back:
return bwi_regwin_switch(sc, old, NULL);
}
static void
bwi_get_card_flags(struct bwi_softc *sc)
{
#define PCI_VENDOR_APPLE 0x106b
#define PCI_VENDOR_DELL 0x1028
sc->sc_card_flags = bwi_read_sprom(sc, BWI_SPROM_CARD_FLAGS);
if (sc->sc_card_flags == 0xffff)
sc->sc_card_flags = 0;
if (sc->sc_pci_subvid == PCI_VENDOR_DELL &&
sc->sc_bbp_id == BWI_BBPID_BCM4301 &&
sc->sc_pci_revid == 0x74)
sc->sc_card_flags |= BWI_CARD_F_BT_COEXIST;
if (sc->sc_pci_subvid == PCI_VENDOR_APPLE &&
sc->sc_pci_subdid == 0x4e && /* XXX */
sc->sc_pci_revid > 0x40)
sc->sc_card_flags |= BWI_CARD_F_PA_GPIO9;
DPRINTF(sc, BWI_DBG_ATTACH, "card flags 0x%04x\n", sc->sc_card_flags);
#undef PCI_VENDOR_DELL
#undef PCI_VENDOR_APPLE
}
static void
bwi_get_eaddr(struct bwi_softc *sc, uint16_t eaddr_ofs, uint8_t *eaddr)
{
int i;
for (i = 0; i < 3; ++i) {
*((uint16_t *)eaddr + i) =
htobe16(bwi_read_sprom(sc, eaddr_ofs + 2 * i));
}
}
static void
bwi_get_clock_freq(struct bwi_softc *sc, struct bwi_clock_freq *freq)
{
struct bwi_regwin *com;
uint32_t val;
u_int div;
int src;
bzero(freq, sizeof(*freq));
com = &sc->sc_com_regwin;
KASSERT(BWI_REGWIN_EXIST(com), ("regwin does not exist"));
KASSERT(sc->sc_cur_regwin == com, ("wrong regwin"));
KASSERT(sc->sc_cap & BWI_CAP_CLKMODE, ("wrong clock mode"));
/*
* Calculate clock frequency
*/
src = -1;
div = 0;
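/* Derive the clock source and divider from the common regwin rev. */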
if (com->rw_rev < 6) {
val = pci_read_config(sc->sc_dev, BWI_PCIR_GPIO_OUT, 4);
if (val & BWI_PCIM_GPIO_OUT_CLKSRC) {
src = BWI_CLKSRC_PCI;
div = 64;
} else {
src = BWI_CLKSRC_CS_OSC;
div = 32;
}
} else if (com->rw_rev < 10) {
val = CSR_READ_4(sc, BWI_CLOCK_CTRL);
src = __SHIFTOUT(val, BWI_CLOCK_CTRL_CLKSRC);
if (src == BWI_CLKSRC_LP_OSC) {
div = 1;
} else {
div = (__SHIFTOUT(val, BWI_CLOCK_CTRL_FDIV) + 1) << 2;
/* Unknown source */
if (src >= BWI_CLKSRC_MAX)
src = BWI_CLKSRC_CS_OSC;
}
} else {
val = CSR_READ_4(sc, BWI_CLOCK_INFO);
src = BWI_CLKSRC_CS_OSC;
div = (__SHIFTOUT(val, BWI_CLOCK_INFO_FDIV) + 1) << 2;
}
KASSERT(src >= 0 && src < BWI_CLKSRC_MAX, ("bad src %d", src));
KASSERT(div != 0, ("div zero"));
DPRINTF(sc, BWI_DBG_ATTACH, "clksrc %s\n",
src == BWI_CLKSRC_PCI ? "PCI" :
(src == BWI_CLKSRC_LP_OSC ? "LP_OSC" : "CS_OSC"));
freq->clkfreq_min = bwi_clkfreq[src].freq_min / div;
freq->clkfreq_max = bwi_clkfreq[src].freq_max / div;
DPRINTF(sc, BWI_DBG_ATTACH, "clkfreq min %u, max %u\n",
freq->clkfreq_min, freq->clkfreq_max);
}
static int
bwi_set_clock_mode(struct bwi_softc *sc, enum bwi_clock_mode clk_mode)
{
struct bwi_regwin *old, *com;
uint32_t clk_ctrl, clk_src;
int error, pwr_off = 0;
com = &sc->sc_com_regwin;
if (!BWI_REGWIN_EXIST(com))
return 0;
if (com->rw_rev >= 10 || com->rw_rev < 6)
return 0;
/*
* For a common regwin whose rev is in [6, 10), the chip
* must be capable of changing the clock mode.
*/
if ((sc->sc_cap & BWI_CAP_CLKMODE) == 0)
return 0;
error = bwi_regwin_switch(sc, com, &old);
if (error)
return error;
if (clk_mode == BWI_CLOCK_MODE_FAST)
bwi_power_on(sc, 0); /* Don't turn on PLL */
clk_ctrl = CSR_READ_4(sc, BWI_CLOCK_CTRL);
clk_src = __SHIFTOUT(clk_ctrl, BWI_CLOCK_CTRL_CLKSRC);
switch (clk_mode) {
case BWI_CLOCK_MODE_FAST:
clk_ctrl &= ~BWI_CLOCK_CTRL_SLOW;
clk_ctrl |= BWI_CLOCK_CTRL_IGNPLL;
break;
case BWI_CLOCK_MODE_SLOW:
clk_ctrl |= BWI_CLOCK_CTRL_SLOW;
break;
case BWI_CLOCK_MODE_DYN:
clk_ctrl &= ~(BWI_CLOCK_CTRL_SLOW |
BWI_CLOCK_CTRL_IGNPLL |
BWI_CLOCK_CTRL_NODYN);
if (clk_src != BWI_CLKSRC_CS_OSC) {
clk_ctrl |= BWI_CLOCK_CTRL_NODYN;
pwr_off = 1;
}
break;
}
CSR_WRITE_4(sc, BWI_CLOCK_CTRL, clk_ctrl);
if (pwr_off)
bwi_power_off(sc, 0); /* Leave PLL as it is */
return bwi_regwin_switch(sc, old, NULL);
}
static int
bwi_set_clock_delay(struct bwi_softc *sc)
{
struct bwi_regwin *old, *com;
int error;
com = &sc->sc_com_regwin;
if (!BWI_REGWIN_EXIST(com))
return 0;
error = bwi_regwin_switch(sc, com, &old);
if (error)
return error;
if (sc->sc_bbp_id == BWI_BBPID_BCM4321) {
if (sc->sc_bbp_rev == 0)
CSR_WRITE_4(sc, BWI_CONTROL, BWI_CONTROL_MAGIC0);
else if (sc->sc_bbp_rev == 1)
CSR_WRITE_4(sc, BWI_CONTROL, BWI_CONTROL_MAGIC1);
}
if (sc->sc_cap & BWI_CAP_CLKMODE) {
if (com->rw_rev >= 10) {
CSR_FILT_SETBITS_4(sc, BWI_CLOCK_INFO, 0xffff, 0x40000);
} else {
struct bwi_clock_freq freq;
bwi_get_clock_freq(sc, &freq);
CSR_WRITE_4(sc, BWI_PLL_ON_DELAY,
howmany(freq.clkfreq_max * 150, 1000000));
CSR_WRITE_4(sc, BWI_FREQ_SEL_DELAY,
howmany(freq.clkfreq_max * 15, 1000000));
}
}
return bwi_regwin_switch(sc, old, NULL);
}
static void
-bwi_init(void *xsc)
+bwi_init(struct bwi_softc *sc)
{
- struct bwi_softc *sc = xsc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
BWI_LOCK(sc);
bwi_init_statechg(sc, 1);
BWI_UNLOCK(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->sc_flags & BWI_F_RUNNING)
ieee80211_start_all(ic); /* start all vap's */
}
static void
bwi_init_statechg(struct bwi_softc *sc, int statechg)
{
- struct ifnet *ifp = sc->sc_ifp;
struct bwi_mac *mac;
int error;
+ BWI_ASSERT_LOCKED(sc);
+
bwi_stop_locked(sc, statechg);
bwi_bbp_power_on(sc, BWI_CLOCK_MODE_FAST);
/* TODO: 2 MAC */
mac = &sc->sc_mac[0];
error = bwi_regwin_switch(sc, &mac->mac_regwin, NULL);
if (error) {
- if_printf(ifp, "%s: error %d on regwin switch\n",
+ device_printf(sc->sc_dev, "%s: error %d on regwin switch\n",
__func__, error);
goto bad;
}
error = bwi_mac_init(mac);
if (error) {
- if_printf(ifp, "%s: error %d on MAC init\n", __func__, error);
+ device_printf(sc->sc_dev, "%s: error %d on MAC init\n",
+ __func__, error);
goto bad;
}
bwi_bbp_power_on(sc, BWI_CLOCK_MODE_DYN);
bwi_set_bssid(sc, bwi_zero_addr); /* Clear BSSID */
- bwi_set_addr_filter(sc, BWI_ADDR_FILTER_MYADDR, IF_LLADDR(ifp));
+ bwi_set_addr_filter(sc, BWI_ADDR_FILTER_MYADDR, sc->sc_ic.ic_macaddr);
bwi_mac_reset_hwkeys(mac);
if ((mac->mac_flags & BWI_MAC_F_HAS_TXSTATS) == 0) {
int i;
#define NRETRY 1000
/*
* Drain any possible pending TX status
*/
for (i = 0; i < NRETRY; ++i) {
if ((CSR_READ_4(sc, BWI_TXSTATUS0) &
BWI_TXSTATUS0_VALID) == 0)
break;
CSR_READ_4(sc, BWI_TXSTATUS1);
}
if (i == NRETRY)
- if_printf(ifp, "%s: can't drain TX status\n", __func__);
+ device_printf(sc->sc_dev,
+ "%s: can't drain TX status\n", __func__);
#undef NRETRY
}
if (mac->mac_phy.phy_mode == IEEE80211_MODE_11G)
bwi_mac_updateslot(mac, 1);
/* Start MAC */
error = bwi_mac_start(mac);
if (error) {
- if_printf(ifp, "%s: error %d starting MAC\n", __func__, error);
+ device_printf(sc->sc_dev, "%s: error %d starting MAC\n",
+ __func__, error);
goto bad;
}
/* Clear stop flag before enabling interrupt */
sc->sc_flags &= ~BWI_F_STOP;
-
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_flags |= BWI_F_RUNNING;
callout_reset(&sc->sc_watchdog_timer, hz, bwi_watchdog, sc);
/* Enable intrs */
bwi_enable_intrs(sc, BWI_INIT_INTRS);
return;
bad:
bwi_stop_locked(sc, 1);
}
-static int
-bwi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+bwi_parent(struct ieee80211com *ic)
{
-#define IS_RUNNING(ifp) \
- ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
- struct bwi_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *) data;
- int error = 0, startall = 0;
+ struct bwi_softc *sc = ic->ic_softc;
+ int startall = 0;
- switch (cmd) {
- case SIOCSIFFLAGS:
- BWI_LOCK(sc);
- if (IS_RUNNING(ifp)) {
- struct bwi_mac *mac;
- int promisc = -1;
+ BWI_LOCK(sc);
+ if (ic->ic_nrunning > 0) {
+ struct bwi_mac *mac;
+ int promisc = -1;
- KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
- ("current regwin type %d",
- sc->sc_cur_regwin->rw_type));
- mac = (struct bwi_mac *)sc->sc_cur_regwin;
+ KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
+ ("current regwin type %d",
+ sc->sc_cur_regwin->rw_type));
+ mac = (struct bwi_mac *)sc->sc_cur_regwin;
- if ((ifp->if_flags & IFF_PROMISC) &&
- (sc->sc_flags & BWI_F_PROMISC) == 0) {
- promisc = 1;
- sc->sc_flags |= BWI_F_PROMISC;
- } else if ((ifp->if_flags & IFF_PROMISC) == 0 &&
- (sc->sc_flags & BWI_F_PROMISC)) {
- promisc = 0;
- sc->sc_flags &= ~BWI_F_PROMISC;
- }
-
- if (promisc >= 0)
- bwi_mac_set_promisc(mac, promisc);
+ if (ic->ic_promisc > 0 && (sc->sc_flags & BWI_F_PROMISC) == 0) {
+ promisc = 1;
+ sc->sc_flags |= BWI_F_PROMISC;
+ } else if (ic->ic_promisc == 0 &&
+ (sc->sc_flags & BWI_F_PROMISC) != 0) {
+ promisc = 0;
+ sc->sc_flags &= ~BWI_F_PROMISC;
}
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
- bwi_init_statechg(sc, 1);
- startall = 1;
- }
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- bwi_stop_locked(sc, 1);
- }
- BWI_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
+ if (promisc >= 0)
+ bwi_mac_set_promisc(mac, promisc);
}
- return error;
-#undef IS_RUNNING
+ if (ic->ic_nrunning > 0) {
+ if ((sc->sc_flags & BWI_F_RUNNING) == 0) {
+ bwi_init_statechg(sc, 1);
+ startall = 1;
+ }
+ } else if (sc->sc_flags & BWI_F_RUNNING)
+ bwi_stop_locked(sc, 1);
+ BWI_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
}
-static void
-bwi_start(struct ifnet *ifp)
+static int
+bwi_transmit(struct ieee80211com *ic, struct mbuf *m)
{
- struct bwi_softc *sc = ifp->if_softc;
+ struct bwi_softc *sc = ic->ic_softc;
+ int error;
BWI_LOCK(sc);
- bwi_start_locked(ifp);
+ if ((sc->sc_flags & BWI_F_RUNNING) == 0) {
+ BWI_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ BWI_UNLOCK(sc);
+ return (error);
+ }
+ bwi_start_locked(sc);
BWI_UNLOCK(sc);
+ return (0);
}
static void
-bwi_start_locked(struct ifnet *ifp)
+bwi_start_locked(struct bwi_softc *sc)
{
- struct bwi_softc *sc = ifp->if_softc;
struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
- struct ieee80211_key *k;
struct mbuf *m;
int trans, idx;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
+ BWI_ASSERT_LOCKED(sc);
trans = 0;
idx = tbd->tbd_idx;
- while (tbd->tbd_buf[idx].tb_mbuf == NULL) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m); /* XXX: LOCK */
- if (m == NULL)
- break;
-
+ while (tbd->tbd_buf[idx].tb_mbuf == NULL &&
+ tbd->tbd_used + BWI_TX_NSPRDESC < BWI_TX_NDESC &&
+ (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
wh = mtod(m, struct ieee80211_frame *);
- if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
- k = ieee80211_crypto_encap(ni, m);
- if (k == NULL) {
- ieee80211_free_node(ni);
- m_freem(m);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- continue;
- }
+ if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) != 0 &&
+ ieee80211_crypto_encap(ni, m) == NULL) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
+ ieee80211_free_node(ni);
+ m_freem(m);
+ continue;
}
- wh = NULL; /* Catch any invalid use */
-
if (bwi_encap(sc, idx, m, ni) != 0) {
/* 'm' is freed in bwi_encap() if we reach here */
- if (ni != NULL)
+ if (ni != NULL) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ } else
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
continue;
}
-
trans = 1;
tbd->tbd_used++;
idx = (idx + 1) % BWI_TX_NDESC;
-
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
-
- if (tbd->tbd_used + BWI_TX_NSPRDESC >= BWI_TX_NDESC) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- break;
- }
}
- tbd->tbd_idx = idx;
+ tbd->tbd_idx = idx;
if (trans)
sc->sc_tx_timer = 5;
}
static int
bwi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct bwi_softc *sc = ic->ic_softc;
/* XXX wme? */
struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
int idx, error;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ if ((sc->sc_flags & BWI_F_RUNNING) == 0) {
ieee80211_free_node(ni);
m_freem(m);
return ENETDOWN;
}
BWI_LOCK(sc);
idx = tbd->tbd_idx;
KASSERT(tbd->tbd_buf[idx].tb_mbuf == NULL, ("slot %d not empty", idx));
if (params == NULL) {
/*
* Legacy path; interpret frame contents to decide
* precisely how to send the frame.
*/
error = bwi_encap(sc, idx, m, ni);
} else {
/*
* Caller supplied explicit parameters to use in
* sending the frame.
*/
error = bwi_encap_raw(sc, idx, m, ni, params);
}
if (error == 0) {
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- if (++tbd->tbd_used + BWI_TX_NSPRDESC >= BWI_TX_NDESC)
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ tbd->tbd_used++;
tbd->tbd_idx = (idx + 1) % BWI_TX_NDESC;
sc->sc_tx_timer = 5;
- } else {
+ } else
/* NB: m is reclaimed on encap failure */
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- }
BWI_UNLOCK(sc);
return error;
}
static void
bwi_watchdog(void *arg)
{
struct bwi_softc *sc;
- struct ifnet *ifp;
sc = arg;
- ifp = sc->sc_ifp;
BWI_ASSERT_LOCKED(sc);
if (sc->sc_tx_timer != 0 && --sc->sc_tx_timer == 0) {
- if_printf(ifp, "watchdog timeout\n");
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ device_printf(sc->sc_dev, "watchdog timeout\n");
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
taskqueue_enqueue(sc->sc_tq, &sc->sc_restart_task);
}
callout_reset(&sc->sc_watchdog_timer, hz, bwi_watchdog, sc);
}
static void
bwi_stop(struct bwi_softc *sc, int statechg)
{
BWI_LOCK(sc);
bwi_stop_locked(sc, statechg);
BWI_UNLOCK(sc);
}
static void
bwi_stop_locked(struct bwi_softc *sc, int statechg)
{
- struct ifnet *ifp = sc->sc_ifp;
struct bwi_mac *mac;
int i, error, pwr_off = 0;
BWI_ASSERT_LOCKED(sc);
callout_stop(&sc->sc_calib_ch);
callout_stop(&sc->sc_led_blink_ch);
sc->sc_led_blinking = 0;
sc->sc_flags |= BWI_F_STOP;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (sc->sc_flags & BWI_F_RUNNING) {
KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
("current regwin type %d", sc->sc_cur_regwin->rw_type));
mac = (struct bwi_mac *)sc->sc_cur_regwin;
bwi_disable_intrs(sc, BWI_ALL_INTRS);
CSR_READ_4(sc, BWI_MAC_INTR_MASK);
bwi_mac_stop(mac);
}
for (i = 0; i < sc->sc_nmac; ++i) {
struct bwi_regwin *old_rw;
mac = &sc->sc_mac[i];
if ((mac->mac_flags & BWI_MAC_F_INITED) == 0)
continue;
error = bwi_regwin_switch(sc, &mac->mac_regwin, &old_rw);
if (error)
continue;
bwi_mac_shutdown(mac);
pwr_off = 1;
bwi_regwin_switch(sc, old_rw, NULL);
}
if (pwr_off)
bwi_bbp_power_off(sc);
sc->sc_tx_timer = 0;
callout_stop(&sc->sc_watchdog_timer);
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->sc_flags &= ~BWI_F_RUNNING;
}
void
bwi_intr(void *xsc)
{
struct bwi_softc *sc = xsc;
- struct ifnet *ifp = sc->sc_ifp;
struct bwi_mac *mac;
uint32_t intr_status;
uint32_t txrx_intr_status[BWI_TXRX_NRING];
int i, txrx_error, tx = 0, rx_data = -1;
BWI_LOCK(sc);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+ if ((sc->sc_flags & BWI_F_RUNNING) == 0 ||
(sc->sc_flags & BWI_F_STOP)) {
BWI_UNLOCK(sc);
return;
}
/*
* Get interrupt status
*/
intr_status = CSR_READ_4(sc, BWI_MAC_INTR_STATUS);
if (intr_status == 0xffffffff) { /* Not for us */
BWI_UNLOCK(sc);
return;
}
DPRINTF(sc, BWI_DBG_INTR, "intr status 0x%08x\n", intr_status);
intr_status &= CSR_READ_4(sc, BWI_MAC_INTR_MASK);
if (intr_status == 0) { /* Nothing is interesting */
BWI_UNLOCK(sc);
return;
}
KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
("current regwin type %d", sc->sc_cur_regwin->rw_type));
mac = (struct bwi_mac *)sc->sc_cur_regwin;
txrx_error = 0;
DPRINTF(sc, BWI_DBG_INTR, "%s\n", "TX/RX intr");
for (i = 0; i < BWI_TXRX_NRING; ++i) {
uint32_t mask;
if (BWI_TXRX_IS_RX(i))
mask = BWI_TXRX_RX_INTRS;
else
mask = BWI_TXRX_TX_INTRS;
txrx_intr_status[i] =
CSR_READ_4(sc, BWI_TXRX_INTR_STATUS(i)) & mask;
_DPRINTF(sc, BWI_DBG_INTR, ", %d 0x%08x",
i, txrx_intr_status[i]);
if (txrx_intr_status[i] & BWI_TXRX_INTR_ERROR) {
- if_printf(ifp,
+ device_printf(sc->sc_dev,
"%s: intr fatal TX/RX (%d) error 0x%08x\n",
__func__, i, txrx_intr_status[i]);
txrx_error = 1;
}
}
_DPRINTF(sc, BWI_DBG_INTR, "%s\n", "");
/*
* Acknowledge interrupt
*/
CSR_WRITE_4(sc, BWI_MAC_INTR_STATUS, intr_status);
for (i = 0; i < BWI_TXRX_NRING; ++i)
CSR_WRITE_4(sc, BWI_TXRX_INTR_STATUS(i), txrx_intr_status[i]);
/* Disable all interrupts */
bwi_disable_intrs(sc, BWI_ALL_INTRS);
/*
* http://bcm-specs.sipsolutions.net/Interrupts
* Says for this bit (0x800):
* "Fatal Error
*
* We got this one while testing things when by accident the
* template ram wasn't set to big endian when it should have
* been after writing the initial values. It keeps on being
* triggered, the only way to stop it seems to shut down the
* chip."
*
 * Suggesting that we should never get it, and that if we do, we are
 * not feeding TX packets into the MAC correctly. Apparently it is
 * valid only on MAC version 5 and higher, but I couldn't find a
 * reference for that... Since I see them from time to time on my
 * card, this suggests an error in the TX path still...
*/
if (intr_status & BWI_INTR_PHY_TXERR) {
if (mac->mac_flags & BWI_MAC_F_PHYE_RESET) {
- if_printf(ifp, "%s: intr PHY TX error\n", __func__);
+ device_printf(sc->sc_dev, "%s: intr PHY TX error\n",
+ __func__);
taskqueue_enqueue(sc->sc_tq, &sc->sc_restart_task);
BWI_UNLOCK(sc);
return;
}
}
if (txrx_error) {
/* TODO: reset device */
}
if (intr_status & BWI_INTR_TBTT)
bwi_mac_config_ps(mac);
if (intr_status & BWI_INTR_EO_ATIM)
- if_printf(ifp, "EO_ATIM\n");
+ device_printf(sc->sc_dev, "EO_ATIM\n");
if (intr_status & BWI_INTR_PMQ) {
for (;;) {
if ((CSR_READ_4(sc, BWI_MAC_PS_STATUS) & 0x8) == 0)
break;
}
CSR_WRITE_2(sc, BWI_MAC_PS_STATUS, 0x2);
}
if (intr_status & BWI_INTR_NOISE)
- if_printf(ifp, "intr noise\n");
+ device_printf(sc->sc_dev, "intr noise\n");
if (txrx_intr_status[0] & BWI_TXRX_INTR_RX) {
rx_data = sc->sc_rxeof(sc);
if (sc->sc_flags & BWI_F_STOP) {
BWI_UNLOCK(sc);
return;
}
}
if (txrx_intr_status[3] & BWI_TXRX_INTR_RX) {
sc->sc_txeof_status(sc);
tx = 1;
}
if (intr_status & BWI_INTR_TX_DONE) {
bwi_txeof(sc);
tx = 1;
}
/* Re-enable interrupts */
bwi_enable_intrs(sc, BWI_INIT_INTRS);
if (sc->sc_blink_led != NULL && sc->sc_led_blink) {
int evt = BWI_LED_EVENT_NONE;
if (tx && rx_data > 0) {
if (sc->sc_rx_rate > sc->sc_tx_rate)
evt = BWI_LED_EVENT_RX;
else
evt = BWI_LED_EVENT_TX;
} else if (tx) {
evt = BWI_LED_EVENT_TX;
} else if (rx_data > 0) {
evt = BWI_LED_EVENT_RX;
} else if (rx_data == 0) {
evt = BWI_LED_EVENT_POLL;
}
if (evt != BWI_LED_EVENT_NONE)
bwi_led_event(sc, evt);
}
BWI_UNLOCK(sc);
}
static void
bwi_scan_start(struct ieee80211com *ic)
{
struct bwi_softc *sc = ic->ic_softc;
BWI_LOCK(sc);
/* Enable MAC beacon promiscuity */
CSR_SETBITS_4(sc, BWI_MAC_STATUS, BWI_MAC_STATUS_PASS_BCN);
BWI_UNLOCK(sc);
}
static void
bwi_set_channel(struct ieee80211com *ic)
{
struct bwi_softc *sc = ic->ic_softc;
struct ieee80211_channel *c = ic->ic_curchan;
struct bwi_mac *mac;
BWI_LOCK(sc);
KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
("current regwin type %d", sc->sc_cur_regwin->rw_type));
mac = (struct bwi_mac *)sc->sc_cur_regwin;
bwi_rf_set_chan(mac, ieee80211_chan2ieee(ic, c), 0);
sc->sc_rates = ieee80211_get_ratetable(c);
/*
* Setup radio tap channel freq and flags
*/
sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq =
htole16(c->ic_freq);
sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags =
htole16(c->ic_flags & 0xffff);
BWI_UNLOCK(sc);
}
static void
bwi_scan_end(struct ieee80211com *ic)
{
struct bwi_softc *sc = ic->ic_softc;
BWI_LOCK(sc);
CSR_CLRBITS_4(sc, BWI_MAC_STATUS, BWI_MAC_STATUS_PASS_BCN);
BWI_UNLOCK(sc);
}
static int
bwi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct bwi_vap *bvp = BWI_VAP(vap);
- struct ieee80211com *ic = vap->iv_ic;
- enum ieee80211_state ostate = vap->iv_state;
+ struct ieee80211com *ic = vap->iv_ic;
struct bwi_softc *sc = ic->ic_softc;
+ enum ieee80211_state ostate = vap->iv_state;
struct bwi_mac *mac;
int error;
BWI_LOCK(sc);
callout_stop(&sc->sc_calib_ch);
if (nstate == IEEE80211_S_INIT)
sc->sc_txpwrcb_type = BWI_TXPWR_INIT;
bwi_led_newstate(sc, nstate);
error = bvp->bv_newstate(vap, nstate, arg);
if (error != 0)
goto back;
/*
* Clear the BSSID when we stop a STA
*/
if (vap->iv_opmode == IEEE80211_M_STA) {
if (ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
/*
* Clear out the BSSID. If we reassociate to
* the same AP, this will reinitialize things
* correctly...
*/
if (ic->ic_opmode == IEEE80211_M_STA &&
!(sc->sc_flags & BWI_F_STOP))
bwi_set_bssid(sc, bwi_zero_addr);
}
}
if (vap->iv_opmode == IEEE80211_M_MONITOR) {
/* Nothing to do */
} else if (nstate == IEEE80211_S_RUN) {
bwi_set_bssid(sc, vap->iv_bss->ni_bssid);
KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
("current regwin type %d", sc->sc_cur_regwin->rw_type));
mac = (struct bwi_mac *)sc->sc_cur_regwin;
/* Initial TX power calibration */
bwi_mac_calibrate_txpower(mac, BWI_TXPWR_INIT);
#ifdef notyet
sc->sc_txpwrcb_type = BWI_TXPWR_FORCE;
#else
sc->sc_txpwrcb_type = BWI_TXPWR_CALIB;
#endif
callout_reset(&sc->sc_calib_ch, hz, bwi_calibrate, sc);
}
back:
BWI_UNLOCK(sc);
return error;
}
static int
bwi_media_change(struct ifnet *ifp)
{
int error = ieee80211_media_change(ifp);
/* NB: only the fixed rate can change and that doesn't need a reset */
return (error == ENETRESET ? 0 : error);
}
static int
bwi_dma_alloc(struct bwi_softc *sc)
{
int error, i, has_txstats;
bus_addr_t lowaddr = 0;
bus_size_t tx_ring_sz, rx_ring_sz, desc_sz = 0;
uint32_t txrx_ctrl_step = 0;
has_txstats = 0;
for (i = 0; i < sc->sc_nmac; ++i) {
if (sc->sc_mac[i].mac_flags & BWI_MAC_F_HAS_TXSTATS) {
has_txstats = 1;
break;
}
}
switch (sc->sc_bus_space) {
case BWI_BUS_SPACE_30BIT:
case BWI_BUS_SPACE_32BIT:
if (sc->sc_bus_space == BWI_BUS_SPACE_30BIT)
lowaddr = BWI_BUS_SPACE_MAXADDR;
else
lowaddr = BUS_SPACE_MAXADDR_32BIT;
desc_sz = sizeof(struct bwi_desc32);
txrx_ctrl_step = 0x20;
sc->sc_init_tx_ring = bwi_init_tx_ring32;
sc->sc_free_tx_ring = bwi_free_tx_ring32;
sc->sc_init_rx_ring = bwi_init_rx_ring32;
sc->sc_free_rx_ring = bwi_free_rx_ring32;
sc->sc_setup_rxdesc = bwi_setup_rx_desc32;
sc->sc_setup_txdesc = bwi_setup_tx_desc32;
sc->sc_rxeof = bwi_rxeof32;
sc->sc_start_tx = bwi_start_tx32;
if (has_txstats) {
sc->sc_init_txstats = bwi_init_txstats32;
sc->sc_free_txstats = bwi_free_txstats32;
sc->sc_txeof_status = bwi_txeof_status32;
}
break;
case BWI_BUS_SPACE_64BIT:
lowaddr = BUS_SPACE_MAXADDR; /* XXX */
desc_sz = sizeof(struct bwi_desc64);
txrx_ctrl_step = 0x40;
sc->sc_init_tx_ring = bwi_init_tx_ring64;
sc->sc_free_tx_ring = bwi_free_tx_ring64;
sc->sc_init_rx_ring = bwi_init_rx_ring64;
sc->sc_free_rx_ring = bwi_free_rx_ring64;
sc->sc_setup_rxdesc = bwi_setup_rx_desc64;
sc->sc_setup_txdesc = bwi_setup_tx_desc64;
sc->sc_rxeof = bwi_rxeof64;
sc->sc_start_tx = bwi_start_tx64;
if (has_txstats) {
sc->sc_init_txstats = bwi_init_txstats64;
sc->sc_free_txstats = bwi_free_txstats64;
sc->sc_txeof_status = bwi_txeof_status64;
}
break;
}
KASSERT(lowaddr != 0, ("lowaddr zero"));
KASSERT(desc_sz != 0, ("desc_sz zero"));
KASSERT(txrx_ctrl_step != 0, ("txrx_ctrl_step zero"));
tx_ring_sz = roundup(desc_sz * BWI_TX_NDESC, BWI_RING_ALIGN);
rx_ring_sz = roundup(desc_sz * BWI_RX_NDESC, BWI_RING_ALIGN);
/*
* Create top level DMA tag
*/
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
BWI_ALIGN, 0, /* alignment, bounds */
lowaddr, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE, /* maxsize */
BUS_SPACE_UNRESTRICTED, /* nsegments */
BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->sc_parent_dtag);
if (error) {
device_printf(sc->sc_dev, "can't create parent DMA tag\n");
return error;
}
#define TXRX_CTRL(idx) (BWI_TXRX_CTRL_BASE + (idx) * txrx_ctrl_step)
/*
* Create TX ring DMA stuffs
*/
error = bus_dma_tag_create(sc->sc_parent_dtag,
BWI_RING_ALIGN, 0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
tx_ring_sz,
1,
tx_ring_sz,
0,
NULL, NULL,
&sc->sc_txring_dtag);
if (error) {
device_printf(sc->sc_dev, "can't create TX ring DMA tag\n");
return error;
}
for (i = 0; i < BWI_TX_NRING; ++i) {
error = bwi_dma_ring_alloc(sc, sc->sc_txring_dtag,
&sc->sc_tx_rdata[i], tx_ring_sz,
TXRX_CTRL(i));
if (error) {
device_printf(sc->sc_dev, "%dth TX ring "
"DMA alloc failed\n", i);
return error;
}
}
/*
* Create RX ring DMA stuffs
*/
error = bus_dma_tag_create(sc->sc_parent_dtag,
BWI_RING_ALIGN, 0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
rx_ring_sz,
1,
rx_ring_sz,
0,
NULL, NULL,
&sc->sc_rxring_dtag);
if (error) {
device_printf(sc->sc_dev, "can't create RX ring DMA tag\n");
return error;
}
error = bwi_dma_ring_alloc(sc, sc->sc_rxring_dtag, &sc->sc_rx_rdata,
rx_ring_sz, TXRX_CTRL(0));
if (error) {
device_printf(sc->sc_dev, "RX ring DMA alloc failed\n");
return error;
}
if (has_txstats) {
error = bwi_dma_txstats_alloc(sc, TXRX_CTRL(3), desc_sz);
if (error) {
device_printf(sc->sc_dev,
"TX stats DMA alloc failed\n");
return error;
}
}
#undef TXRX_CTRL
return bwi_dma_mbuf_create(sc);
}
static void
bwi_dma_free(struct bwi_softc *sc)
{
if (sc->sc_txring_dtag != NULL) {
int i;
for (i = 0; i < BWI_TX_NRING; ++i) {
struct bwi_ring_data *rd = &sc->sc_tx_rdata[i];
if (rd->rdata_desc != NULL) {
bus_dmamap_unload(sc->sc_txring_dtag,
rd->rdata_dmap);
bus_dmamem_free(sc->sc_txring_dtag,
rd->rdata_desc,
rd->rdata_dmap);
}
}
bus_dma_tag_destroy(sc->sc_txring_dtag);
}
if (sc->sc_rxring_dtag != NULL) {
struct bwi_ring_data *rd = &sc->sc_rx_rdata;
if (rd->rdata_desc != NULL) {
bus_dmamap_unload(sc->sc_rxring_dtag, rd->rdata_dmap);
bus_dmamem_free(sc->sc_rxring_dtag, rd->rdata_desc,
rd->rdata_dmap);
}
bus_dma_tag_destroy(sc->sc_rxring_dtag);
}
bwi_dma_txstats_free(sc);
bwi_dma_mbuf_destroy(sc, BWI_TX_NRING, 1);
if (sc->sc_parent_dtag != NULL)
bus_dma_tag_destroy(sc->sc_parent_dtag);
}
static int
bwi_dma_ring_alloc(struct bwi_softc *sc, bus_dma_tag_t dtag,
struct bwi_ring_data *rd, bus_size_t size,
uint32_t txrx_ctrl)
{
int error;
error = bus_dmamem_alloc(dtag, &rd->rdata_desc,
BUS_DMA_WAITOK | BUS_DMA_ZERO,
&rd->rdata_dmap);
if (error) {
device_printf(sc->sc_dev, "can't allocate DMA mem\n");
return error;
}
error = bus_dmamap_load(dtag, rd->rdata_dmap, rd->rdata_desc, size,
bwi_dma_ring_addr, &rd->rdata_paddr,
BUS_DMA_NOWAIT);
if (error) {
device_printf(sc->sc_dev, "can't load DMA mem\n");
bus_dmamem_free(dtag, rd->rdata_desc, rd->rdata_dmap);
rd->rdata_desc = NULL;
return error;
}
rd->rdata_txrx_ctrl = txrx_ctrl;
return 0;
}
static int
bwi_dma_txstats_alloc(struct bwi_softc *sc, uint32_t ctrl_base,
bus_size_t desc_sz)
{
struct bwi_txstats_data *st;
bus_size_t dma_size;
int error;
st = malloc(sizeof(*st), M_DEVBUF, M_NOWAIT | M_ZERO);
if (st == NULL) {
device_printf(sc->sc_dev, "can't allocate txstats data\n");
return ENOMEM;
}
sc->sc_txstats = st;
/*
* Create TX stats descriptor DMA stuffs
*/
dma_size = roundup(desc_sz * BWI_TXSTATS_NDESC, BWI_RING_ALIGN);
error = bus_dma_tag_create(sc->sc_parent_dtag,
BWI_RING_ALIGN,
0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
dma_size,
1,
dma_size,
0,
NULL, NULL,
&st->stats_ring_dtag);
if (error) {
device_printf(sc->sc_dev, "can't create txstats ring "
"DMA tag\n");
return error;
}
error = bus_dmamem_alloc(st->stats_ring_dtag, &st->stats_ring,
BUS_DMA_WAITOK | BUS_DMA_ZERO,
&st->stats_ring_dmap);
if (error) {
device_printf(sc->sc_dev, "can't allocate txstats ring "
"DMA mem\n");
bus_dma_tag_destroy(st->stats_ring_dtag);
st->stats_ring_dtag = NULL;
return error;
}
error = bus_dmamap_load(st->stats_ring_dtag, st->stats_ring_dmap,
st->stats_ring, dma_size,
bwi_dma_ring_addr, &st->stats_ring_paddr,
BUS_DMA_NOWAIT);
if (error) {
device_printf(sc->sc_dev, "can't load txstats ring DMA mem\n");
bus_dmamem_free(st->stats_ring_dtag, st->stats_ring,
st->stats_ring_dmap);
bus_dma_tag_destroy(st->stats_ring_dtag);
st->stats_ring_dtag = NULL;
return error;
}
/*
* Create TX stats DMA stuffs
*/
dma_size = roundup(sizeof(struct bwi_txstats) * BWI_TXSTATS_NDESC,
BWI_ALIGN);
error = bus_dma_tag_create(sc->sc_parent_dtag,
BWI_ALIGN,
0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
dma_size,
1,
dma_size,
0,
NULL, NULL,
&st->stats_dtag);
if (error) {
device_printf(sc->sc_dev, "can't create txstats DMA tag\n");
return error;
}
error = bus_dmamem_alloc(st->stats_dtag, (void **)&st->stats,
BUS_DMA_WAITOK | BUS_DMA_ZERO,
&st->stats_dmap);
if (error) {
device_printf(sc->sc_dev, "can't allocate txstats DMA mem\n");
bus_dma_tag_destroy(st->stats_dtag);
st->stats_dtag = NULL;
return error;
}
error = bus_dmamap_load(st->stats_dtag, st->stats_dmap, st->stats,
dma_size, bwi_dma_ring_addr, &st->stats_paddr,
BUS_DMA_NOWAIT);
if (error) {
device_printf(sc->sc_dev, "can't load txstats DMA mem\n");
bus_dmamem_free(st->stats_dtag, st->stats, st->stats_dmap);
bus_dma_tag_destroy(st->stats_dtag);
st->stats_dtag = NULL;
return error;
}
st->stats_ctrl_base = ctrl_base;
return 0;
}
static void
bwi_dma_txstats_free(struct bwi_softc *sc)
{
struct bwi_txstats_data *st;
if (sc->sc_txstats == NULL)
return;
st = sc->sc_txstats;
if (st->stats_ring_dtag != NULL) {
bus_dmamap_unload(st->stats_ring_dtag, st->stats_ring_dmap);
bus_dmamem_free(st->stats_ring_dtag, st->stats_ring,
st->stats_ring_dmap);
bus_dma_tag_destroy(st->stats_ring_dtag);
}
if (st->stats_dtag != NULL) {
bus_dmamap_unload(st->stats_dtag, st->stats_dmap);
bus_dmamem_free(st->stats_dtag, st->stats, st->stats_dmap);
bus_dma_tag_destroy(st->stats_dtag);
}
free(st, M_DEVBUF);
}
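/*
 * bus_dmamap_load() callback for the ring and txstats loads above: the
 * memory is allocated as a single contiguous segment, so the callback
 * only hands the bus address back through the opaque argument.
 */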
static void
bwi_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
KASSERT(nseg == 1, ("too many segments\n"));
*((bus_addr_t *)arg) = seg->ds_addr;
}
static int
bwi_dma_mbuf_create(struct bwi_softc *sc)
{
struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;
int i, j, k, ntx, error;
/*
* Create TX/RX mbuf DMA tag
*/
error = bus_dma_tag_create(sc->sc_parent_dtag,
1,
0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
MCLBYTES,
1,
MCLBYTES,
BUS_DMA_ALLOCNOW,
NULL, NULL,
&sc->sc_buf_dtag);
if (error) {
device_printf(sc->sc_dev, "can't create mbuf DMA tag\n");
return error;
}
ntx = 0;
/*
* Create TX mbuf DMA map
*/
for (i = 0; i < BWI_TX_NRING; ++i) {
struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[i];
for (j = 0; j < BWI_TX_NDESC; ++j) {
error = bus_dmamap_create(sc->sc_buf_dtag, 0,
&tbd->tbd_buf[j].tb_dmap);
if (error) {
device_printf(sc->sc_dev, "can't create "
"%dth tbd, %dth DMA map\n", i, j);
ntx = i;
for (k = 0; k < j; ++k) {
bus_dmamap_destroy(sc->sc_buf_dtag,
tbd->tbd_buf[k].tb_dmap);
}
goto fail;
}
}
}
ntx = BWI_TX_NRING;
/*
* Create RX mbuf DMA map and a spare DMA map
*/
error = bus_dmamap_create(sc->sc_buf_dtag, 0,
&rbd->rbd_tmp_dmap);
if (error) {
device_printf(sc->sc_dev,
"can't create spare RX buf DMA map\n");
goto fail;
}
for (j = 0; j < BWI_RX_NDESC; ++j) {
error = bus_dmamap_create(sc->sc_buf_dtag, 0,
&rbd->rbd_buf[j].rb_dmap);
if (error) {
device_printf(sc->sc_dev, "can't create %dth "
"RX buf DMA map\n", j);
for (k = 0; k < j; ++k) {
bus_dmamap_destroy(sc->sc_buf_dtag,
rbd->rbd_buf[k].rb_dmap);
}
bus_dmamap_destroy(sc->sc_buf_dtag,
rbd->rbd_tmp_dmap);
goto fail;
}
}
return 0;
fail:
bwi_dma_mbuf_destroy(sc, ntx, 0);
return error;
}
static void
bwi_dma_mbuf_destroy(struct bwi_softc *sc, int ntx, int nrx)
{
int i, j;
if (sc->sc_buf_dtag == NULL)
return;
for (i = 0; i < ntx; ++i) {
struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[i];
for (j = 0; j < BWI_TX_NDESC; ++j) {
struct bwi_txbuf *tb = &tbd->tbd_buf[j];
if (tb->tb_mbuf != NULL) {
bus_dmamap_unload(sc->sc_buf_dtag,
tb->tb_dmap);
m_freem(tb->tb_mbuf);
}
if (tb->tb_ni != NULL)
ieee80211_free_node(tb->tb_ni);
bus_dmamap_destroy(sc->sc_buf_dtag, tb->tb_dmap);
}
}
if (nrx) {
struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;
bus_dmamap_destroy(sc->sc_buf_dtag, rbd->rbd_tmp_dmap);
for (j = 0; j < BWI_RX_NDESC; ++j) {
struct bwi_rxbuf *rb = &rbd->rbd_buf[j];
if (rb->rb_mbuf != NULL) {
bus_dmamap_unload(sc->sc_buf_dtag,
rb->rb_dmap);
m_freem(rb->rb_mbuf);
}
bus_dmamap_destroy(sc->sc_buf_dtag, rb->rb_dmap);
}
}
bus_dma_tag_destroy(sc->sc_buf_dtag);
sc->sc_buf_dtag = NULL;
}
static void
bwi_enable_intrs(struct bwi_softc *sc, uint32_t enable_intrs)
{
CSR_SETBITS_4(sc, BWI_MAC_INTR_MASK, enable_intrs);
}
static void
bwi_disable_intrs(struct bwi_softc *sc, uint32_t disable_intrs)
{
CSR_CLRBITS_4(sc, BWI_MAC_INTR_MASK, disable_intrs);
}
static int
bwi_init_tx_ring32(struct bwi_softc *sc, int ring_idx)
{
struct bwi_ring_data *rd;
struct bwi_txbuf_data *tbd;
uint32_t val, addr_hi, addr_lo;
KASSERT(ring_idx < BWI_TX_NRING, ("ring_idx %d", ring_idx));
rd = &sc->sc_tx_rdata[ring_idx];
tbd = &sc->sc_tx_bdata[ring_idx];
tbd->tbd_idx = 0;
tbd->tbd_used = 0;
bzero(rd->rdata_desc, sizeof(struct bwi_desc32) * BWI_TX_NDESC);
bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap,
BUS_DMASYNC_PREWRITE);
addr_lo = __SHIFTOUT(rd->rdata_paddr, BWI_TXRX32_RINGINFO_ADDR_MASK);
addr_hi = __SHIFTOUT(rd->rdata_paddr, BWI_TXRX32_RINGINFO_FUNC_MASK);
val = __SHIFTIN(addr_lo, BWI_TXRX32_RINGINFO_ADDR_MASK) |
__SHIFTIN(BWI_TXRX32_RINGINFO_FUNC_TXRX,
BWI_TXRX32_RINGINFO_FUNC_MASK);
CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_RINGINFO, val);
val = __SHIFTIN(addr_hi, BWI_TXRX32_CTRL_ADDRHI_MASK) |
BWI_TXRX32_CTRL_ENABLE;
CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_CTRL, val);
return 0;
}
static void
bwi_init_rxdesc_ring32(struct bwi_softc *sc, uint32_t ctrl_base,
bus_addr_t paddr, int hdr_size, int ndesc)
{
uint32_t val, addr_hi, addr_lo;
addr_lo = __SHIFTOUT(paddr, BWI_TXRX32_RINGINFO_ADDR_MASK);
addr_hi = __SHIFTOUT(paddr, BWI_TXRX32_RINGINFO_FUNC_MASK);
val = __SHIFTIN(addr_lo, BWI_TXRX32_RINGINFO_ADDR_MASK) |
__SHIFTIN(BWI_TXRX32_RINGINFO_FUNC_TXRX,
BWI_TXRX32_RINGINFO_FUNC_MASK);
CSR_WRITE_4(sc, ctrl_base + BWI_RX32_RINGINFO, val);
val = __SHIFTIN(hdr_size, BWI_RX32_CTRL_HDRSZ_MASK) |
__SHIFTIN(addr_hi, BWI_TXRX32_CTRL_ADDRHI_MASK) |
BWI_TXRX32_CTRL_ENABLE;
CSR_WRITE_4(sc, ctrl_base + BWI_RX32_CTRL, val);
CSR_WRITE_4(sc, ctrl_base + BWI_RX32_INDEX,
(ndesc - 1) * sizeof(struct bwi_desc32));
}
static int
bwi_init_rx_ring32(struct bwi_softc *sc)
{
struct bwi_ring_data *rd = &sc->sc_rx_rdata;
int i, error;
sc->sc_rx_bdata.rbd_idx = 0;
for (i = 0; i < BWI_RX_NDESC; ++i) {
error = bwi_newbuf(sc, i, 1);
if (error) {
device_printf(sc->sc_dev,
"can't allocate %dth RX buffer\n", i);
return error;
}
}
bus_dmamap_sync(sc->sc_rxring_dtag, rd->rdata_dmap,
BUS_DMASYNC_PREWRITE);
bwi_init_rxdesc_ring32(sc, rd->rdata_txrx_ctrl, rd->rdata_paddr,
sizeof(struct bwi_rxbuf_hdr), BWI_RX_NDESC);
return 0;
}
static int
bwi_init_txstats32(struct bwi_softc *sc)
{
struct bwi_txstats_data *st = sc->sc_txstats;
bus_addr_t stats_paddr;
int i;
bzero(st->stats, BWI_TXSTATS_NDESC * sizeof(struct bwi_txstats));
bus_dmamap_sync(st->stats_dtag, st->stats_dmap, BUS_DMASYNC_PREWRITE);
st->stats_idx = 0;
stats_paddr = st->stats_paddr;
for (i = 0; i < BWI_TXSTATS_NDESC; ++i) {
bwi_setup_desc32(sc, st->stats_ring, BWI_TXSTATS_NDESC, i,
stats_paddr, sizeof(struct bwi_txstats), 0);
stats_paddr += sizeof(struct bwi_txstats);
}
bus_dmamap_sync(st->stats_ring_dtag, st->stats_ring_dmap,
BUS_DMASYNC_PREWRITE);
bwi_init_rxdesc_ring32(sc, st->stats_ctrl_base,
st->stats_ring_paddr, 0, BWI_TXSTATS_NDESC);
return 0;
}
static void
bwi_setup_rx_desc32(struct bwi_softc *sc, int buf_idx, bus_addr_t paddr,
int buf_len)
{
struct bwi_ring_data *rd = &sc->sc_rx_rdata;
KASSERT(buf_idx < BWI_RX_NDESC, ("buf_idx %d", buf_idx));
bwi_setup_desc32(sc, rd->rdata_desc, BWI_RX_NDESC, buf_idx,
paddr, buf_len, 0);
}
static void
bwi_setup_tx_desc32(struct bwi_softc *sc, struct bwi_ring_data *rd,
int buf_idx, bus_addr_t paddr, int buf_len)
{
KASSERT(buf_idx < BWI_TX_NDESC, ("buf_idx %d", buf_idx));
bwi_setup_desc32(sc, rd->rdata_desc, BWI_TX_NDESC, buf_idx,
paddr, buf_len, 1);
}
static int
bwi_init_tx_ring64(struct bwi_softc *sc, int ring_idx)
{
/* TODO:64 */
return EOPNOTSUPP;
}
static int
bwi_init_rx_ring64(struct bwi_softc *sc)
{
/* TODO:64 */
return EOPNOTSUPP;
}
static int
bwi_init_txstats64(struct bwi_softc *sc)
{
/* TODO:64 */
return EOPNOTSUPP;
}
static void
bwi_setup_rx_desc64(struct bwi_softc *sc, int buf_idx, bus_addr_t paddr,
int buf_len)
{
/* TODO:64 */
}
static void
bwi_setup_tx_desc64(struct bwi_softc *sc, struct bwi_ring_data *rd,
int buf_idx, bus_addr_t paddr, int buf_len)
{
/* TODO:64 */
}
static void
bwi_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg,
bus_size_t mapsz __unused, int error)
{
if (!error) {
KASSERT(nseg == 1, ("too many segments(%d)\n", nseg));
*((bus_addr_t *)arg) = seg->ds_addr;
}
}
static int
bwi_newbuf(struct bwi_softc *sc, int buf_idx, int init)
{
struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;
struct bwi_rxbuf *rxbuf = &rbd->rbd_buf[buf_idx];
struct bwi_rxbuf_hdr *hdr;
bus_dmamap_t map;
bus_addr_t paddr;
struct mbuf *m;
int error;
KASSERT(buf_idx < BWI_RX_NDESC, ("buf_idx %d", buf_idx));
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
error = ENOBUFS;
/*
* If the NIC is up and running, we need to:
* - Clear RX buffer's header.
* - Restore RX descriptor settings.
*/
if (init)
return error;
else
goto back;
}
m->m_len = m->m_pkthdr.len = MCLBYTES;
/*
* Try to load RX buf into temporary DMA map
*/
error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, rbd->rbd_tmp_dmap, m,
bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT);
if (error) {
m_freem(m);
/*
* See the comment above
*/
if (init)
return error;
else
goto back;
}
if (!init)
bus_dmamap_unload(sc->sc_buf_dtag, rxbuf->rb_dmap);
rxbuf->rb_mbuf = m;
rxbuf->rb_paddr = paddr;
/*
* Swap RX buf's DMA map with the loaded temporary one
*/
map = rxbuf->rb_dmap;
rxbuf->rb_dmap = rbd->rbd_tmp_dmap;
rbd->rbd_tmp_dmap = map;
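/*
 * After the swap the freshly loaded map stays attached to this ring
 * slot and the old map becomes the next spare, so a failed allocation
 * or load above leaves the slot with its previous mbuf/map intact.
 */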
back:
/*
* Clear RX buf header
*/
hdr = mtod(rxbuf->rb_mbuf, struct bwi_rxbuf_hdr *);
bzero(hdr, sizeof(*hdr));
bus_dmamap_sync(sc->sc_buf_dtag, rxbuf->rb_dmap, BUS_DMASYNC_PREWRITE);
/*
* Setup RX buf descriptor
*/
sc->sc_setup_rxdesc(sc, buf_idx, rxbuf->rb_paddr,
rxbuf->rb_mbuf->m_len - sizeof(*hdr));
return error;
}
static void
bwi_set_addr_filter(struct bwi_softc *sc, uint16_t addr_ofs,
const uint8_t *addr)
{
int i;
CSR_WRITE_2(sc, BWI_ADDR_FILTER_CTRL,
BWI_ADDR_FILTER_CTRL_SET | addr_ofs);
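/*
 * The write above selects the filter slot; the loop feeds the address
 * into the data register 16 bits at a time, low byte first.
 */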
for (i = 0; i < (IEEE80211_ADDR_LEN / 2); ++i) {
uint16_t addr_val;
addr_val = (uint16_t)addr[i * 2] |
(((uint16_t)addr[(i * 2) + 1]) << 8);
CSR_WRITE_2(sc, BWI_ADDR_FILTER_DATA, addr_val);
}
}
static int
bwi_rxeof(struct bwi_softc *sc, int end_idx)
{
struct bwi_ring_data *rd = &sc->sc_rx_rdata;
struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
int idx, rx_data = 0;
idx = rbd->rbd_idx;
while (idx != end_idx) {
struct bwi_rxbuf *rb = &rbd->rbd_buf[idx];
struct bwi_rxbuf_hdr *hdr;
struct ieee80211_frame_min *wh;
struct ieee80211_node *ni;
struct mbuf *m;
uint32_t plcp;
uint16_t flags2;
int buflen, wh_ofs, hdr_extra, rssi, noise, type, rate;
m = rb->rb_mbuf;
bus_dmamap_sync(sc->sc_buf_dtag, rb->rb_dmap,
BUS_DMASYNC_POSTREAD);
if (bwi_newbuf(sc, idx, 0)) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto next;
}
hdr = mtod(m, struct bwi_rxbuf_hdr *);
flags2 = le16toh(hdr->rxh_flags2);
hdr_extra = 0;
if (flags2 & BWI_RXH_F2_TYPE2FRAME)
hdr_extra = 2;
wh_ofs = hdr_extra + 6; /* XXX magic number */
buflen = le16toh(hdr->rxh_buflen);
if (buflen < BWI_FRAME_MIN_LEN(wh_ofs)) {
- if_printf(ifp, "%s: zero length data, hdr_extra %d\n",
- __func__, hdr_extra);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ device_printf(sc->sc_dev,
+ "%s: zero length data, hdr_extra %d\n",
+ __func__, hdr_extra);
+ counter_u64_add(ic->ic_ierrors, 1);
m_freem(m);
goto next;
}
bcopy((uint8_t *)(hdr + 1) + hdr_extra, &plcp, sizeof(plcp));
rssi = bwi_calc_rssi(sc, hdr);
noise = bwi_calc_noise(sc);
- m->m_pkthdr.rcvif = ifp;
m->m_len = m->m_pkthdr.len = buflen + sizeof(*hdr);
m_adj(m, sizeof(*hdr) + wh_ofs);
if (htole16(hdr->rxh_flags1) & BWI_RXH_F1_OFDM)
rate = bwi_plcp2rate(plcp, IEEE80211_T_OFDM);
else
rate = bwi_plcp2rate(plcp, IEEE80211_T_CCK);
/* RX radio tap */
if (ieee80211_radiotap_active(ic))
bwi_rx_radiotap(sc, m, hdr, &plcp, rate, rssi, noise);
m_adj(m, -IEEE80211_CRC_LEN);
BWI_UNLOCK(sc);
wh = mtod(m, struct ieee80211_frame_min *);
ni = ieee80211_find_rxnode(ic, wh);
if (ni != NULL) {
type = ieee80211_input(ni, m, rssi - noise, noise);
ieee80211_free_node(ni);
} else
type = ieee80211_input_all(ic, m, rssi - noise, noise);
if (type == IEEE80211_FC0_TYPE_DATA) {
rx_data = 1;
sc->sc_rx_rate = rate;
}
BWI_LOCK(sc);
next:
idx = (idx + 1) % BWI_RX_NDESC;
if (sc->sc_flags & BWI_F_STOP) {
/*
* Take the fast lane, don't do
* any damage to softc
*/
return -1;
}
}
rbd->rbd_idx = idx;
bus_dmamap_sync(sc->sc_rxring_dtag, rd->rdata_dmap,
BUS_DMASYNC_PREWRITE);
return rx_data;
}
static int
bwi_rxeof32(struct bwi_softc *sc)
{
uint32_t val, rx_ctrl;
int end_idx, rx_data;
rx_ctrl = sc->sc_rx_rdata.rdata_txrx_ctrl;
val = CSR_READ_4(sc, rx_ctrl + BWI_RX32_STATUS);
end_idx = __SHIFTOUT(val, BWI_RX32_STATUS_INDEX_MASK) /
sizeof(struct bwi_desc32);
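/*
 * The status register reports the hardware's current position as a
 * byte offset; converting it to a descriptor index tells bwi_rxeof()
 * how far it may walk the RX ring, and the processed index is then
 * written back below.
 */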
rx_data = bwi_rxeof(sc, end_idx);
if (rx_data >= 0) {
CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_INDEX,
end_idx * sizeof(struct bwi_desc32));
}
return rx_data;
}
static int
bwi_rxeof64(struct bwi_softc *sc)
{
/* TODO:64 */
return 0;
}
static void
bwi_reset_rx_ring32(struct bwi_softc *sc, uint32_t rx_ctrl)
{
int i;
CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_CTRL, 0);
#define NRETRY 10
for (i = 0; i < NRETRY; ++i) {
uint32_t status;
status = CSR_READ_4(sc, rx_ctrl + BWI_RX32_STATUS);
if (__SHIFTOUT(status, BWI_RX32_STATUS_STATE_MASK) ==
BWI_RX32_STATUS_STATE_DISABLED)
break;
DELAY(1000);
}
if (i == NRETRY)
device_printf(sc->sc_dev, "reset rx ring timedout\n");
#undef NRETRY
CSR_WRITE_4(sc, rx_ctrl + BWI_RX32_RINGINFO, 0);
}
static void
bwi_free_txstats32(struct bwi_softc *sc)
{
bwi_reset_rx_ring32(sc, sc->sc_txstats->stats_ctrl_base);
}
static void
bwi_free_rx_ring32(struct bwi_softc *sc)
{
struct bwi_ring_data *rd = &sc->sc_rx_rdata;
struct bwi_rxbuf_data *rbd = &sc->sc_rx_bdata;
int i;
bwi_reset_rx_ring32(sc, rd->rdata_txrx_ctrl);
for (i = 0; i < BWI_RX_NDESC; ++i) {
struct bwi_rxbuf *rb = &rbd->rbd_buf[i];
if (rb->rb_mbuf != NULL) {
bus_dmamap_unload(sc->sc_buf_dtag, rb->rb_dmap);
m_freem(rb->rb_mbuf);
rb->rb_mbuf = NULL;
}
}
}
static void
bwi_free_tx_ring32(struct bwi_softc *sc, int ring_idx)
{
struct bwi_ring_data *rd;
struct bwi_txbuf_data *tbd;
- struct ifnet *ifp = sc->sc_ifp;
uint32_t state, val;
int i;
KASSERT(ring_idx < BWI_TX_NRING, ("ring_idx %d", ring_idx));
rd = &sc->sc_tx_rdata[ring_idx];
tbd = &sc->sc_tx_bdata[ring_idx];
#define NRETRY 10
for (i = 0; i < NRETRY; ++i) {
val = CSR_READ_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_STATUS);
state = __SHIFTOUT(val, BWI_TX32_STATUS_STATE_MASK);
if (state == BWI_TX32_STATUS_STATE_DISABLED ||
state == BWI_TX32_STATUS_STATE_IDLE ||
state == BWI_TX32_STATUS_STATE_STOPPED)
break;
DELAY(1000);
}
if (i == NRETRY) {
- if_printf(ifp, "%s: wait for TX ring(%d) stable timed out\n",
- __func__, ring_idx);
+ device_printf(sc->sc_dev,
+ "%s: wait for TX ring(%d) stable timed out\n",
+ __func__, ring_idx);
}
CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_CTRL, 0);
for (i = 0; i < NRETRY; ++i) {
val = CSR_READ_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_STATUS);
state = __SHIFTOUT(val, BWI_TX32_STATUS_STATE_MASK);
if (state == BWI_TX32_STATUS_STATE_DISABLED)
break;
DELAY(1000);
}
if (i == NRETRY)
- if_printf(ifp, "%s: reset TX ring (%d) timed out\n",
+ device_printf(sc->sc_dev, "%s: reset TX ring (%d) timed out\n",
__func__, ring_idx);
#undef NRETRY
DELAY(1000);
CSR_WRITE_4(sc, rd->rdata_txrx_ctrl + BWI_TX32_RINGINFO, 0);
for (i = 0; i < BWI_TX_NDESC; ++i) {
struct bwi_txbuf *tb = &tbd->tbd_buf[i];
if (tb->tb_mbuf != NULL) {
bus_dmamap_unload(sc->sc_buf_dtag, tb->tb_dmap);
m_freem(tb->tb_mbuf);
tb->tb_mbuf = NULL;
}
if (tb->tb_ni != NULL) {
ieee80211_free_node(tb->tb_ni);
tb->tb_ni = NULL;
}
}
}
static void
bwi_free_txstats64(struct bwi_softc *sc)
{
/* TODO:64 */
}
static void
bwi_free_rx_ring64(struct bwi_softc *sc)
{
/* TODO:64 */
}
static void
bwi_free_tx_ring64(struct bwi_softc *sc, int ring_idx)
{
/* TODO:64 */
}
/* XXX does not belong here */
#define IEEE80211_OFDM_PLCP_RATE_MASK __BITS(3, 0)
#define IEEE80211_OFDM_PLCP_LEN_MASK __BITS(16, 5)
static __inline void
bwi_ofdm_plcp_header(uint32_t *plcp0, int pkt_len, uint8_t rate)
{
uint32_t plcp;
plcp = __SHIFTIN(ieee80211_rate2plcp(rate, IEEE80211_T_OFDM),
IEEE80211_OFDM_PLCP_RATE_MASK) |
__SHIFTIN(pkt_len, IEEE80211_OFDM_PLCP_LEN_MASK);
*plcp0 = htole32(plcp);
}
static __inline void
bwi_ds_plcp_header(struct ieee80211_ds_plcp_hdr *plcp, int pkt_len,
uint8_t rate)
{
int len, service, pkt_bitlen;
pkt_bitlen = pkt_len * NBBY;
len = howmany(pkt_bitlen * 2, rate);
service = IEEE80211_PLCP_SERVICE_LOCKED;
if (rate == (11 * 2)) {
int pkt_bitlen1;
/*
* PLCP service field needs to be adjusted,
* if the TX rate is 11 Mbits/s
*/
pkt_bitlen1 = len * 11;
if (pkt_bitlen1 - pkt_bitlen >= NBBY)
service |= IEEE80211_PLCP_SERVICE_LENEXT7;
}
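/*
 * Worked example: a 1503-byte frame at 11 Mbit/s gives pkt_bitlen =
 * 12024 and len = howmany(24048, 22) = 1094, so pkt_bitlen1 -
 * pkt_bitlen = 12034 - 12024 = 10 >= NBBY and LENEXT7 is set; at
 * 1500 bytes the difference is only 1 and the bit stays clear.
 */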
plcp->i_signal = ieee80211_rate2plcp(rate, IEEE80211_T_CCK);
plcp->i_service = service;
plcp->i_length = htole16(len);
/* NOTE: do NOT touch i_crc */
}
static __inline void
bwi_plcp_header(const struct ieee80211_rate_table *rt,
void *plcp, int pkt_len, uint8_t rate)
{
enum ieee80211_phytype modtype;
/*
* Assume caller has zeroed 'plcp'
*/
modtype = ieee80211_rate2phytype(rt, rate);
if (modtype == IEEE80211_T_OFDM)
bwi_ofdm_plcp_header(plcp, pkt_len, rate);
else if (modtype == IEEE80211_T_DS)
bwi_ds_plcp_header(plcp, pkt_len, rate);
else
panic("unsupport modulation type %u\n", modtype);
}
static int
bwi_encap(struct bwi_softc *sc, int idx, struct mbuf *m,
struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct bwi_ring_data *rd = &sc->sc_tx_rdata[BWI_TX_DATA_RING];
struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
struct bwi_txbuf *tb = &tbd->tbd_buf[idx];
struct bwi_mac *mac;
struct bwi_txbuf_hdr *hdr;
struct ieee80211_frame *wh;
const struct ieee80211_txparam *tp;
uint8_t rate, rate_fb;
uint32_t mac_ctrl;
uint16_t phy_ctrl;
bus_addr_t paddr;
int type, ismcast, pkt_len, error, rix;
#if 0
const uint8_t *p;
int i;
#endif
KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
("current regwin type %d", sc->sc_cur_regwin->rw_type));
mac = (struct bwi_mac *)sc->sc_cur_regwin;
wh = mtod(m, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
/* Get 802.11 frame len before prepending TX header */
pkt_len = m->m_pkthdr.len + IEEE80211_CRC_LEN;
/*
* Find TX rate
*/
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
if (type != IEEE80211_FC0_TYPE_DATA || (m->m_flags & M_EAPOL)) {
rate = rate_fb = tp->mgmtrate;
} else if (ismcast) {
rate = rate_fb = tp->mcastrate;
} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
rate = rate_fb = tp->ucastrate;
} else {
rix = ieee80211_ratectl_rate(ni, NULL, pkt_len);
rate = ni->ni_txrate;
if (rix > 0) {
rate_fb = ni->ni_rates.rs_rates[rix-1] &
IEEE80211_RATE_VAL;
} else {
rate_fb = rate;
}
}
tb->tb_rate[0] = rate;
tb->tb_rate[1] = rate_fb;
sc->sc_tx_rate = rate;
/*
* TX radio tap
*/
if (ieee80211_radiotap_active_vap(vap)) {
sc->sc_tx_th.wt_flags = 0;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_DS &&
(ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
rate != (1 * 2)) {
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
}
sc->sc_tx_th.wt_rate = rate;
ieee80211_radiotap_tx(vap, m);
}
/*
* Setup the embedded TX header
*/
M_PREPEND(m, sizeof(*hdr), M_NOWAIT);
if (m == NULL) {
- if_printf(ifp, "%s: prepend TX header failed\n", __func__);
+ device_printf(sc->sc_dev, "%s: prepend TX header failed\n",
+ __func__);
return ENOBUFS;
}
hdr = mtod(m, struct bwi_txbuf_hdr *);
bzero(hdr, sizeof(*hdr));
bcopy(wh->i_fc, hdr->txh_fc, sizeof(hdr->txh_fc));
bcopy(wh->i_addr1, hdr->txh_addr1, sizeof(hdr->txh_addr1));
if (!ismcast) {
uint16_t dur;
dur = ieee80211_ack_duration(sc->sc_rates, rate,
ic->ic_flags & ~IEEE80211_F_SHPREAMBLE);
hdr->txh_fb_duration = htole16(dur);
}
hdr->txh_id = __SHIFTIN(BWI_TX_DATA_RING, BWI_TXH_ID_RING_MASK) |
__SHIFTIN(idx, BWI_TXH_ID_IDX_MASK);
bwi_plcp_header(sc->sc_rates, hdr->txh_plcp, pkt_len, rate);
bwi_plcp_header(sc->sc_rates, hdr->txh_fb_plcp, pkt_len, rate_fb);
phy_ctrl = __SHIFTIN(mac->mac_rf.rf_ant_mode,
BWI_TXH_PHY_C_ANTMODE_MASK);
if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM)
phy_ctrl |= BWI_TXH_PHY_C_OFDM;
else if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && rate != (2 * 1))
phy_ctrl |= BWI_TXH_PHY_C_SHPREAMBLE;
mac_ctrl = BWI_TXH_MAC_C_HWSEQ | BWI_TXH_MAC_C_FIRST_FRAG;
if (!ismcast)
mac_ctrl |= BWI_TXH_MAC_C_ACK;
if (ieee80211_rate2phytype(sc->sc_rates, rate_fb) == IEEE80211_T_OFDM)
mac_ctrl |= BWI_TXH_MAC_C_FB_OFDM;
hdr->txh_mac_ctrl = htole32(mac_ctrl);
hdr->txh_phy_ctrl = htole16(phy_ctrl);
/* Catch any further usage */
hdr = NULL;
wh = NULL;
/* DMA load */
error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m,
bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT);
if (error && error != EFBIG) {
- if_printf(ifp, "%s: can't load TX buffer (1) %d\n",
+ device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n",
__func__, error);
goto back;
}
if (error) { /* error == EFBIG */
struct mbuf *m_new;
m_new = m_defrag(m, M_NOWAIT);
if (m_new == NULL) {
- if_printf(ifp, "%s: can't defrag TX buffer\n",
- __func__);
+ device_printf(sc->sc_dev,
+ "%s: can't defrag TX buffer\n", __func__);
error = ENOBUFS;
goto back;
} else {
m = m_new;
}
error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m,
bwi_dma_buf_addr, &paddr,
BUS_DMA_NOWAIT);
if (error) {
- if_printf(ifp, "%s: can't load TX buffer (2) %d\n",
+ device_printf(sc->sc_dev,
+ "%s: can't load TX buffer (2) %d\n",
__func__, error);
goto back;
}
}
error = 0;
bus_dmamap_sync(sc->sc_buf_dtag, tb->tb_dmap, BUS_DMASYNC_PREWRITE);
tb->tb_mbuf = m;
tb->tb_ni = ni;
#if 0
p = mtod(m, const uint8_t *);
for (i = 0; i < m->m_pkthdr.len; ++i) {
if (i != 0 && i % 8 == 0)
printf("\n");
printf("%02x ", p[i]);
}
printf("\n");
#endif
DPRINTF(sc, BWI_DBG_TX, "idx %d, pkt_len %d, buflen %d\n",
idx, pkt_len, m->m_pkthdr.len);
/* Setup TX descriptor */
sc->sc_setup_txdesc(sc, rd, idx, paddr, m->m_pkthdr.len);
bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap,
BUS_DMASYNC_PREWRITE);
/* Kick start */
sc->sc_start_tx(sc, rd->rdata_txrx_ctrl, idx);
back:
if (error)
m_freem(m);
return error;
}
static int
bwi_encap_raw(struct bwi_softc *sc, int idx, struct mbuf *m,
struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
{
- struct ifnet *ifp = sc->sc_ifp;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct bwi_ring_data *rd = &sc->sc_tx_rdata[BWI_TX_DATA_RING];
struct bwi_txbuf_data *tbd = &sc->sc_tx_bdata[BWI_TX_DATA_RING];
struct bwi_txbuf *tb = &tbd->tbd_buf[idx];
struct bwi_mac *mac;
struct bwi_txbuf_hdr *hdr;
struct ieee80211_frame *wh;
uint8_t rate, rate_fb;
uint32_t mac_ctrl;
uint16_t phy_ctrl;
bus_addr_t paddr;
int ismcast, pkt_len, error;
KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
("current regwin type %d", sc->sc_cur_regwin->rw_type));
mac = (struct bwi_mac *)sc->sc_cur_regwin;
wh = mtod(m, struct ieee80211_frame *);
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
/* Get 802.11 frame len before prepending TX header */
pkt_len = m->m_pkthdr.len + IEEE80211_CRC_LEN;
/*
* Find TX rate
*/
rate = params->ibp_rate0;
if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
/* XXX fall back to mcast/mgmt rate? */
m_freem(m);
return EINVAL;
}
if (params->ibp_try1 != 0) {
rate_fb = params->ibp_rate1;
if (!ieee80211_isratevalid(ic->ic_rt, rate_fb)) {
/* XXX fall back to rate0? */
m_freem(m);
return EINVAL;
}
} else
rate_fb = rate;
tb->tb_rate[0] = rate;
tb->tb_rate[1] = rate_fb;
sc->sc_tx_rate = rate;
/*
* TX radio tap
*/
if (ieee80211_radiotap_active_vap(vap)) {
sc->sc_tx_th.wt_flags = 0;
/* XXX IEEE80211_BPF_CRYPTO */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
sc->sc_tx_th.wt_rate = rate;
ieee80211_radiotap_tx(vap, m);
}
/*
* Setup the embedded TX header
*/
M_PREPEND(m, sizeof(*hdr), M_NOWAIT);
if (m == NULL) {
- if_printf(ifp, "%s: prepend TX header failed\n", __func__);
+ device_printf(sc->sc_dev, "%s: prepend TX header failed\n",
+ __func__);
return ENOBUFS;
}
hdr = mtod(m, struct bwi_txbuf_hdr *);
bzero(hdr, sizeof(*hdr));
bcopy(wh->i_fc, hdr->txh_fc, sizeof(hdr->txh_fc));
bcopy(wh->i_addr1, hdr->txh_addr1, sizeof(hdr->txh_addr1));
mac_ctrl = BWI_TXH_MAC_C_HWSEQ | BWI_TXH_MAC_C_FIRST_FRAG;
if (!ismcast && (params->ibp_flags & IEEE80211_BPF_NOACK) == 0) {
uint16_t dur;
dur = ieee80211_ack_duration(sc->sc_rates, rate_fb, 0);
hdr->txh_fb_duration = htole16(dur);
mac_ctrl |= BWI_TXH_MAC_C_ACK;
}
hdr->txh_id = __SHIFTIN(BWI_TX_DATA_RING, BWI_TXH_ID_RING_MASK) |
__SHIFTIN(idx, BWI_TXH_ID_IDX_MASK);
bwi_plcp_header(sc->sc_rates, hdr->txh_plcp, pkt_len, rate);
bwi_plcp_header(sc->sc_rates, hdr->txh_fb_plcp, pkt_len, rate_fb);
phy_ctrl = __SHIFTIN(mac->mac_rf.rf_ant_mode,
BWI_TXH_PHY_C_ANTMODE_MASK);
if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) {
phy_ctrl |= BWI_TXH_PHY_C_OFDM;
mac_ctrl |= BWI_TXH_MAC_C_FB_OFDM;
} else if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
phy_ctrl |= BWI_TXH_PHY_C_SHPREAMBLE;
hdr->txh_mac_ctrl = htole32(mac_ctrl);
hdr->txh_phy_ctrl = htole16(phy_ctrl);
/* Catch any further usage */
hdr = NULL;
wh = NULL;
/* DMA load */
error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m,
bwi_dma_buf_addr, &paddr, BUS_DMA_NOWAIT);
if (error != 0) {
struct mbuf *m_new;
if (error != EFBIG) {
- if_printf(ifp, "%s: can't load TX buffer (1) %d\n",
+ device_printf(sc->sc_dev,
+ "%s: can't load TX buffer (1) %d\n",
__func__, error);
goto back;
}
m_new = m_defrag(m, M_NOWAIT);
if (m_new == NULL) {
- if_printf(ifp, "%s: can't defrag TX buffer\n",
- __func__);
+ device_printf(sc->sc_dev,
+ "%s: can't defrag TX buffer\n", __func__);
error = ENOBUFS;
goto back;
}
m = m_new;
error = bus_dmamap_load_mbuf(sc->sc_buf_dtag, tb->tb_dmap, m,
bwi_dma_buf_addr, &paddr,
BUS_DMA_NOWAIT);
if (error) {
- if_printf(ifp, "%s: can't load TX buffer (2) %d\n",
+ device_printf(sc->sc_dev,
+ "%s: can't load TX buffer (2) %d\n",
__func__, error);
goto back;
}
}
bus_dmamap_sync(sc->sc_buf_dtag, tb->tb_dmap, BUS_DMASYNC_PREWRITE);
tb->tb_mbuf = m;
tb->tb_ni = ni;
DPRINTF(sc, BWI_DBG_TX, "idx %d, pkt_len %d, buflen %d\n",
idx, pkt_len, m->m_pkthdr.len);
/* Setup TX descriptor */
sc->sc_setup_txdesc(sc, rd, idx, paddr, m->m_pkthdr.len);
bus_dmamap_sync(sc->sc_txring_dtag, rd->rdata_dmap,
BUS_DMASYNC_PREWRITE);
/* Kick start */
sc->sc_start_tx(sc, rd->rdata_txrx_ctrl, idx);
back:
if (error)
m_freem(m);
return error;
}
static void
bwi_start_tx32(struct bwi_softc *sc, uint32_t tx_ctrl, int idx)
{
idx = (idx + 1) % BWI_TX_NDESC;
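/*
 * Kick the hardware by advancing the TX index register to the byte
 * offset of the slot after the one just filled.
 */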
CSR_WRITE_4(sc, tx_ctrl + BWI_TX32_INDEX,
idx * sizeof(struct bwi_desc32));
}
static void
bwi_start_tx64(struct bwi_softc *sc, uint32_t tx_ctrl, int idx)
{
/* TODO:64 */
}
static void
bwi_txeof_status32(struct bwi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
uint32_t val, ctrl_base;
int end_idx;
ctrl_base = sc->sc_txstats->stats_ctrl_base;
val = CSR_READ_4(sc, ctrl_base + BWI_RX32_STATUS);
end_idx = __SHIFTOUT(val, BWI_RX32_STATUS_INDEX_MASK) /
sizeof(struct bwi_desc32);
bwi_txeof_status(sc, end_idx);
CSR_WRITE_4(sc, ctrl_base + BWI_RX32_INDEX,
end_idx * sizeof(struct bwi_desc32));
- if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0)
- ifp->if_start(ifp);
+ bwi_start_locked(sc);
}
static void
bwi_txeof_status64(struct bwi_softc *sc)
{
/* TODO:64 */
}
static void
_bwi_txeof(struct bwi_softc *sc, uint16_t tx_id, int acked, int data_txcnt)
{
- struct ifnet *ifp = sc->sc_ifp;
struct bwi_txbuf_data *tbd;
struct bwi_txbuf *tb;
int ring_idx, buf_idx;
struct ieee80211_node *ni;
struct ieee80211vap *vap;
if (tx_id == 0) {
- if_printf(ifp, "%s: zero tx id\n", __func__);
+ device_printf(sc->sc_dev, "%s: zero tx id\n", __func__);
return;
}
ring_idx = __SHIFTOUT(tx_id, BWI_TXH_ID_RING_MASK);
buf_idx = __SHIFTOUT(tx_id, BWI_TXH_ID_IDX_MASK);
KASSERT(ring_idx == BWI_TX_DATA_RING, ("ring_idx %d", ring_idx));
KASSERT(buf_idx < BWI_TX_NDESC, ("buf_idx %d", buf_idx));
tbd = &sc->sc_tx_bdata[ring_idx];
KASSERT(tbd->tbd_used > 0, ("tbd_used %d", tbd->tbd_used));
tbd->tbd_used--;
tb = &tbd->tbd_buf[buf_idx];
DPRINTF(sc, BWI_DBG_TXEOF, "txeof idx %d, "
"acked %d, data_txcnt %d, ni %p\n",
buf_idx, acked, data_txcnt, tb->tb_ni);
bus_dmamap_unload(sc->sc_buf_dtag, tb->tb_dmap);
- ni = tb->tb_ni;
- if (tb->tb_ni != NULL) {
+ if ((ni = tb->tb_ni) != NULL) {
const struct bwi_txbuf_hdr *hdr =
mtod(tb->tb_mbuf, const struct bwi_txbuf_hdr *);
vap = ni->ni_vap;
/* NB: update rate control only for unicast frames */
if (hdr->txh_mac_ctrl & htole32(BWI_TXH_MAC_C_ACK)) {
/*
* Feed back 'acked and data_txcnt'. Note that the
* generic AMRR code only understands one tx rate
* and the estimator doesn't handle real retry counts
* well so to avoid over-aggressive downshifting we
* treat any number of retries as "1".
*/
ieee80211_ratectl_tx_complete(vap, ni,
(data_txcnt > 1) ? IEEE80211_RATECTL_TX_SUCCESS :
IEEE80211_RATECTL_TX_FAILURE, &acked, NULL);
}
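/*
 * ieee80211_tx_complete() runs any M_TXCB completion callback, frees
 * the mbuf and drops the node reference in a single call.
 */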
-
- /*
- * Do any tx complete callback. Note this must
- * be done before releasing the node reference.
- */
- if (tb->tb_mbuf->m_flags & M_TXCB)
- ieee80211_process_callback(ni, tb->tb_mbuf, !acked);
-
- ieee80211_free_node(tb->tb_ni);
+ ieee80211_tx_complete(ni, tb->tb_mbuf, !acked);
tb->tb_ni = NULL;
- }
- m_freem(tb->tb_mbuf);
+ } else
+ m_freem(tb->tb_mbuf);
tb->tb_mbuf = NULL;
if (tbd->tbd_used == 0)
sc->sc_tx_timer = 0;
-
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
static void
bwi_txeof_status(struct bwi_softc *sc, int end_idx)
{
struct bwi_txstats_data *st = sc->sc_txstats;
int idx;
bus_dmamap_sync(st->stats_dtag, st->stats_dmap, BUS_DMASYNC_POSTREAD);
idx = st->stats_idx;
while (idx != end_idx) {
const struct bwi_txstats *stats = &st->stats[idx];
if ((stats->txs_flags & BWI_TXS_F_PENDING) == 0) {
int data_txcnt;
data_txcnt = __SHIFTOUT(stats->txs_txcnt,
BWI_TXS_TXCNT_DATA);
_bwi_txeof(sc, le16toh(stats->txs_id),
stats->txs_flags & BWI_TXS_F_ACKED,
data_txcnt);
}
idx = (idx + 1) % BWI_TXSTATS_NDESC;
}
st->stats_idx = idx;
}
static void
bwi_txeof(struct bwi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
for (;;) {
uint32_t tx_status0, tx_status1;
uint16_t tx_id;
int data_txcnt;
tx_status0 = CSR_READ_4(sc, BWI_TXSTATUS0);
if ((tx_status0 & BWI_TXSTATUS0_VALID) == 0)
break;
tx_status1 = CSR_READ_4(sc, BWI_TXSTATUS1);
tx_id = __SHIFTOUT(tx_status0, BWI_TXSTATUS0_TXID_MASK);
data_txcnt = __SHIFTOUT(tx_status0,
BWI_TXSTATUS0_DATA_TXCNT_MASK);
if (tx_status0 & (BWI_TXSTATUS0_AMPDU | BWI_TXSTATUS0_PENDING))
continue;
_bwi_txeof(sc, le16toh(tx_id), tx_status0 & BWI_TXSTATUS0_ACKED,
data_txcnt);
}
- if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0)
- ifp->if_start(ifp);
+ bwi_start_locked(sc);
}
static int
bwi_bbp_power_on(struct bwi_softc *sc, enum bwi_clock_mode clk_mode)
{
bwi_power_on(sc, 1);
return bwi_set_clock_mode(sc, clk_mode);
}
static void
bwi_bbp_power_off(struct bwi_softc *sc)
{
bwi_set_clock_mode(sc, BWI_CLOCK_MODE_SLOW);
bwi_power_off(sc, 1);
}
static int
bwi_get_pwron_delay(struct bwi_softc *sc)
{
struct bwi_regwin *com, *old;
struct bwi_clock_freq freq;
uint32_t val;
int error;
com = &sc->sc_com_regwin;
KASSERT(BWI_REGWIN_EXIST(com), ("no regwin"));
if ((sc->sc_cap & BWI_CAP_CLKMODE) == 0)
return 0;
error = bwi_regwin_switch(sc, com, &old);
if (error)
return error;
bwi_get_clock_freq(sc, &freq);
val = CSR_READ_4(sc, BWI_PLL_ON_DELAY);
sc->sc_pwron_delay = howmany((val + 2) * 1000000, freq.clkfreq_min);
DPRINTF(sc, BWI_DBG_ATTACH, "power on delay %u\n", sc->sc_pwron_delay);
return bwi_regwin_switch(sc, old, NULL);
}
static int
bwi_bus_attach(struct bwi_softc *sc)
{
struct bwi_regwin *bus, *old;
int error;
bus = &sc->sc_bus_regwin;
error = bwi_regwin_switch(sc, bus, &old);
if (error)
return error;
if (!bwi_regwin_is_enabled(sc, bus))
bwi_regwin_enable(sc, bus, 0);
/* Disable interrupts */
CSR_WRITE_4(sc, BWI_INTRVEC, 0);
return bwi_regwin_switch(sc, old, NULL);
}
static const char *
bwi_regwin_name(const struct bwi_regwin *rw)
{
switch (rw->rw_type) {
case BWI_REGWIN_T_COM:
return "COM";
case BWI_REGWIN_T_BUSPCI:
return "PCI";
case BWI_REGWIN_T_MAC:
return "MAC";
case BWI_REGWIN_T_BUSPCIE:
return "PCIE";
}
panic("unknown regwin type 0x%04x\n", rw->rw_type);
return NULL;
}
static uint32_t
bwi_regwin_disable_bits(struct bwi_softc *sc)
{
uint32_t busrev;
/* XXX cache this */
busrev = __SHIFTOUT(CSR_READ_4(sc, BWI_ID_LO), BWI_ID_LO_BUSREV_MASK);
DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT | BWI_DBG_MISC,
"bus rev %u\n", busrev);
if (busrev == BWI_BUSREV_0)
return BWI_STATE_LO_DISABLE1;
else if (busrev == BWI_BUSREV_1)
return BWI_STATE_LO_DISABLE2;
else
return (BWI_STATE_LO_DISABLE1 | BWI_STATE_LO_DISABLE2);
}
int
bwi_regwin_is_enabled(struct bwi_softc *sc, struct bwi_regwin *rw)
{
uint32_t val, disable_bits;
disable_bits = bwi_regwin_disable_bits(sc);
val = CSR_READ_4(sc, BWI_STATE_LO);
if ((val & (BWI_STATE_LO_CLOCK |
BWI_STATE_LO_RESET |
disable_bits)) == BWI_STATE_LO_CLOCK) {
DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT, "%s is enabled\n",
bwi_regwin_name(rw));
return 1;
} else {
DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT, "%s is disabled\n",
bwi_regwin_name(rw));
return 0;
}
}
void
bwi_regwin_disable(struct bwi_softc *sc, struct bwi_regwin *rw, uint32_t flags)
{
uint32_t state_lo, disable_bits;
int i;
state_lo = CSR_READ_4(sc, BWI_STATE_LO);
/*
* If current regwin is in 'reset' state, it was already disabled.
*/
if (state_lo & BWI_STATE_LO_RESET) {
DPRINTF(sc, BWI_DBG_ATTACH | BWI_DBG_INIT,
"%s was already disabled\n", bwi_regwin_name(rw));
return;
}
disable_bits = bwi_regwin_disable_bits(sc);
/*
* Disable normal clock
*/
state_lo = BWI_STATE_LO_CLOCK | disable_bits;
CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
/*
* Wait until normal clock is disabled
*/
#define NRETRY 1000
for (i = 0; i < NRETRY; ++i) {
state_lo = CSR_READ_4(sc, BWI_STATE_LO);
if (state_lo & disable_bits)
break;
DELAY(10);
}
if (i == NRETRY) {
device_printf(sc->sc_dev, "%s disable clock timeout\n",
bwi_regwin_name(rw));
}
for (i = 0; i < NRETRY; ++i) {
uint32_t state_hi;
state_hi = CSR_READ_4(sc, BWI_STATE_HI);
if ((state_hi & BWI_STATE_HI_BUSY) == 0)
break;
DELAY(10);
}
if (i == NRETRY) {
device_printf(sc->sc_dev, "%s wait BUSY unset timeout\n",
bwi_regwin_name(rw));
}
#undef NRETRY
/*
* Reset and disable regwin with gated clock
*/
state_lo = BWI_STATE_LO_RESET | disable_bits |
BWI_STATE_LO_CLOCK | BWI_STATE_LO_GATED_CLOCK |
__SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
/* Flush pending bus write */
CSR_READ_4(sc, BWI_STATE_LO);
DELAY(1);
/* Reset and disable regwin */
state_lo = BWI_STATE_LO_RESET | disable_bits |
__SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
/* Flush pending bus write */
CSR_READ_4(sc, BWI_STATE_LO);
DELAY(1);
}
void
bwi_regwin_enable(struct bwi_softc *sc, struct bwi_regwin *rw, uint32_t flags)
{
uint32_t state_lo, state_hi, imstate;
bwi_regwin_disable(sc, rw, flags);
/* Reset regwin with gated clock */
state_lo = BWI_STATE_LO_RESET |
BWI_STATE_LO_CLOCK |
BWI_STATE_LO_GATED_CLOCK |
__SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
/* Flush pending bus write */
CSR_READ_4(sc, BWI_STATE_LO);
DELAY(1);
state_hi = CSR_READ_4(sc, BWI_STATE_HI);
if (state_hi & BWI_STATE_HI_SERROR)
CSR_WRITE_4(sc, BWI_STATE_HI, 0);
imstate = CSR_READ_4(sc, BWI_IMSTATE);
if (imstate & (BWI_IMSTATE_INBAND_ERR | BWI_IMSTATE_TIMEOUT)) {
imstate &= ~(BWI_IMSTATE_INBAND_ERR | BWI_IMSTATE_TIMEOUT);
CSR_WRITE_4(sc, BWI_IMSTATE, imstate);
}
/* Enable regwin with gated clock */
state_lo = BWI_STATE_LO_CLOCK |
BWI_STATE_LO_GATED_CLOCK |
__SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
/* Flush pending bus write */
CSR_READ_4(sc, BWI_STATE_LO);
DELAY(1);
/* Enable regwin with normal clock */
state_lo = BWI_STATE_LO_CLOCK |
__SHIFTIN(flags, BWI_STATE_LO_FLAGS_MASK);
CSR_WRITE_4(sc, BWI_STATE_LO, state_lo);
/* Flush pending bus write */
CSR_READ_4(sc, BWI_STATE_LO);
DELAY(1);
}
static void
bwi_set_bssid(struct bwi_softc *sc, const uint8_t *bssid)
{
- struct ifnet *ifp = sc->sc_ifp;
struct bwi_mac *mac;
struct bwi_myaddr_bssid buf;
const uint8_t *p;
uint32_t val;
int n, i;
KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
("current regwin type %d", sc->sc_cur_regwin->rw_type));
mac = (struct bwi_mac *)sc->sc_cur_regwin;
bwi_set_addr_filter(sc, BWI_ADDR_FILTER_BSSID, bssid);
- bcopy(IF_LLADDR(ifp), buf.myaddr, sizeof(buf.myaddr));
+ bcopy(sc->sc_ic.ic_macaddr, buf.myaddr, sizeof(buf.myaddr));
bcopy(bssid, buf.bssid, sizeof(buf.bssid));
n = sizeof(buf) / sizeof(val);
p = (const uint8_t *)&buf;
for (i = 0; i < n; ++i) {
int j;
val = 0;
for (j = 0; j < sizeof(val); ++j)
val |= ((uint32_t)(*p++)) << (j * 8);
TMPLT_WRITE_4(mac, 0x20 + (i * sizeof(val)), val);
}
}
static void
bwi_updateslot(struct ieee80211com *ic)
{
struct bwi_softc *sc = ic->ic_softc;
struct bwi_mac *mac;
BWI_LOCK(sc);
- if (ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (sc->sc_flags & BWI_F_RUNNING) {
DPRINTF(sc, BWI_DBG_80211, "%s\n", __func__);
KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
("current regwin type %d", sc->sc_cur_regwin->rw_type));
mac = (struct bwi_mac *)sc->sc_cur_regwin;
bwi_mac_updateslot(mac, (ic->ic_flags & IEEE80211_F_SHSLOT));
}
BWI_UNLOCK(sc);
}
static void
bwi_calibrate(void *xsc)
{
struct bwi_softc *sc = xsc;
-#ifdef INVARIANTS
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
-#endif
struct bwi_mac *mac;
BWI_ASSERT_LOCKED(sc);
- KASSERT(ic->ic_opmode != IEEE80211_M_MONITOR,
- ("opmode %d", ic->ic_opmode));
+ KASSERT(sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR,
+ ("opmode %d", sc->sc_ic.ic_opmode));
KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
("current regwin type %d", sc->sc_cur_regwin->rw_type));
mac = (struct bwi_mac *)sc->sc_cur_regwin;
bwi_mac_calibrate_txpower(mac, sc->sc_txpwrcb_type);
sc->sc_txpwrcb_type = BWI_TXPWR_CALIB;
/* XXX 15 seconds */
callout_reset(&sc->sc_calib_ch, hz * 15, bwi_calibrate, sc);
}
static int
bwi_calc_rssi(struct bwi_softc *sc, const struct bwi_rxbuf_hdr *hdr)
{
struct bwi_mac *mac;
KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
("current regwin type %d", sc->sc_cur_regwin->rw_type));
mac = (struct bwi_mac *)sc->sc_cur_regwin;
return bwi_rf_calc_rssi(mac, hdr);
}
static int
bwi_calc_noise(struct bwi_softc *sc)
{
struct bwi_mac *mac;
KASSERT(sc->sc_cur_regwin->rw_type == BWI_REGWIN_T_MAC,
("current regwin type %d", sc->sc_cur_regwin->rw_type));
mac = (struct bwi_mac *)sc->sc_cur_regwin;
return bwi_rf_calc_noise(mac);
}
static __inline uint8_t
bwi_plcp2rate(const uint32_t plcp0, enum ieee80211_phytype type)
{
uint32_t plcp = le32toh(plcp0) & IEEE80211_OFDM_PLCP_RATE_MASK;
return (ieee80211_plcp2rate(plcp, type));
}
static void
bwi_rx_radiotap(struct bwi_softc *sc, struct mbuf *m,
struct bwi_rxbuf_hdr *hdr, const void *plcp, int rate, int rssi, int noise)
{
const struct ieee80211_frame_min *wh;
sc->sc_rx_th.wr_flags = IEEE80211_RADIOTAP_F_FCS;
if (htole16(hdr->rxh_flags1) & BWI_RXH_F1_SHPREAMBLE)
sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
wh = mtod(m, const struct ieee80211_frame_min *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_WEP;
sc->sc_rx_th.wr_tsf = hdr->rxh_tsf; /* No endian conversion */
sc->sc_rx_th.wr_rate = rate;
sc->sc_rx_th.wr_antsignal = rssi;
sc->sc_rx_th.wr_antnoise = noise;
}
static void
bwi_led_attach(struct bwi_softc *sc)
{
const uint8_t *led_act = NULL;
uint16_t gpio, val[BWI_LED_MAX];
int i;
#define N(arr) (int)(sizeof(arr) / sizeof(arr[0]))
for (i = 0; i < N(bwi_vendor_led_act); ++i) {
if (sc->sc_pci_subvid == bwi_vendor_led_act[i].vid) {
led_act = bwi_vendor_led_act[i].led_act;
break;
}
}
if (led_act == NULL)
led_act = bwi_default_led_act;
#undef N
gpio = bwi_read_sprom(sc, BWI_SPROM_GPIO01);
val[0] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_0);
val[1] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_1);
gpio = bwi_read_sprom(sc, BWI_SPROM_GPIO23);
val[2] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_2);
val[3] = __SHIFTOUT(gpio, BWI_SPROM_GPIO_3);
for (i = 0; i < BWI_LED_MAX; ++i) {
struct bwi_led *led = &sc->sc_leds[i];
if (val[i] == 0xff) {
led->l_act = led_act[i];
} else {
if (val[i] & BWI_LED_ACT_LOW)
led->l_flags |= BWI_LED_F_ACTLOW;
led->l_act = __SHIFTOUT(val[i], BWI_LED_ACT_MASK);
}
led->l_mask = (1 << i);
if (led->l_act == BWI_LED_ACT_BLINK_SLOW ||
led->l_act == BWI_LED_ACT_BLINK_POLL ||
led->l_act == BWI_LED_ACT_BLINK) {
led->l_flags |= BWI_LED_F_BLINK;
if (led->l_act == BWI_LED_ACT_BLINK_POLL)
led->l_flags |= BWI_LED_F_POLLABLE;
else if (led->l_act == BWI_LED_ACT_BLINK_SLOW)
led->l_flags |= BWI_LED_F_SLOW;
if (sc->sc_blink_led == NULL) {
sc->sc_blink_led = led;
if (led->l_flags & BWI_LED_F_SLOW)
BWI_LED_SLOWDOWN(sc->sc_led_idle);
}
}
DPRINTF(sc, BWI_DBG_LED | BWI_DBG_ATTACH,
"%dth led, act %d, lowact %d\n", i,
led->l_act, led->l_flags & BWI_LED_F_ACTLOW);
}
callout_init_mtx(&sc->sc_led_blink_ch, &sc->sc_mtx, 0);
}
static __inline uint16_t
bwi_led_onoff(const struct bwi_led *led, uint16_t val, int on)
{
if (led->l_flags & BWI_LED_F_ACTLOW)
on = !on;
if (on)
val |= led->l_mask;
else
val &= ~led->l_mask;
return val;
}
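bwi_led_onoff() folds the active-low wiring into the GPIO word: for a BWI_LED_F_ACTLOW LED, a logical "on" clears that LED's mask bit instead of setting it. bwi_led_blink_start() below is the canonical caller; an illustrative use (not part of the diff, LED index chosen arbitrarily) looks like:
uint16_t gpio;
gpio = CSR_READ_2(sc, BWI_MAC_GPIO_CTRL);
gpio = bwi_led_onoff(&sc->sc_leds[2], gpio, 1);	/* bit cleared if ACTLOW */
CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL, gpio);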
static void
bwi_led_newstate(struct bwi_softc *sc, enum ieee80211_state nstate)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t val;
int i;
if (nstate == IEEE80211_S_INIT) {
callout_stop(&sc->sc_led_blink_ch);
sc->sc_led_blinking = 0;
}
- if ((ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ if ((sc->sc_flags & BWI_F_RUNNING) == 0)
return;
val = CSR_READ_2(sc, BWI_MAC_GPIO_CTRL);
for (i = 0; i < BWI_LED_MAX; ++i) {
struct bwi_led *led = &sc->sc_leds[i];
int on;
if (led->l_act == BWI_LED_ACT_UNKN ||
led->l_act == BWI_LED_ACT_NULL)
continue;
if ((led->l_flags & BWI_LED_F_BLINK) &&
nstate != IEEE80211_S_INIT)
continue;
switch (led->l_act) {
case BWI_LED_ACT_ON: /* Always on */
on = 1;
break;
case BWI_LED_ACT_OFF: /* Always off */
case BWI_LED_ACT_5GHZ: /* TODO: 11A */
on = 0;
break;
default:
on = 1;
switch (nstate) {
case IEEE80211_S_INIT:
on = 0;
break;
case IEEE80211_S_RUN:
if (led->l_act == BWI_LED_ACT_11G &&
ic->ic_curmode != IEEE80211_MODE_11G)
on = 0;
break;
default:
if (led->l_act == BWI_LED_ACT_ASSOC)
on = 0;
break;
}
break;
}
val = bwi_led_onoff(led, val, on);
}
CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL, val);
}
static void
bwi_led_event(struct bwi_softc *sc, int event)
{
struct bwi_led *led = sc->sc_blink_led;
int rate;
if (event == BWI_LED_EVENT_POLL) {
if ((led->l_flags & BWI_LED_F_POLLABLE) == 0)
return;
if (ticks - sc->sc_led_ticks < sc->sc_led_idle)
return;
}
sc->sc_led_ticks = ticks;
if (sc->sc_led_blinking)
return;
switch (event) {
case BWI_LED_EVENT_RX:
rate = sc->sc_rx_rate;
break;
case BWI_LED_EVENT_TX:
rate = sc->sc_tx_rate;
break;
case BWI_LED_EVENT_POLL:
rate = 0;
break;
default:
panic("unknown LED event %d\n", event);
break;
}
bwi_led_blink_start(sc, bwi_led_duration[rate].on_dur,
bwi_led_duration[rate].off_dur);
}
static void
bwi_led_blink_start(struct bwi_softc *sc, int on_dur, int off_dur)
{
struct bwi_led *led = sc->sc_blink_led;
uint16_t val;
val = CSR_READ_2(sc, BWI_MAC_GPIO_CTRL);
val = bwi_led_onoff(led, val, 1);
CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL, val);
if (led->l_flags & BWI_LED_F_SLOW) {
BWI_LED_SLOWDOWN(on_dur);
BWI_LED_SLOWDOWN(off_dur);
}
sc->sc_led_blinking = 1;
sc->sc_led_blink_offdur = off_dur;
callout_reset(&sc->sc_led_blink_ch, on_dur, bwi_led_blink_next, sc);
}
static void
bwi_led_blink_next(void *xsc)
{
struct bwi_softc *sc = xsc;
uint16_t val;
val = CSR_READ_2(sc, BWI_MAC_GPIO_CTRL);
val = bwi_led_onoff(sc->sc_blink_led, val, 0);
CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL, val);
callout_reset(&sc->sc_led_blink_ch, sc->sc_led_blink_offdur,
bwi_led_blink_end, sc);
}
static void
bwi_led_blink_end(void *xsc)
{
struct bwi_softc *sc = xsc;
sc->sc_led_blinking = 0;
}
static void
bwi_restart(void *xsc, int pending)
{
struct bwi_softc *sc = xsc;
- struct ifnet *ifp = sc->sc_ifp;
- if_printf(ifp, "%s begin, help!\n", __func__);
+ device_printf(sc->sc_dev, "%s begin, help!\n", __func__);
BWI_LOCK(sc);
- bwi_init_statechg(xsc, 0);
+ bwi_init_statechg(sc, 0);
#if 0
- bwi_start_locked(ifp);
+ bwi_start_locked(sc);
#endif
BWI_UNLOCK(sc);
}
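The hunks above all follow the same conversion pattern: state that used to hang off the per-driver struct ifnet (IFF_DRV_RUNNING, IF_LLADDR(), if_printf()) now lives on the softc and its embedded ieee80211com. Condensed, the post-conversion idiom used throughout this file is:
struct bwi_softc *sc = ic->ic_softc;
if (sc->sc_flags & BWI_F_RUNNING) {		/* was: ifp->if_drv_flags & IFF_DRV_RUNNING */
	device_printf(sc->sc_dev, "...\n");	/* was: if_printf(ifp, ...) */
	/* MAC address: sc->sc_ic.ic_macaddr, formerly IF_LLADDR(ifp). */
}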
Index: head/sys/dev/bwi/if_bwivar.h
===================================================================
--- head/sys/dev/bwi/if_bwivar.h (revision 287196)
+++ head/sys/dev/bwi/if_bwivar.h (revision 287197)
@@ -1,703 +1,705 @@
/*
* Copyright (c) 2007 The DragonFly Project. All rights reserved.
*
* This code is derived from software contributed to The DragonFly Project
* by Sepherosa Ziehau <sepherosa@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of The DragonFly Project nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific, prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $DragonFly: src/sys/dev/netif/bwi/if_bwivar.h,v 1.14 2008/02/15 11:15:38 sephe Exp $
* $FreeBSD$
*/
#ifndef _IF_BWIVAR_H
#define _IF_BWIVAR_H
#define BWI_ALIGN 0x1000
#define BWI_RING_ALIGN BWI_ALIGN
#define BWI_BUS_SPACE_MAXADDR 0x3fffffff
#define BWI_TX_NRING 6
#define BWI_TXRX_NRING 6
#define BWI_TX_NDESC 128
#define BWI_RX_NDESC 64
#define BWI_TXSTATS_NDESC 64
#define BWI_TX_NSPRDESC 2
#define BWI_TX_DATA_RING 1
/* XXX Onoe/Sample/AMRR probably need different configuration */
#define BWI_SHRETRY 7
#define BWI_LGRETRY 4
#define BWI_SHRETRY_FB 3
#define BWI_LGRETRY_FB 2
#define BWI_LED_EVENT_NONE -1
#define BWI_LED_EVENT_POLL 0
#define BWI_LED_EVENT_TX 1
#define BWI_LED_EVENT_RX 2
#define BWI_LED_SLOWDOWN(dur) (dur) = (((dur) * 3) / 2)
enum bwi_txpwrcb_type {
BWI_TXPWR_INIT = 0,
BWI_TXPWR_FORCE = 1,
BWI_TXPWR_CALIB = 2
};
#define BWI_NOISE_FLOOR -95 /* TODO: noise floor calc */
#define BWI_FRAME_MIN_LEN(hdr) \
((hdr) + sizeof(struct ieee80211_frame_ack) + IEEE80211_CRC_LEN)
#define CSR_READ_4(sc, reg) \
bus_space_read_4((sc)->sc_mem_bt, (sc)->sc_mem_bh, (reg))
#define CSR_READ_2(sc, reg) \
bus_space_read_2((sc)->sc_mem_bt, (sc)->sc_mem_bh, (reg))
#define CSR_WRITE_4(sc, reg, val) \
bus_space_write_4((sc)->sc_mem_bt, (sc)->sc_mem_bh, (reg), (val))
#define CSR_WRITE_2(sc, reg, val) \
bus_space_write_2((sc)->sc_mem_bt, (sc)->sc_mem_bh, (reg), (val))
#define CSR_SETBITS_4(sc, reg, bits) \
CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) | (bits))
#define CSR_SETBITS_2(sc, reg, bits) \
CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) | (bits))
#define CSR_FILT_SETBITS_4(sc, reg, filt, bits) \
CSR_WRITE_4((sc), (reg), (CSR_READ_4((sc), (reg)) & (filt)) | (bits))
#define CSR_FILT_SETBITS_2(sc, reg, filt, bits) \
CSR_WRITE_2((sc), (reg), (CSR_READ_2((sc), (reg)) & (filt)) | (bits))
#define CSR_CLRBITS_4(sc, reg, bits) \
CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) & ~(bits))
#define CSR_CLRBITS_2(sc, reg, bits) \
CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) & ~(bits))
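The CSR_* helpers wrap bus_space read-modify-write sequences; CSR_FILT_SETBITS_*() first ANDs the current value with the filter and then ORs in the new bits. For example (illustrative only, register chosen arbitrarily):
/* Keep only bits 15:8 of the current value, then set bits 1:0. */
CSR_FILT_SETBITS_2(sc, BWI_MAC_GPIO_CTRL, 0xff00, 0x0003);
/* Equivalent expansion: */
CSR_WRITE_2(sc, BWI_MAC_GPIO_CTRL,
    (CSR_READ_2(sc, BWI_MAC_GPIO_CTRL) & 0xff00) | 0x0003);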
#ifdef BWI_DEBUG
#define DPRINTF(sc, dbg, fmt, ...) \
do { \
if ((sc)->sc_debug & (dbg)) \
device_printf((sc)->sc_dev, fmt, __VA_ARGS__); \
} while (0)
#define _DPRINTF(sc, dbg, fmt, ...) \
do { \
if ((sc)->sc_debug & (dbg)) \
printf(fmt, __VA_ARGS__); \
} while (0)
#else /* !BWI_DEBUG */
#define DPRINTF(sc, dbg, fmt, ...) ((void)0)
#define _DPRINTF(sc, dbg, fmt, ...) ((void)0)
#endif /* BWI_DEBUG */
struct bwi_desc32 {
/* Little endian */
uint32_t ctrl;
uint32_t addr; /* BWI_DESC32_A_ */
} __packed;
#define BWI_DESC32_A_FUNC_TXRX 0x1
#define BWI_DESC32_A_FUNC_MASK __BITS(31, 30)
#define BWI_DESC32_A_ADDR_MASK __BITS(29, 0)
#define BWI_DESC32_C_BUFLEN_MASK __BITS(12, 0)
#define BWI_DESC32_C_ADDRHI_MASK __BITS(17, 16)
#define BWI_DESC32_C_EOR __BIT(28)
#define BWI_DESC32_C_INTR __BIT(29)
#define BWI_DESC32_C_FRAME_END __BIT(30)
#define BWI_DESC32_C_FRAME_START __BIT(31)
struct bwi_desc64 {
/* Little endian */
uint32_t ctrl0;
uint32_t ctrl1;
uint32_t addr_lo;
uint32_t addr_hi;
} __packed;
struct bwi_rxbuf_hdr {
/* Little endian */
uint16_t rxh_buflen; /* exclude bwi_rxbuf_hdr */
uint8_t rxh_pad1[2];
uint16_t rxh_flags1; /* BWI_RXH_F1_ */
uint8_t rxh_rssi;
uint8_t rxh_sq;
uint16_t rxh_phyinfo; /* BWI_RXH_PHYINFO_ */
uint16_t rxh_flags3; /* BWI_RXH_F3_ */
uint16_t rxh_flags2; /* BWI_RXH_F2_ */
uint16_t rxh_tsf;
uint8_t rxh_pad3[14]; /* Padded to 30 bytes */
} __packed;
#define BWI_RXH_F1_BCM2053_RSSI __BIT(14)
#define BWI_RXH_F1_SHPREAMBLE __BIT(7)
#define BWI_RXH_F1_OFDM __BIT(0)
#define BWI_RXH_F2_TYPE2FRAME __BIT(2)
#define BWI_RXH_F3_BCM2050_RSSI __BIT(10)
#define BWI_RXH_PHYINFO_LNAGAIN __BITS(15, 14)
struct bwi_txbuf_hdr {
/* Little endian */
uint32_t txh_mac_ctrl; /* BWI_TXH_MAC_C_ */
uint8_t txh_fc[2];
uint16_t txh_unknown1;
uint16_t txh_phy_ctrl; /* BWI_TXH_PHY_C_ */
uint8_t txh_ivs[16];
uint8_t txh_addr1[IEEE80211_ADDR_LEN];
uint16_t txh_unknown2;
uint8_t txh_rts_fb_plcp[4];
uint16_t txh_rts_fb_duration;
uint8_t txh_fb_plcp[4];
uint16_t txh_fb_duration;
uint8_t txh_pad2[2];
uint16_t txh_id; /* BWI_TXH_ID_ */
uint16_t txh_unknown3;
uint8_t txh_rts_plcp[6];
uint8_t txh_rts_fc[2];
uint16_t txh_rts_duration;
uint8_t txh_rts_ra[IEEE80211_ADDR_LEN];
uint8_t txh_rts_ta[IEEE80211_ADDR_LEN];
uint8_t txh_pad3[2];
uint8_t txh_plcp[6];
} __packed;
#define BWI_TXH_ID_RING_MASK __BITS(15, 13)
#define BWI_TXH_ID_IDX_MASK __BITS(12, 0)
#define BWI_TXH_PHY_C_OFDM __BIT(0)
#define BWI_TXH_PHY_C_SHPREAMBLE __BIT(4)
#define BWI_TXH_PHY_C_ANTMODE_MASK __BITS(9, 8)
#define BWI_TXH_MAC_C_ACK __BIT(0)
#define BWI_TXH_MAC_C_FIRST_FRAG __BIT(3)
#define BWI_TXH_MAC_C_HWSEQ __BIT(4)
#define BWI_TXH_MAC_C_FB_OFDM __BIT(8)
struct bwi_txstats {
/* Little endian */
uint8_t txs_pad1[4];
uint16_t txs_id;
uint8_t txs_flags; /* BWI_TXS_F_ */
uint8_t txs_txcnt; /* BWI_TXS_TXCNT_ */
uint8_t txs_pad2[2];
uint16_t txs_seq;
uint16_t txs_unknown;
uint8_t txs_pad3[2]; /* Padded to 16 bytes */
} __packed;
#define BWI_TXS_TXCNT_DATA __BITS(7, 4)
#define BWI_TXS_F_ACKED __BIT(0)
#define BWI_TXS_F_PENDING __BIT(5)
struct bwi_ring_data {
uint32_t rdata_txrx_ctrl;
bus_dmamap_t rdata_dmap;
bus_addr_t rdata_paddr;
void *rdata_desc;
};
struct bwi_txbuf {
struct mbuf *tb_mbuf;
bus_dmamap_t tb_dmap;
struct ieee80211_node *tb_ni;
int tb_rate[2];
};
struct bwi_txbuf_data {
struct bwi_txbuf tbd_buf[BWI_TX_NDESC];
int tbd_used;
int tbd_idx;
};
struct bwi_rxbuf {
struct mbuf *rb_mbuf;
bus_addr_t rb_paddr;
bus_dmamap_t rb_dmap;
};
struct bwi_rxbuf_data {
struct bwi_rxbuf rbd_buf[BWI_RX_NDESC];
bus_dmamap_t rbd_tmp_dmap;
int rbd_idx;
};
struct bwi_txstats_data {
bus_dma_tag_t stats_ring_dtag;
bus_dmamap_t stats_ring_dmap;
bus_addr_t stats_ring_paddr;
void *stats_ring;
bus_dma_tag_t stats_dtag;
bus_dmamap_t stats_dmap;
bus_addr_t stats_paddr;
struct bwi_txstats *stats;
uint32_t stats_ctrl_base;
int stats_idx;
};
struct bwi_fwhdr {
/* Big endian */
uint8_t fw_type; /* BWI_FW_T_ */
uint8_t fw_gen; /* BWI_FW_GEN */
uint8_t fw_pad[2];
uint32_t fw_size;
#define fw_iv_cnt fw_size
} __packed;
#define BWI_FWHDR_SZ sizeof(struct bwi_fwhdr)
#define BWI_FW_T_UCODE 'u'
#define BWI_FW_T_PCM 'p'
#define BWI_FW_T_IV 'i'
#define BWI_FW_GEN_1 1
#define BWI_FW_VERSION3 3
#define BWI_FW_VERSION4 4
#define BWI_FW_VERSION3_REVMAX 0x128
#define BWI_FW_PATH "bwi_v%d_"
#define BWI_FW_STUB_PATH BWI_FW_PATH "ucode"
#define BWI_FW_UCODE_PATH BWI_FW_PATH "ucode%d"
#define BWI_FW_PCM_PATH BWI_FW_PATH "pcm%d"
#define BWI_FW_IV_PATH BWI_FW_PATH "b0g0initvals%d"
#define BWI_FW_IV_EXT_PATH BWI_FW_PATH "b0g0bsinitvals%d"
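The BWI_FW_*_PATH strings are printf templates: the leading %d (from BWI_FW_PATH) takes the firmware generation kept in sc_fw_version, and the trailing %d selects a specific ucode/initvals image. A hypothetical expansion (image index made up for illustration):
char fwname[64];
/* e.g. sc_fw_version == 3 and image index 5 -> "bwi_v3_ucode5" */
snprintf(fwname, sizeof(fwname), BWI_FW_UCODE_PATH, sc->sc_fw_version, 5);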
struct bwi_fw_iv {
/* Big endian */
uint16_t iv_ofs;
union {
uint32_t val32;
uint16_t val16;
} iv_val;
} __packed;
#define BWI_FW_IV_OFS_MASK __BITS(14, 0)
#define BWI_FW_IV_IS_32BIT __BIT(15)
struct bwi_led {
uint8_t l_flags; /* BWI_LED_F_ */
uint8_t l_act; /* BWI_LED_ACT_ */
uint8_t l_mask;
};
#define BWI_LED_F_ACTLOW 0x1
#define BWI_LED_F_BLINK 0x2
#define BWI_LED_F_POLLABLE 0x4
#define BWI_LED_F_SLOW 0x8
enum bwi_clock_mode {
BWI_CLOCK_MODE_SLOW,
BWI_CLOCK_MODE_FAST,
BWI_CLOCK_MODE_DYN
};
struct bwi_regwin {
uint32_t rw_flags; /* BWI_REGWIN_F_ */
uint16_t rw_type; /* BWI_REGWIN_T_ */
uint8_t rw_id;
uint8_t rw_rev;
};
#define BWI_REGWIN_F_EXIST 0x1
#define BWI_CREATE_REGWIN(rw, id, type, rev) \
do { \
(rw)->rw_flags = BWI_REGWIN_F_EXIST; \
(rw)->rw_type = (type); \
(rw)->rw_id = (id); \
(rw)->rw_rev = (rev); \
} while (0)
#define BWI_REGWIN_EXIST(rw) ((rw)->rw_flags & BWI_REGWIN_F_EXIST)
#define BWI_GPIO_REGWIN(sc) \
(BWI_REGWIN_EXIST(&(sc)->sc_com_regwin) ? \
&(sc)->sc_com_regwin : &(sc)->sc_bus_regwin)
struct bwi_mac;
struct bwi_phy {
enum ieee80211_phymode phy_mode;
int phy_rev;
int phy_version;
uint32_t phy_flags; /* BWI_PHY_F_ */
uint16_t phy_tbl_ctrl;
uint16_t phy_tbl_data_lo;
uint16_t phy_tbl_data_hi;
void (*phy_init)(struct bwi_mac *);
};
#define BWI_PHY_F_CALIBRATED 0x1
#define BWI_PHY_F_LINKED 0x2
#define BWI_CLEAR_PHY_FLAGS (BWI_PHY_F_CALIBRATED)
/* TX power control */
struct bwi_tpctl {
uint16_t bbp_atten; /* BBP attenuation: 4bits */
uint16_t rf_atten; /* RF attenuation */
uint16_t tp_ctrl1; /* ??: 3bits */
uint16_t tp_ctrl2; /* ??: 4bits */
};
#define BWI_RF_ATTEN_FACTOR 4
#define BWI_RF_ATTEN_MAX0 9
#define BWI_RF_ATTEN_MAX1 31
#define BWI_BBP_ATTEN_MAX 11
#define BWI_TPCTL1_MAX 7
struct bwi_rf_lo {
int8_t ctrl_lo;
int8_t ctrl_hi;
};
struct bwi_rf {
uint16_t rf_type; /* BWI_RF_T_ */
uint16_t rf_manu;
int rf_rev;
uint32_t rf_flags; /* BWI_RF_F_ */
#define BWI_RFLO_MAX 56
struct bwi_rf_lo rf_lo[BWI_RFLO_MAX];
uint8_t rf_lo_used[8];
#define BWI_INVALID_NRSSI -1000
int16_t rf_nrssi[2]; /* Narrow RSSI */
int32_t rf_nrssi_slope;
#define BWI_NRSSI_TBLSZ 64
int8_t rf_nrssi_table[BWI_NRSSI_TBLSZ];
uint16_t rf_lo_gain; /* loopback gain */
uint16_t rf_rx_gain; /* TRSW RX gain */
uint16_t rf_calib; /* RF calibration value */
u_int rf_curchan; /* current channel */
uint16_t rf_ctrl_rd;
int rf_ctrl_adj;
void (*rf_off)(struct bwi_mac *);
void (*rf_on)(struct bwi_mac *);
void (*rf_set_nrssi_thr)(struct bwi_mac *);
void (*rf_calc_nrssi_slope)(struct bwi_mac *);
int (*rf_calc_rssi)
(struct bwi_mac *,
const struct bwi_rxbuf_hdr *);
int (*rf_calc_noise)(struct bwi_mac *);
void (*rf_lo_update)(struct bwi_mac *);
#define BWI_TSSI_MAX 64
int8_t rf_txpower_map0[BWI_TSSI_MAX];
/* Indexed by TSSI */
int rf_idle_tssi0;
int8_t rf_txpower_map[BWI_TSSI_MAX];
int rf_idle_tssi;
int rf_base_tssi;
int rf_txpower_max; /* dBm */
int rf_ant_mode; /* BWI_ANT_MODE_ */
};
#define BWI_RF_F_INITED 0x1
#define BWI_RF_F_ON 0x2
#define BWI_RF_CLEAR_FLAGS (BWI_RF_F_INITED)
#define BWI_ANT_MODE_0 0
#define BWI_ANT_MODE_1 1
#define BWI_ANT_MODE_UNKN 2
#define BWI_ANT_MODE_AUTO 3
struct bwi_softc;
struct firmware;
struct bwi_mac {
struct bwi_regwin mac_regwin; /* MUST be first field */
#define mac_rw_flags mac_regwin.rw_flags
#define mac_type mac_regwin.rw_type
#define mac_id mac_regwin.rw_id
#define mac_rev mac_regwin.rw_rev
struct bwi_softc *mac_sc;
struct bwi_phy mac_phy; /* PHY I/F */
struct bwi_rf mac_rf; /* RF I/F */
struct bwi_tpctl mac_tpctl; /* TX power control */
uint32_t mac_flags; /* BWI_MAC_F_ */
const struct firmware *mac_stub;
const struct firmware *mac_ucode;
const struct firmware *mac_pcm;
const struct firmware *mac_iv;
const struct firmware *mac_iv_ext;
};
#define BWI_MAC_F_BSWAP 0x1
#define BWI_MAC_F_TPCTL_INITED 0x2
#define BWI_MAC_F_HAS_TXSTATS 0x4
#define BWI_MAC_F_INITED 0x8
#define BWI_MAC_F_ENABLED 0x10
#define BWI_MAC_F_LOCKED 0x20 /* for debug */
#define BWI_MAC_F_TPCTL_ERROR 0x40
#define BWI_MAC_F_PHYE_RESET 0x80
#define BWI_CREATE_MAC(mac, sc, id, rev) \
do { \
BWI_CREATE_REGWIN(&(mac)->mac_regwin, \
(id), \
BWI_REGWIN_T_MAC, \
(rev)); \
(mac)->mac_sc = (sc); \
} while (0)
#define BWI_MAC_MAX 2
#define BWI_LED_MAX 4
enum bwi_bus_space {
BWI_BUS_SPACE_30BIT = 1,
BWI_BUS_SPACE_32BIT,
BWI_BUS_SPACE_64BIT
};
#define BWI_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL))
struct bwi_tx_radiotap_hdr {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint8_t wt_rate;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
};
#define BWI_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_TSFT) | \
(1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE))
struct bwi_rx_radiotap_hdr {
struct ieee80211_radiotap_header wr_ihdr;
uint64_t wr_tsf;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_antsignal;
int8_t wr_antnoise;
/* TODO: sq */
};
struct bwi_vap {
struct ieee80211vap bv_vap;
int (*bv_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define BWI_VAP(vap) ((struct bwi_vap *)(vap))
struct bwi_softc {
- struct ifnet *sc_ifp;
uint32_t sc_flags; /* BWI_F_ */
device_t sc_dev;
struct mtx sc_mtx;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
int sc_invalid;
uint32_t sc_cap; /* BWI_CAP_ */
uint16_t sc_bbp_id; /* BWI_BBPID_ */
uint8_t sc_bbp_rev;
uint8_t sc_bbp_pkg;
uint8_t sc_pci_revid;
uint16_t sc_pci_did;
uint16_t sc_pci_subvid;
uint16_t sc_pci_subdid;
uint16_t sc_card_flags; /* BWI_CARD_F_ */
uint16_t sc_pwron_delay;
int sc_locale;
int sc_irq_rid;
struct resource *sc_irq_res;
void *sc_irq_handle;
int sc_mem_rid;
struct resource *sc_mem_res;
bus_space_tag_t sc_mem_bt;
bus_space_handle_t sc_mem_bh;
struct callout sc_calib_ch;
struct callout sc_watchdog_timer;
struct bwi_regwin *sc_cur_regwin;
struct bwi_regwin sc_com_regwin;
struct bwi_regwin sc_bus_regwin;
int sc_nmac;
struct bwi_mac sc_mac[BWI_MAC_MAX];
int sc_rx_rate;
int sc_tx_rate;
enum bwi_txpwrcb_type sc_txpwrcb_type;
int sc_led_blinking;
int sc_led_ticks;
struct bwi_led *sc_blink_led;
struct callout sc_led_blink_ch;
int sc_led_blink_offdur;
struct bwi_led sc_leds[BWI_LED_MAX];
enum bwi_bus_space sc_bus_space;
bus_dma_tag_t sc_parent_dtag;
bus_dma_tag_t sc_buf_dtag;
struct bwi_txbuf_data sc_tx_bdata[BWI_TX_NRING];
struct bwi_rxbuf_data sc_rx_bdata;
bus_dma_tag_t sc_txring_dtag;
struct bwi_ring_data sc_tx_rdata[BWI_TX_NRING];
bus_dma_tag_t sc_rxring_dtag;
struct bwi_ring_data sc_rx_rdata;
struct bwi_txstats_data *sc_txstats;
int sc_tx_timer;
const struct ieee80211_rate_table *sc_rates;
struct bwi_tx_radiotap_hdr sc_tx_th;
struct bwi_rx_radiotap_hdr sc_rx_th;
struct taskqueue *sc_tq;
struct task sc_restart_task;
int (*sc_init_tx_ring)(struct bwi_softc *, int);
void (*sc_free_tx_ring)(struct bwi_softc *, int);
int (*sc_init_rx_ring)(struct bwi_softc *);
void (*sc_free_rx_ring)(struct bwi_softc *);
int (*sc_init_txstats)(struct bwi_softc *);
void (*sc_free_txstats)(struct bwi_softc *);
void (*sc_setup_rxdesc)
(struct bwi_softc *, int, bus_addr_t, int);
int (*sc_rxeof)(struct bwi_softc *);
void (*sc_setup_txdesc)
(struct bwi_softc *, struct bwi_ring_data *,
int, bus_addr_t, int);
void (*sc_start_tx)
(struct bwi_softc *, uint32_t, int);
void (*sc_txeof_status)(struct bwi_softc *);
/* Sysctl variables */
int sc_fw_version; /* BWI_FW_VERSION[34] */
int sc_dwell_time; /* milliseconds */
int sc_led_idle;
int sc_led_blink;
int sc_txpwr_calib;
uint32_t sc_debug; /* BWI_DBG_ */
};
#define BWI_F_BUS_INITED 0x1
#define BWI_F_PROMISC 0x2
#define BWI_F_STOP 0x4
+#define BWI_F_RUNNING 0x8
#define BWI_DBG_MAC 0x00000001
#define BWI_DBG_RF 0x00000002
#define BWI_DBG_PHY 0x00000004
#define BWI_DBG_MISC 0x00000008
#define BWI_DBG_ATTACH 0x00000010
#define BWI_DBG_INIT 0x00000020
#define BWI_DBG_FIRMWARE 0x00000040
#define BWI_DBG_80211 0x00000080
#define BWI_DBG_TXPOWER 0x00000100
#define BWI_DBG_INTR 0x00000200
#define BWI_DBG_RX 0x00000400
#define BWI_DBG_TX 0x00000800
#define BWI_DBG_TXEOF 0x00001000
#define BWI_DBG_LED 0x00002000
#define BWI_LOCK_INIT(sc) \
mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->sc_dev), \
MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE)
#define BWI_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx)
#define BWI_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define BWI_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
#define BWI_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
int bwi_attach(struct bwi_softc *);
int bwi_detach(struct bwi_softc *);
void bwi_suspend(struct bwi_softc *);
void bwi_resume(struct bwi_softc *);
int bwi_shutdown(struct bwi_softc *);
void bwi_intr(void *);
int bwi_bus_init(struct bwi_softc *, struct bwi_mac *mac);
uint16_t bwi_read_sprom(struct bwi_softc *, uint16_t);
int bwi_regwin_switch(struct bwi_softc *, struct bwi_regwin *,
struct bwi_regwin **);
int bwi_regwin_is_enabled(struct bwi_softc *, struct bwi_regwin *);
void bwi_regwin_enable(struct bwi_softc *, struct bwi_regwin *,
uint32_t);
void bwi_regwin_disable(struct bwi_softc *, struct bwi_regwin *,
uint32_t);
#define abs(a) __builtin_abs(a)
/* XXX does not belong here */
struct ieee80211_ds_plcp_hdr {
uint8_t i_signal;
uint8_t i_service;
uint16_t i_length;
uint16_t i_crc;
} __packed;
#endif /* !_IF_BWIVAR_H */
Index: head/sys/dev/bwn/if_bwn.c
===================================================================
--- head/sys/dev/bwn/if_bwn.c (revision 287196)
+++ head/sys/dev/bwn/if_bwn.c (revision 287197)
@@ -1,14238 +1,14090 @@
/*-
* Copyright (c) 2009-2010 Weongyo Jeong <weongyo@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* The Broadcom Wireless LAN controller driver.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/errno.h>
#include <sys/firmware.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/siba/siba_ids.h>
#include <dev/siba/sibareg.h>
#include <dev/siba/sibavar.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_phy.h>
#include <net80211/ieee80211_ratectl.h>
#include <dev/bwn/if_bwnreg.h>
#include <dev/bwn/if_bwnvar.h>
static SYSCTL_NODE(_hw, OID_AUTO, bwn, CTLFLAG_RD, 0,
"Broadcom driver parameters");
/*
* Tunable & sysctl variables.
*/
#ifdef BWN_DEBUG
static int bwn_debug = 0;
SYSCTL_INT(_hw_bwn, OID_AUTO, debug, CTLFLAG_RWTUN, &bwn_debug, 0,
"Broadcom debugging printfs");
enum {
BWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
BWN_DEBUG_RECV = 0x00000002, /* basic recv operation */
BWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */
BWN_DEBUG_TXPOW = 0x00000008, /* tx power processing */
BWN_DEBUG_RESET = 0x00000010, /* reset processing */
BWN_DEBUG_OPS = 0x00000020, /* bwn_ops processing */
BWN_DEBUG_BEACON = 0x00000040, /* beacon handling */
BWN_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */
BWN_DEBUG_INTR = 0x00000100, /* ISR */
BWN_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */
BWN_DEBUG_NODE = 0x00000400, /* node management */
BWN_DEBUG_LED = 0x00000800, /* led management */
BWN_DEBUG_CMD = 0x00001000, /* cmd submission */
BWN_DEBUG_LO = 0x00002000, /* LO */
BWN_DEBUG_FW = 0x00004000, /* firmware */
BWN_DEBUG_WME = 0x00008000, /* WME */
BWN_DEBUG_RF = 0x00010000, /* RF */
BWN_DEBUG_FATAL = 0x80000000, /* fatal errors */
BWN_DEBUG_ANY = 0xffffffff
};
#define DPRINTF(sc, m, fmt, ...) do { \
if (sc->sc_debug & (m)) \
printf(fmt, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
#endif
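With BWN_DEBUG unset, the DPRINTF() body compiles away entirely; the "(void) sc" still references the sc argument so variables used only for debugging do not trigger unused warnings. When BWN_DEBUG is set, calls are gated on the sc_debug bitmask, e.g. (illustrative, "status" being a hypothetical local):
DPRINTF(sc, BWN_DEBUG_INTR, "%s: status 0x%08x\n", __func__, status);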
static int bwn_bfp = 0; /* use "Bad Frames Preemption" */
SYSCTL_INT(_hw_bwn, OID_AUTO, bfp, CTLFLAG_RW, &bwn_bfp, 0,
"uses Bad Frames Preemption");
static int bwn_bluetooth = 1;
SYSCTL_INT(_hw_bwn, OID_AUTO, bluetooth, CTLFLAG_RW, &bwn_bluetooth, 0,
"turns on Bluetooth Coexistence");
static int bwn_hwpctl = 0;
SYSCTL_INT(_hw_bwn, OID_AUTO, hwpctl, CTLFLAG_RW, &bwn_hwpctl, 0,
"uses H/W power control");
static int bwn_msi_disable = 0; /* MSI disabled */
TUNABLE_INT("hw.bwn.msi_disable", &bwn_msi_disable);
static int bwn_usedma = 1;
SYSCTL_INT(_hw_bwn, OID_AUTO, usedma, CTLFLAG_RD, &bwn_usedma, 0,
"uses DMA");
TUNABLE_INT("hw.bwn.usedma", &bwn_usedma);
static int bwn_wme = 1;
SYSCTL_INT(_hw_bwn, OID_AUTO, wme, CTLFLAG_RW, &bwn_wme, 0,
"uses WME support");
-static int bwn_attach_pre(struct bwn_softc *);
+static void bwn_attach_pre(struct bwn_softc *);
static int bwn_attach_post(struct bwn_softc *);
static void bwn_sprom_bugfixes(device_t);
-static void bwn_init(void *);
-static int bwn_init_locked(struct bwn_softc *);
-static int bwn_ioctl(struct ifnet *, u_long, caddr_t);
-static void bwn_start(struct ifnet *);
+static int bwn_init(struct bwn_softc *);
+static void bwn_parent(struct ieee80211com *);
+static void bwn_start(struct bwn_softc *);
+static int bwn_transmit(struct ieee80211com *, struct mbuf *);
static int bwn_attach_core(struct bwn_mac *);
static void bwn_reset_core(struct bwn_mac *, uint32_t);
static int bwn_phy_getinfo(struct bwn_mac *, int);
static int bwn_chiptest(struct bwn_mac *);
static int bwn_setup_channels(struct bwn_mac *, int, int);
static int bwn_phy_g_attach(struct bwn_mac *);
static void bwn_phy_g_detach(struct bwn_mac *);
static void bwn_phy_g_init_pre(struct bwn_mac *);
static int bwn_phy_g_prepare_hw(struct bwn_mac *);
static int bwn_phy_g_init(struct bwn_mac *);
static void bwn_phy_g_exit(struct bwn_mac *);
static uint16_t bwn_phy_g_read(struct bwn_mac *, uint16_t);
static void bwn_phy_g_write(struct bwn_mac *, uint16_t,
uint16_t);
static uint16_t bwn_phy_g_rf_read(struct bwn_mac *, uint16_t);
static void bwn_phy_g_rf_write(struct bwn_mac *, uint16_t,
uint16_t);
static int bwn_phy_g_hwpctl(struct bwn_mac *);
static void bwn_phy_g_rf_onoff(struct bwn_mac *, int);
static int bwn_phy_g_switch_channel(struct bwn_mac *, uint32_t);
static uint32_t bwn_phy_g_get_default_chan(struct bwn_mac *);
static void bwn_phy_g_set_antenna(struct bwn_mac *, int);
static int bwn_phy_g_im(struct bwn_mac *, int);
static int bwn_phy_g_recalc_txpwr(struct bwn_mac *, int);
static void bwn_phy_g_set_txpwr(struct bwn_mac *);
static void bwn_phy_g_task_15s(struct bwn_mac *);
static void bwn_phy_g_task_60s(struct bwn_mac *);
static uint16_t bwn_phy_g_txctl(struct bwn_mac *);
static void bwn_phy_switch_analog(struct bwn_mac *, int);
static uint16_t bwn_shm_read_2(struct bwn_mac *, uint16_t, uint16_t);
static void bwn_shm_write_2(struct bwn_mac *, uint16_t, uint16_t,
uint16_t);
static uint32_t bwn_shm_read_4(struct bwn_mac *, uint16_t, uint16_t);
static void bwn_shm_write_4(struct bwn_mac *, uint16_t, uint16_t,
uint32_t);
static void bwn_shm_ctlword(struct bwn_mac *, uint16_t,
uint16_t);
static void bwn_addchannels(struct ieee80211_channel [], int, int *,
const struct bwn_channelinfo *, int);
static int bwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void bwn_updateslot(struct ieee80211com *);
static void bwn_update_promisc(struct ieee80211com *);
static void bwn_wme_init(struct bwn_mac *);
static int bwn_wme_update(struct ieee80211com *);
static void bwn_wme_clear(struct bwn_softc *);
static void bwn_wme_load(struct bwn_mac *);
static void bwn_wme_loadparams(struct bwn_mac *,
const struct wmeParams *, uint16_t);
static void bwn_scan_start(struct ieee80211com *);
static void bwn_scan_end(struct ieee80211com *);
static void bwn_set_channel(struct ieee80211com *);
static struct ieee80211vap *bwn_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void bwn_vap_delete(struct ieee80211vap *);
-static void bwn_stop(struct bwn_softc *, int);
-static void bwn_stop_locked(struct bwn_softc *, int);
+static void bwn_stop(struct bwn_softc *);
static int bwn_core_init(struct bwn_mac *);
static void bwn_core_start(struct bwn_mac *);
static void bwn_core_exit(struct bwn_mac *);
static void bwn_bt_disable(struct bwn_mac *);
static int bwn_chip_init(struct bwn_mac *);
static uint64_t bwn_hf_read(struct bwn_mac *);
static void bwn_hf_write(struct bwn_mac *, uint64_t);
static void bwn_set_txretry(struct bwn_mac *, int, int);
static void bwn_rate_init(struct bwn_mac *);
static void bwn_set_phytxctl(struct bwn_mac *);
static void bwn_spu_setdelay(struct bwn_mac *, int);
static void bwn_bt_enable(struct bwn_mac *);
static void bwn_set_macaddr(struct bwn_mac *);
static void bwn_crypt_init(struct bwn_mac *);
static void bwn_chip_exit(struct bwn_mac *);
static int bwn_fw_fillinfo(struct bwn_mac *);
static int bwn_fw_loaducode(struct bwn_mac *);
static int bwn_gpio_init(struct bwn_mac *);
static int bwn_fw_loadinitvals(struct bwn_mac *);
static int bwn_phy_init(struct bwn_mac *);
static void bwn_set_txantenna(struct bwn_mac *, int);
static void bwn_set_opmode(struct bwn_mac *);
static void bwn_rate_write(struct bwn_mac *, uint16_t, int);
static uint8_t bwn_plcp_getcck(const uint8_t);
static uint8_t bwn_plcp_getofdm(const uint8_t);
static void bwn_pio_init(struct bwn_mac *);
static uint16_t bwn_pio_idx2base(struct bwn_mac *, int);
static void bwn_pio_set_txqueue(struct bwn_mac *, struct bwn_pio_txqueue *,
int);
static void bwn_pio_setupqueue_rx(struct bwn_mac *,
struct bwn_pio_rxqueue *, int);
static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *);
static uint16_t bwn_pio_read_2(struct bwn_mac *, struct bwn_pio_txqueue *,
uint16_t);
static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *);
static int bwn_pio_rx(struct bwn_pio_rxqueue *);
static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *);
static void bwn_pio_handle_txeof(struct bwn_mac *,
const struct bwn_txstatus *);
static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *, uint16_t);
static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *, uint16_t);
static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *, uint16_t,
uint16_t);
static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *, uint16_t,
uint32_t);
static int bwn_pio_tx_start(struct bwn_mac *, struct ieee80211_node *,
struct mbuf *);
static struct bwn_pio_txqueue *bwn_pio_select(struct bwn_mac *, uint8_t);
static uint32_t bwn_pio_write_multi_4(struct bwn_mac *,
struct bwn_pio_txqueue *, uint32_t, const void *, int);
static void bwn_pio_write_4(struct bwn_mac *, struct bwn_pio_txqueue *,
uint16_t, uint32_t);
static uint16_t bwn_pio_write_multi_2(struct bwn_mac *,
struct bwn_pio_txqueue *, uint16_t, const void *, int);
static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *,
struct bwn_pio_txqueue *, uint16_t, struct mbuf *);
static struct bwn_pio_txqueue *bwn_pio_parse_cookie(struct bwn_mac *,
uint16_t, struct bwn_pio_txpkt **);
static void bwn_dma_init(struct bwn_mac *);
static void bwn_dma_rxdirectfifo(struct bwn_mac *, int, uint8_t);
static int bwn_dma_mask2type(uint64_t);
static uint64_t bwn_dma_mask(struct bwn_mac *);
static uint16_t bwn_dma_base(int, int);
static void bwn_dma_ringfree(struct bwn_dma_ring **);
static void bwn_dma_32_getdesc(struct bwn_dma_ring *,
int, struct bwn_dmadesc_generic **,
struct bwn_dmadesc_meta **);
static void bwn_dma_32_setdesc(struct bwn_dma_ring *,
struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int,
int, int);
static void bwn_dma_32_start_transfer(struct bwn_dma_ring *, int);
static void bwn_dma_32_suspend(struct bwn_dma_ring *);
static void bwn_dma_32_resume(struct bwn_dma_ring *);
static int bwn_dma_32_get_curslot(struct bwn_dma_ring *);
static void bwn_dma_32_set_curslot(struct bwn_dma_ring *, int);
static void bwn_dma_64_getdesc(struct bwn_dma_ring *,
int, struct bwn_dmadesc_generic **,
struct bwn_dmadesc_meta **);
static void bwn_dma_64_setdesc(struct bwn_dma_ring *,
struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int,
int, int);
static void bwn_dma_64_start_transfer(struct bwn_dma_ring *, int);
static void bwn_dma_64_suspend(struct bwn_dma_ring *);
static void bwn_dma_64_resume(struct bwn_dma_ring *);
static int bwn_dma_64_get_curslot(struct bwn_dma_ring *);
static void bwn_dma_64_set_curslot(struct bwn_dma_ring *, int);
static int bwn_dma_allocringmemory(struct bwn_dma_ring *);
static void bwn_dma_setup(struct bwn_dma_ring *);
static void bwn_dma_free_ringmemory(struct bwn_dma_ring *);
static void bwn_dma_cleanup(struct bwn_dma_ring *);
static void bwn_dma_free_descbufs(struct bwn_dma_ring *);
static int bwn_dma_tx_reset(struct bwn_mac *, uint16_t, int);
static void bwn_dma_rx(struct bwn_dma_ring *);
static int bwn_dma_rx_reset(struct bwn_mac *, uint16_t, int);
static void bwn_dma_free_descbuf(struct bwn_dma_ring *,
struct bwn_dmadesc_meta *);
static void bwn_dma_set_redzone(struct bwn_dma_ring *, struct mbuf *);
static int bwn_dma_gettype(struct bwn_mac *);
static void bwn_dma_ring_addr(void *, bus_dma_segment_t *, int, int);
static int bwn_dma_freeslot(struct bwn_dma_ring *);
static int bwn_dma_nextslot(struct bwn_dma_ring *, int);
static void bwn_dma_rxeof(struct bwn_dma_ring *, int *);
static int bwn_dma_newbuf(struct bwn_dma_ring *,
struct bwn_dmadesc_generic *, struct bwn_dmadesc_meta *,
int);
static void bwn_dma_buf_addr(void *, bus_dma_segment_t *, int,
bus_size_t, int);
static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *, struct mbuf *);
static void bwn_dma_handle_txeof(struct bwn_mac *,
const struct bwn_txstatus *);
static int bwn_dma_tx_start(struct bwn_mac *, struct ieee80211_node *,
struct mbuf *);
static int bwn_dma_getslot(struct bwn_dma_ring *);
static struct bwn_dma_ring *bwn_dma_select(struct bwn_mac *,
uint8_t);
static int bwn_dma_attach(struct bwn_mac *);
static struct bwn_dma_ring *bwn_dma_ringsetup(struct bwn_mac *,
int, int, int);
static struct bwn_dma_ring *bwn_dma_parse_cookie(struct bwn_mac *,
const struct bwn_txstatus *, uint16_t, int *);
static void bwn_dma_free(struct bwn_mac *);
static void bwn_phy_g_init_sub(struct bwn_mac *);
static uint8_t bwn_has_hwpctl(struct bwn_mac *);
static void bwn_phy_init_b5(struct bwn_mac *);
static void bwn_phy_init_b6(struct bwn_mac *);
static void bwn_phy_init_a(struct bwn_mac *);
static void bwn_loopback_calcgain(struct bwn_mac *);
static uint16_t bwn_rf_init_bcm2050(struct bwn_mac *);
static void bwn_lo_g_init(struct bwn_mac *);
static void bwn_lo_g_adjust(struct bwn_mac *);
static void bwn_lo_get_powervector(struct bwn_mac *);
static struct bwn_lo_calib *bwn_lo_calibset(struct bwn_mac *,
const struct bwn_bbatt *, const struct bwn_rfatt *);
static void bwn_lo_write(struct bwn_mac *, struct bwn_loctl *);
static void bwn_phy_hwpctl_init(struct bwn_mac *);
static void bwn_phy_g_switch_chan(struct bwn_mac *, int, uint8_t);
static void bwn_phy_g_set_txpwr_sub(struct bwn_mac *,
const struct bwn_bbatt *, const struct bwn_rfatt *,
uint8_t);
static void bwn_phy_g_set_bbatt(struct bwn_mac *, uint16_t);
static uint16_t bwn_rf_2050_rfoverval(struct bwn_mac *, uint16_t, uint32_t);
static void bwn_spu_workaround(struct bwn_mac *, uint8_t);
static void bwn_wa_init(struct bwn_mac *);
static void bwn_ofdmtab_write_2(struct bwn_mac *, uint16_t, uint16_t,
uint16_t);
static void bwn_dummy_transmission(struct bwn_mac *, int, int);
static void bwn_ofdmtab_write_4(struct bwn_mac *, uint16_t, uint16_t,
uint32_t);
static void bwn_gtab_write(struct bwn_mac *, uint16_t, uint16_t,
uint16_t);
static void bwn_ram_write(struct bwn_mac *, uint16_t, uint32_t);
static void bwn_mac_suspend(struct bwn_mac *);
static void bwn_mac_enable(struct bwn_mac *);
static void bwn_psctl(struct bwn_mac *, uint32_t);
static int16_t bwn_nrssi_read(struct bwn_mac *, uint16_t);
static void bwn_nrssi_offset(struct bwn_mac *);
static void bwn_nrssi_threshold(struct bwn_mac *);
static void bwn_nrssi_slope_11g(struct bwn_mac *);
static void bwn_set_all_gains(struct bwn_mac *, int16_t, int16_t,
int16_t);
static void bwn_set_original_gains(struct bwn_mac *);
static void bwn_hwpctl_early_init(struct bwn_mac *);
static void bwn_hwpctl_init_gphy(struct bwn_mac *);
static uint16_t bwn_phy_g_chan2freq(uint8_t);
static int bwn_fw_gets(struct bwn_mac *, enum bwn_fwtype);
static int bwn_fw_get(struct bwn_mac *, enum bwn_fwtype,
const char *, struct bwn_fwfile *);
static void bwn_release_firmware(struct bwn_mac *);
static void bwn_do_release_fw(struct bwn_fwfile *);
static uint16_t bwn_fwcaps_read(struct bwn_mac *);
static int bwn_fwinitvals_write(struct bwn_mac *,
const struct bwn_fwinitvals *, size_t, size_t);
static int bwn_switch_channel(struct bwn_mac *, int);
static uint16_t bwn_ant2phy(int);
static void bwn_mac_write_bssid(struct bwn_mac *);
static void bwn_mac_setfilter(struct bwn_mac *, uint16_t,
const uint8_t *);
static void bwn_key_dowrite(struct bwn_mac *, uint8_t, uint8_t,
const uint8_t *, size_t, const uint8_t *);
static void bwn_key_macwrite(struct bwn_mac *, uint8_t,
const uint8_t *);
static void bwn_key_write(struct bwn_mac *, uint8_t, uint8_t,
const uint8_t *);
static void bwn_phy_exit(struct bwn_mac *);
static void bwn_core_stop(struct bwn_mac *);
static int bwn_switch_band(struct bwn_softc *,
struct ieee80211_channel *);
static void bwn_phy_reset(struct bwn_mac *);
static int bwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void bwn_set_pretbtt(struct bwn_mac *);
static int bwn_intr(void *);
static void bwn_intrtask(void *, int);
static void bwn_restart(struct bwn_mac *, const char *);
static void bwn_intr_ucode_debug(struct bwn_mac *);
static void bwn_intr_tbtt_indication(struct bwn_mac *);
static void bwn_intr_atim_end(struct bwn_mac *);
static void bwn_intr_beacon(struct bwn_mac *);
static void bwn_intr_pmq(struct bwn_mac *);
static void bwn_intr_noise(struct bwn_mac *);
static void bwn_intr_txeof(struct bwn_mac *);
static void bwn_hwreset(void *, int);
static void bwn_handle_fwpanic(struct bwn_mac *);
static void bwn_load_beacon0(struct bwn_mac *);
static void bwn_load_beacon1(struct bwn_mac *);
static uint32_t bwn_jssi_read(struct bwn_mac *);
static void bwn_noise_gensample(struct bwn_mac *);
static void bwn_handle_txeof(struct bwn_mac *,
const struct bwn_txstatus *);
static void bwn_rxeof(struct bwn_mac *, struct mbuf *, const void *);
static void bwn_phy_txpower_check(struct bwn_mac *, uint32_t);
-static void bwn_start_locked(struct ifnet *);
static int bwn_tx_start(struct bwn_softc *, struct ieee80211_node *,
struct mbuf *);
static int bwn_tx_isfull(struct bwn_softc *, struct mbuf *);
static int bwn_set_txhdr(struct bwn_mac *,
struct ieee80211_node *, struct mbuf *, struct bwn_txhdr *,
uint16_t);
static void bwn_plcp_genhdr(struct bwn_plcp4 *, const uint16_t,
const uint8_t);
static uint8_t bwn_antenna_sanitize(struct bwn_mac *, uint8_t);
static uint8_t bwn_get_fbrate(uint8_t);
static int bwn_phy_shm_tssi_read(struct bwn_mac *, uint16_t);
static void bwn_phy_g_setatt(struct bwn_mac *, int *, int *);
static void bwn_phy_lock(struct bwn_mac *);
static void bwn_phy_unlock(struct bwn_mac *);
static void bwn_rf_lock(struct bwn_mac *);
static void bwn_rf_unlock(struct bwn_mac *);
static void bwn_txpwr(void *, int);
static void bwn_tasks(void *);
static void bwn_task_15s(struct bwn_mac *);
static void bwn_task_30s(struct bwn_mac *);
static void bwn_task_60s(struct bwn_mac *);
static int bwn_plcp_get_ofdmrate(struct bwn_mac *, struct bwn_plcp6 *,
uint8_t);
static int bwn_plcp_get_cckrate(struct bwn_mac *, struct bwn_plcp6 *);
static void bwn_rx_radiotap(struct bwn_mac *, struct mbuf *,
const struct bwn_rxhdr4 *, struct bwn_plcp6 *, int,
int, int);
static void bwn_tsf_read(struct bwn_mac *, uint64_t *);
static void bwn_phy_g_dc_lookup_init(struct bwn_mac *, uint8_t);
static void bwn_set_slot_time(struct bwn_mac *, uint16_t);
static void bwn_watchdog(void *);
static void bwn_dma_stop(struct bwn_mac *);
static void bwn_pio_stop(struct bwn_mac *);
static void bwn_dma_ringstop(struct bwn_dma_ring **);
static void bwn_led_attach(struct bwn_mac *);
static void bwn_led_newstate(struct bwn_mac *, enum ieee80211_state);
static void bwn_led_event(struct bwn_mac *, int);
static void bwn_led_blink_start(struct bwn_mac *, int, int);
static void bwn_led_blink_next(void *);
static void bwn_led_blink_end(void *);
static void bwn_rfswitch(void *);
static void bwn_rf_turnon(struct bwn_mac *);
static void bwn_rf_turnoff(struct bwn_mac *);
static void bwn_phy_lp_init_pre(struct bwn_mac *);
static int bwn_phy_lp_init(struct bwn_mac *);
static uint16_t bwn_phy_lp_read(struct bwn_mac *, uint16_t);
static void bwn_phy_lp_write(struct bwn_mac *, uint16_t, uint16_t);
static void bwn_phy_lp_maskset(struct bwn_mac *, uint16_t, uint16_t,
uint16_t);
static uint16_t bwn_phy_lp_rf_read(struct bwn_mac *, uint16_t);
static void bwn_phy_lp_rf_write(struct bwn_mac *, uint16_t, uint16_t);
static void bwn_phy_lp_rf_onoff(struct bwn_mac *, int);
static int bwn_phy_lp_switch_channel(struct bwn_mac *, uint32_t);
static uint32_t bwn_phy_lp_get_default_chan(struct bwn_mac *);
static void bwn_phy_lp_set_antenna(struct bwn_mac *, int);
static void bwn_phy_lp_task_60s(struct bwn_mac *);
static void bwn_phy_lp_readsprom(struct bwn_mac *);
static void bwn_phy_lp_bbinit(struct bwn_mac *);
static void bwn_phy_lp_txpctl_init(struct bwn_mac *);
static void bwn_phy_lp_calib(struct bwn_mac *);
static void bwn_phy_lp_switch_analog(struct bwn_mac *, int);
static int bwn_phy_lp_b2062_switch_channel(struct bwn_mac *, uint8_t);
static int bwn_phy_lp_b2063_switch_channel(struct bwn_mac *, uint8_t);
static void bwn_phy_lp_set_anafilter(struct bwn_mac *, uint8_t);
static void bwn_phy_lp_set_gaintbl(struct bwn_mac *, uint32_t);
static void bwn_phy_lp_digflt_save(struct bwn_mac *);
static void bwn_phy_lp_get_txpctlmode(struct bwn_mac *);
static void bwn_phy_lp_set_txpctlmode(struct bwn_mac *, uint8_t);
static void bwn_phy_lp_bugfix(struct bwn_mac *);
static void bwn_phy_lp_digflt_restore(struct bwn_mac *);
static void bwn_phy_lp_tblinit(struct bwn_mac *);
static void bwn_phy_lp_bbinit_r2(struct bwn_mac *);
static void bwn_phy_lp_bbinit_r01(struct bwn_mac *);
static void bwn_phy_lp_b2062_init(struct bwn_mac *);
static void bwn_phy_lp_b2063_init(struct bwn_mac *);
static void bwn_phy_lp_rxcal_r2(struct bwn_mac *);
static void bwn_phy_lp_rccal_r12(struct bwn_mac *);
static void bwn_phy_lp_set_rccap(struct bwn_mac *);
static uint32_t bwn_phy_lp_roundup(uint32_t, uint32_t, uint8_t);
static void bwn_phy_lp_b2062_reset_pllbias(struct bwn_mac *);
static void bwn_phy_lp_b2062_vco_calib(struct bwn_mac *);
static void bwn_tab_write_multi(struct bwn_mac *, uint32_t, int,
const void *);
static void bwn_tab_read_multi(struct bwn_mac *, uint32_t, int, void *);
static struct bwn_txgain
bwn_phy_lp_get_txgain(struct bwn_mac *);
static uint8_t bwn_phy_lp_get_bbmult(struct bwn_mac *);
static void bwn_phy_lp_set_txgain(struct bwn_mac *, struct bwn_txgain *);
static void bwn_phy_lp_set_bbmult(struct bwn_mac *, uint8_t);
static void bwn_phy_lp_set_trsw_over(struct bwn_mac *, uint8_t, uint8_t);
static void bwn_phy_lp_set_rxgain(struct bwn_mac *, uint32_t);
static void bwn_phy_lp_set_deaf(struct bwn_mac *, uint8_t);
static int bwn_phy_lp_calc_rx_iq_comp(struct bwn_mac *, uint16_t);
static void bwn_phy_lp_clear_deaf(struct bwn_mac *, uint8_t);
static void bwn_phy_lp_tblinit_r01(struct bwn_mac *);
static void bwn_phy_lp_tblinit_r2(struct bwn_mac *);
static void bwn_phy_lp_tblinit_txgain(struct bwn_mac *);
static void bwn_tab_write(struct bwn_mac *, uint32_t, uint32_t);
static void bwn_phy_lp_b2062_tblinit(struct bwn_mac *);
static void bwn_phy_lp_b2063_tblinit(struct bwn_mac *);
static int bwn_phy_lp_loopback(struct bwn_mac *);
static void bwn_phy_lp_set_rxgain_idx(struct bwn_mac *, uint16_t);
static void bwn_phy_lp_ddfs_turnon(struct bwn_mac *, int, int, int, int,
int);
static uint8_t bwn_phy_lp_rx_iq_est(struct bwn_mac *, uint16_t, uint8_t,
struct bwn_phy_lp_iq_est *);
static void bwn_phy_lp_ddfs_turnoff(struct bwn_mac *);
static uint32_t bwn_tab_read(struct bwn_mac *, uint32_t);
static void bwn_phy_lp_set_txgain_dac(struct bwn_mac *, uint16_t);
static void bwn_phy_lp_set_txgain_pa(struct bwn_mac *, uint16_t);
static void bwn_phy_lp_set_txgain_override(struct bwn_mac *);
static uint16_t bwn_phy_lp_get_pa_gain(struct bwn_mac *);
static uint8_t bwn_nbits(int32_t);
static void bwn_phy_lp_gaintbl_write_multi(struct bwn_mac *, int, int,
struct bwn_txgain_entry *);
static void bwn_phy_lp_gaintbl_write(struct bwn_mac *, int,
struct bwn_txgain_entry);
static void bwn_phy_lp_gaintbl_write_r2(struct bwn_mac *, int,
struct bwn_txgain_entry);
static void bwn_phy_lp_gaintbl_write_r01(struct bwn_mac *, int,
struct bwn_txgain_entry);
static void bwn_sysctl_node(struct bwn_softc *);
static struct resource_spec bwn_res_spec_legacy[] = {
{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
static struct resource_spec bwn_res_spec_msi[] = {
{ SYS_RES_IRQ, 1, RF_ACTIVE },
{ -1, 0, 0 }
};
static const struct bwn_channelinfo bwn_chantable_bg = {
.channels = {
{ 2412, 1, 30 }, { 2417, 2, 30 }, { 2422, 3, 30 },
{ 2427, 4, 30 }, { 2432, 5, 30 }, { 2437, 6, 30 },
{ 2442, 7, 30 }, { 2447, 8, 30 }, { 2452, 9, 30 },
{ 2457, 10, 30 }, { 2462, 11, 30 }, { 2467, 12, 30 },
{ 2472, 13, 30 }, { 2484, 14, 30 } },
.nchannels = 14
};
static const struct bwn_channelinfo bwn_chantable_a = {
.channels = {
{ 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 },
{ 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 },
{ 5230, 46, 30 }, { 5240, 48, 30 }, { 5260, 52, 30 },
{ 5280, 56, 30 }, { 5300, 60, 30 }, { 5320, 64, 30 },
{ 5500, 100, 30 }, { 5520, 104, 30 }, { 5540, 108, 30 },
{ 5560, 112, 30 }, { 5580, 116, 30 }, { 5600, 120, 30 },
{ 5620, 124, 30 }, { 5640, 128, 30 }, { 5660, 132, 30 },
{ 5680, 136, 30 }, { 5700, 140, 30 }, { 5745, 149, 30 },
{ 5765, 153, 30 }, { 5785, 157, 30 }, { 5805, 161, 30 },
{ 5825, 165, 30 }, { 5920, 184, 30 }, { 5940, 188, 30 },
{ 5960, 192, 30 }, { 5980, 196, 30 }, { 6000, 200, 30 },
{ 6020, 204, 30 }, { 6040, 208, 30 }, { 6060, 212, 30 },
{ 6080, 216, 30 } },
.nchannels = 37
};
static const struct bwn_channelinfo bwn_chantable_n = {
.channels = {
{ 5160, 32, 30 }, { 5170, 34, 30 }, { 5180, 36, 30 },
{ 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 },
{ 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 },
{ 5250, 50, 30 }, { 5260, 52, 30 }, { 5270, 54, 30 },
{ 5280, 56, 30 }, { 5290, 58, 30 }, { 5300, 60, 30 },
{ 5310, 62, 30 }, { 5320, 64, 30 }, { 5330, 66, 30 },
{ 5340, 68, 30 }, { 5350, 70, 30 }, { 5360, 72, 30 },
{ 5370, 74, 30 }, { 5380, 76, 30 }, { 5390, 78, 30 },
{ 5400, 80, 30 }, { 5410, 82, 30 }, { 5420, 84, 30 },
{ 5430, 86, 30 }, { 5440, 88, 30 }, { 5450, 90, 30 },
{ 5460, 92, 30 }, { 5470, 94, 30 }, { 5480, 96, 30 },
{ 5490, 98, 30 }, { 5500, 100, 30 }, { 5510, 102, 30 },
{ 5520, 104, 30 }, { 5530, 106, 30 }, { 5540, 108, 30 },
{ 5550, 110, 30 }, { 5560, 112, 30 }, { 5570, 114, 30 },
{ 5580, 116, 30 }, { 5590, 118, 30 }, { 5600, 120, 30 },
{ 5610, 122, 30 }, { 5620, 124, 30 }, { 5630, 126, 30 },
{ 5640, 128, 30 }, { 5650, 130, 30 }, { 5660, 132, 30 },
{ 5670, 134, 30 }, { 5680, 136, 30 }, { 5690, 138, 30 },
{ 5700, 140, 30 }, { 5710, 142, 30 }, { 5720, 144, 30 },
{ 5725, 145, 30 }, { 5730, 146, 30 }, { 5735, 147, 30 },
{ 5740, 148, 30 }, { 5745, 149, 30 }, { 5750, 150, 30 },
{ 5755, 151, 30 }, { 5760, 152, 30 }, { 5765, 153, 30 },
{ 5770, 154, 30 }, { 5775, 155, 30 }, { 5780, 156, 30 },
{ 5785, 157, 30 }, { 5790, 158, 30 }, { 5795, 159, 30 },
{ 5800, 160, 30 }, { 5805, 161, 30 }, { 5810, 162, 30 },
{ 5815, 163, 30 }, { 5820, 164, 30 }, { 5825, 165, 30 },
{ 5830, 166, 30 }, { 5840, 168, 30 }, { 5850, 170, 30 },
{ 5860, 172, 30 }, { 5870, 174, 30 }, { 5880, 176, 30 },
{ 5890, 178, 30 }, { 5900, 180, 30 }, { 5910, 182, 30 },
{ 5920, 184, 30 }, { 5930, 186, 30 }, { 5940, 188, 30 },
{ 5950, 190, 30 }, { 5960, 192, 30 }, { 5970, 194, 30 },
{ 5980, 196, 30 }, { 5990, 198, 30 }, { 6000, 200, 30 },
{ 6010, 202, 30 }, { 6020, 204, 30 }, { 6030, 206, 30 },
{ 6040, 208, 30 }, { 6050, 210, 30 }, { 6060, 212, 30 },
{ 6070, 214, 30 }, { 6080, 216, 30 }, { 6090, 218, 30 },
{ 6100, 220, 30 }, { 6110, 222, 30 }, { 6120, 224, 30 },
{ 6130, 226, 30 }, { 6140, 228, 30 } },
.nchannels = 110
};
static const uint8_t bwn_b2063_chantable_data[33][12] = {
{ 0x6f, 0x3c, 0x3c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 },
{ 0x6f, 0x2c, 0x2c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 },
{ 0x6f, 0x1c, 0x1c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 },
{ 0x6e, 0x1c, 0x1c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 },
{ 0x6e, 0xc, 0xc, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 },
{ 0x6a, 0xc, 0xc, 0, 0x2, 0x5, 0xd, 0xd, 0x77, 0x80, 0x20, 0 },
{ 0x6a, 0xc, 0xc, 0, 0x1, 0x5, 0xd, 0xc, 0x77, 0x80, 0x20, 0 },
{ 0x6a, 0xc, 0xc, 0, 0x1, 0x4, 0xc, 0xc, 0x77, 0x80, 0x20, 0 },
{ 0x69, 0xc, 0xc, 0, 0x1, 0x4, 0xc, 0xc, 0x77, 0x70, 0x20, 0 },
{ 0x69, 0xc, 0xc, 0, 0x1, 0x4, 0xb, 0xc, 0x77, 0x70, 0x20, 0 },
{ 0x69, 0xc, 0xc, 0, 0, 0x4, 0xb, 0xb, 0x77, 0x60, 0x20, 0 },
{ 0x69, 0xc, 0xc, 0, 0, 0x3, 0xa, 0xb, 0x77, 0x60, 0x20, 0 },
{ 0x69, 0xc, 0xc, 0, 0, 0x3, 0xa, 0xa, 0x77, 0x60, 0x20, 0 },
{ 0x68, 0xc, 0xc, 0, 0, 0x2, 0x9, 0x9, 0x77, 0x60, 0x20, 0 },
{ 0x68, 0xc, 0xc, 0, 0, 0x1, 0x8, 0x8, 0x77, 0x50, 0x10, 0 },
{ 0x67, 0xc, 0xc, 0, 0, 0, 0x8, 0x8, 0x77, 0x50, 0x10, 0 },
{ 0x64, 0xc, 0xc, 0, 0, 0, 0x2, 0x1, 0x77, 0x20, 0, 0 },
{ 0x64, 0xc, 0xc, 0, 0, 0, 0x1, 0x1, 0x77, 0x20, 0, 0 },
{ 0x63, 0xc, 0xc, 0, 0, 0, 0x1, 0, 0x77, 0x10, 0, 0 },
{ 0x63, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0x10, 0, 0 },
{ 0x62, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0x10, 0, 0 },
{ 0x62, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 },
{ 0x61, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 },
{ 0x60, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 },
{ 0x6e, 0xc, 0xc, 0, 0x9, 0xe, 0xf, 0xf, 0x77, 0xc0, 0x50, 0 },
{ 0x6e, 0xc, 0xc, 0, 0x9, 0xd, 0xf, 0xf, 0x77, 0xb0, 0x50, 0 },
{ 0x6e, 0xc, 0xc, 0, 0x8, 0xc, 0xf, 0xf, 0x77, 0xb0, 0x50, 0 },
{ 0x6d, 0xc, 0xc, 0, 0x8, 0xc, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 },
{ 0x6d, 0xc, 0xc, 0, 0x8, 0xb, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 },
{ 0x6d, 0xc, 0xc, 0, 0x8, 0xa, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 },
{ 0x6c, 0xc, 0xc, 0, 0x7, 0x9, 0xf, 0xf, 0x77, 0x90, 0x40, 0 },
{ 0x6c, 0xc, 0xc, 0, 0x6, 0x8, 0xf, 0xf, 0x77, 0x90, 0x40, 0 },
{ 0x6c, 0xc, 0xc, 0, 0x5, 0x8, 0xf, 0xf, 0x77, 0x90, 0x40, 0 }
};
static const struct bwn_b206x_chan bwn_b2063_chantable[] = {
{ 1, 2412, bwn_b2063_chantable_data[0] },
{ 2, 2417, bwn_b2063_chantable_data[0] },
{ 3, 2422, bwn_b2063_chantable_data[0] },
{ 4, 2427, bwn_b2063_chantable_data[1] },
{ 5, 2432, bwn_b2063_chantable_data[1] },
{ 6, 2437, bwn_b2063_chantable_data[1] },
{ 7, 2442, bwn_b2063_chantable_data[1] },
{ 8, 2447, bwn_b2063_chantable_data[1] },
{ 9, 2452, bwn_b2063_chantable_data[2] },
{ 10, 2457, bwn_b2063_chantable_data[2] },
{ 11, 2462, bwn_b2063_chantable_data[3] },
{ 12, 2467, bwn_b2063_chantable_data[3] },
{ 13, 2472, bwn_b2063_chantable_data[3] },
{ 14, 2484, bwn_b2063_chantable_data[4] },
{ 34, 5170, bwn_b2063_chantable_data[5] },
{ 36, 5180, bwn_b2063_chantable_data[6] },
{ 38, 5190, bwn_b2063_chantable_data[7] },
{ 40, 5200, bwn_b2063_chantable_data[8] },
{ 42, 5210, bwn_b2063_chantable_data[9] },
{ 44, 5220, bwn_b2063_chantable_data[10] },
{ 46, 5230, bwn_b2063_chantable_data[11] },
{ 48, 5240, bwn_b2063_chantable_data[12] },
{ 52, 5260, bwn_b2063_chantable_data[13] },
{ 56, 5280, bwn_b2063_chantable_data[14] },
{ 60, 5300, bwn_b2063_chantable_data[14] },
{ 64, 5320, bwn_b2063_chantable_data[15] },
{ 100, 5500, bwn_b2063_chantable_data[16] },
{ 104, 5520, bwn_b2063_chantable_data[17] },
{ 108, 5540, bwn_b2063_chantable_data[18] },
{ 112, 5560, bwn_b2063_chantable_data[19] },
{ 116, 5580, bwn_b2063_chantable_data[20] },
{ 120, 5600, bwn_b2063_chantable_data[21] },
{ 124, 5620, bwn_b2063_chantable_data[21] },
{ 128, 5640, bwn_b2063_chantable_data[22] },
{ 132, 5660, bwn_b2063_chantable_data[22] },
{ 136, 5680, bwn_b2063_chantable_data[22] },
{ 140, 5700, bwn_b2063_chantable_data[23] },
{ 149, 5745, bwn_b2063_chantable_data[23] },
{ 153, 5765, bwn_b2063_chantable_data[23] },
{ 157, 5785, bwn_b2063_chantable_data[23] },
{ 161, 5805, bwn_b2063_chantable_data[23] },
{ 165, 5825, bwn_b2063_chantable_data[23] },
{ 184, 4920, bwn_b2063_chantable_data[24] },
{ 188, 4940, bwn_b2063_chantable_data[25] },
{ 192, 4960, bwn_b2063_chantable_data[26] },
{ 196, 4980, bwn_b2063_chantable_data[27] },
{ 200, 5000, bwn_b2063_chantable_data[28] },
{ 204, 5020, bwn_b2063_chantable_data[29] },
{ 208, 5040, bwn_b2063_chantable_data[30] },
{ 212, 5060, bwn_b2063_chantable_data[31] },
{ 216, 5080, bwn_b2063_chantable_data[32] }
};
static const uint8_t bwn_b2062_chantable_data[22][12] = {
{ 0xff, 0xff, 0xb5, 0x1b, 0x24, 0x32, 0x32, 0x88, 0x88, 0, 0, 0 },
{ 0, 0x22, 0x20, 0x84, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0x11, 0x10, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0x11, 0x20, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0x11, 0x10, 0x84, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0x11, 0, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0x63, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0x62, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0x30, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0x20, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0x10, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 },
{ 0, 0, 0, 0, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 },
{ 0x55, 0x77, 0x90, 0xf7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x44, 0x77, 0x80, 0xe7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x44, 0x66, 0x80, 0xe7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x33, 0x66, 0x70, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x22, 0x55, 0x60, 0xd7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x22, 0x55, 0x60, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x22, 0x44, 0x50, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 },
{ 0x11, 0x44, 0x50, 0xa5, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 },
{ 0, 0x44, 0x40, 0xb6, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }
};
static const struct bwn_b206x_chan bwn_b2062_chantable[] = {
{ 1, 2412, bwn_b2062_chantable_data[0] },
{ 2, 2417, bwn_b2062_chantable_data[0] },
{ 3, 2422, bwn_b2062_chantable_data[0] },
{ 4, 2427, bwn_b2062_chantable_data[0] },
{ 5, 2432, bwn_b2062_chantable_data[0] },
{ 6, 2437, bwn_b2062_chantable_data[0] },
{ 7, 2442, bwn_b2062_chantable_data[0] },
{ 8, 2447, bwn_b2062_chantable_data[0] },
{ 9, 2452, bwn_b2062_chantable_data[0] },
{ 10, 2457, bwn_b2062_chantable_data[0] },
{ 11, 2462, bwn_b2062_chantable_data[0] },
{ 12, 2467, bwn_b2062_chantable_data[0] },
{ 13, 2472, bwn_b2062_chantable_data[0] },
{ 14, 2484, bwn_b2062_chantable_data[0] },
{ 34, 5170, bwn_b2062_chantable_data[1] },
{ 38, 5190, bwn_b2062_chantable_data[2] },
{ 42, 5210, bwn_b2062_chantable_data[2] },
{ 46, 5230, bwn_b2062_chantable_data[3] },
{ 36, 5180, bwn_b2062_chantable_data[4] },
{ 40, 5200, bwn_b2062_chantable_data[5] },
{ 44, 5220, bwn_b2062_chantable_data[6] },
{ 48, 5240, bwn_b2062_chantable_data[3] },
{ 52, 5260, bwn_b2062_chantable_data[3] },
{ 56, 5280, bwn_b2062_chantable_data[3] },
{ 60, 5300, bwn_b2062_chantable_data[7] },
{ 64, 5320, bwn_b2062_chantable_data[8] },
{ 100, 5500, bwn_b2062_chantable_data[9] },
{ 104, 5520, bwn_b2062_chantable_data[10] },
{ 108, 5540, bwn_b2062_chantable_data[10] },
{ 112, 5560, bwn_b2062_chantable_data[10] },
{ 116, 5580, bwn_b2062_chantable_data[11] },
{ 120, 5600, bwn_b2062_chantable_data[12] },
{ 124, 5620, bwn_b2062_chantable_data[12] },
{ 128, 5640, bwn_b2062_chantable_data[12] },
{ 132, 5660, bwn_b2062_chantable_data[12] },
{ 136, 5680, bwn_b2062_chantable_data[12] },
{ 140, 5700, bwn_b2062_chantable_data[12] },
{ 149, 5745, bwn_b2062_chantable_data[12] },
{ 153, 5765, bwn_b2062_chantable_data[12] },
{ 157, 5785, bwn_b2062_chantable_data[12] },
{ 161, 5805, bwn_b2062_chantable_data[12] },
{ 165, 5825, bwn_b2062_chantable_data[12] },
{ 184, 4920, bwn_b2062_chantable_data[13] },
{ 188, 4940, bwn_b2062_chantable_data[14] },
{ 192, 4960, bwn_b2062_chantable_data[15] },
{ 196, 4980, bwn_b2062_chantable_data[16] },
{ 200, 5000, bwn_b2062_chantable_data[17] },
{ 204, 5020, bwn_b2062_chantable_data[18] },
{ 208, 5040, bwn_b2062_chantable_data[19] },
{ 212, 5060, bwn_b2062_chantable_data[20] },
{ 216, 5080, bwn_b2062_chantable_data[21] }
};
/* for LP PHY */
static const struct bwn_rxcompco bwn_rxcompco_5354[] = {
{ 1, -66, 15 }, { 2, -66, 15 }, { 3, -66, 15 }, { 4, -66, 15 },
{ 5, -66, 15 }, { 6, -66, 15 }, { 7, -66, 14 }, { 8, -66, 14 },
{ 9, -66, 14 }, { 10, -66, 14 }, { 11, -66, 14 }, { 12, -66, 13 },
{ 13, -66, 13 }, { 14, -66, 13 },
};
/* for LP PHY */
static const struct bwn_rxcompco bwn_rxcompco_r12[] = {
{ 1, -64, 13 }, { 2, -64, 13 }, { 3, -64, 13 }, { 4, -64, 13 },
{ 5, -64, 12 }, { 6, -64, 12 }, { 7, -64, 12 }, { 8, -64, 12 },
{ 9, -64, 12 }, { 10, -64, 11 }, { 11, -64, 11 }, { 12, -64, 11 },
{ 13, -64, 11 }, { 14, -64, 10 }, { 34, -62, 24 }, { 38, -62, 24 },
{ 42, -62, 24 }, { 46, -62, 23 }, { 36, -62, 24 }, { 40, -62, 24 },
{ 44, -62, 23 }, { 48, -62, 23 }, { 52, -62, 23 }, { 56, -62, 22 },
{ 60, -62, 22 }, { 64, -62, 22 }, { 100, -62, 16 }, { 104, -62, 16 },
{ 108, -62, 15 }, { 112, -62, 14 }, { 116, -62, 14 }, { 120, -62, 13 },
{ 124, -62, 12 }, { 128, -62, 12 }, { 132, -62, 12 }, { 136, -62, 11 },
{ 140, -62, 10 }, { 149, -61, 9 }, { 153, -61, 9 }, { 157, -61, 9 },
{ 161, -61, 8 }, { 165, -61, 8 }, { 184, -62, 25 }, { 188, -62, 25 },
{ 192, -62, 25 }, { 196, -62, 25 }, { 200, -62, 25 }, { 204, -62, 25 },
{ 208, -62, 25 }, { 212, -62, 25 }, { 216, -62, 26 },
};
static const struct bwn_rxcompco bwn_rxcompco_r2 = { 0, -64, 0 };
static const uint8_t bwn_tab_sigsq_tbl[] = {
0xde, 0xdc, 0xda, 0xd8, 0xd6, 0xd4, 0xd2, 0xcf, 0xcd,
0xca, 0xc7, 0xc4, 0xc1, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0x00,
0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe,
0xbe, 0xbe, 0xbe, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd,
0xcf, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
};
static const uint8_t bwn_tab_pllfrac_tbl[] = {
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
};
static const uint16_t bwn_tabl_iqlocal_tbl[] = {
0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002,
0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0300, 0x0400, 0x0600,
0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006,
0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
};
static const uint16_t bwn_tab_noise_g1[] = BWN_TAB_NOISE_G1;
static const uint16_t bwn_tab_noise_g2[] = BWN_TAB_NOISE_G2;
static const uint16_t bwn_tab_noisescale_g1[] = BWN_TAB_NOISESCALE_G1;
static const uint16_t bwn_tab_noisescale_g2[] = BWN_TAB_NOISESCALE_G2;
static const uint16_t bwn_tab_noisescale_g3[] = BWN_TAB_NOISESCALE_G3;
const uint8_t bwn_bitrev_table[256] = BWN_BITREV_TABLE;
#define VENDOR_LED_ACT(vendor) \
{ \
.vid = PCI_VENDOR_##vendor, \
.led_act = { BWN_VENDOR_LED_ACT_##vendor } \
}
static const struct {
uint16_t vid;
uint8_t led_act[BWN_LED_MAX];
} bwn_vendor_led_act[] = {
VENDOR_LED_ACT(COMPAQ),
VENDOR_LED_ACT(ASUSTEK)
};
static const uint8_t bwn_default_led_act[BWN_LED_MAX] =
{ BWN_VENDOR_LED_ACT_DEFAULT };
#undef VENDOR_LED_ACT
static const struct {
int on_dur;
int off_dur;
} bwn_led_duration[109] = {
[0] = { 400, 100 },
[2] = { 150, 75 },
[4] = { 90, 45 },
[11] = { 66, 34 },
[12] = { 53, 26 },
[18] = { 42, 21 },
[22] = { 35, 17 },
[24] = { 32, 16 },
[36] = { 21, 10 },
[48] = { 16, 8 },
[72] = { 11, 5 },
[96] = { 9, 4 },
[108] = { 7, 3 }
};
static const uint16_t bwn_wme_shm_offsets[] = {
[0] = BWN_WME_BESTEFFORT,
[1] = BWN_WME_BACKGROUND,
[2] = BWN_WME_VOICE,
[3] = BWN_WME_VIDEO,
};
static const struct siba_devid bwn_devs[] = {
SIBA_DEV(BROADCOM, 80211, 5, "Revision 5"),
SIBA_DEV(BROADCOM, 80211, 6, "Revision 6"),
SIBA_DEV(BROADCOM, 80211, 7, "Revision 7"),
SIBA_DEV(BROADCOM, 80211, 9, "Revision 9"),
SIBA_DEV(BROADCOM, 80211, 10, "Revision 10"),
SIBA_DEV(BROADCOM, 80211, 11, "Revision 11"),
SIBA_DEV(BROADCOM, 80211, 13, "Revision 13"),
SIBA_DEV(BROADCOM, 80211, 15, "Revision 15"),
SIBA_DEV(BROADCOM, 80211, 16, "Revision 16")
};
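/*
 * Match supported Broadcom 802.11 cores by SIBA vendor, device and
 * core revision.
 */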
static int
bwn_probe(device_t dev)
{
int i;
for (i = 0; i < sizeof(bwn_devs) / sizeof(bwn_devs[0]); i++) {
if (siba_get_vendor(dev) == bwn_devs[i].sd_vendor &&
siba_get_device(dev) == bwn_devs[i].sd_device &&
siba_get_revid(dev) == bwn_devs[i].sd_rev)
return (BUS_PROBE_DEFAULT);
}
return (ENXIO);
}
static int
bwn_attach(device_t dev)
{
struct bwn_mac *mac;
struct bwn_softc *sc = device_get_softc(dev);
int error, i, msic, reg;
sc->sc_dev = dev;
#ifdef BWN_DEBUG
sc->sc_debug = bwn_debug;
#endif
if ((sc->sc_flags & BWN_FLAG_ATTACHED) == 0) {
- error = bwn_attach_pre(sc);
- if (error != 0)
- return (error);
+ bwn_attach_pre(sc);
bwn_sprom_bugfixes(dev);
sc->sc_flags |= BWN_FLAG_ATTACHED;
}
if (!TAILQ_EMPTY(&sc->sc_maclist)) {
if (siba_get_pci_device(dev) != 0x4313 &&
siba_get_pci_device(dev) != 0x431a &&
siba_get_pci_device(dev) != 0x4321) {
device_printf(sc->sc_dev,
"skip 802.11 cores\n");
return (ENODEV);
}
}
- mac = (struct bwn_mac *)malloc(sizeof(*mac), M_DEVBUF,
- M_NOWAIT | M_ZERO);
- if (mac == NULL)
- return (ENOMEM);
+ mac = malloc(sizeof(*mac), M_DEVBUF, M_WAITOK | M_ZERO);
mac->mac_sc = sc;
mac->mac_status = BWN_MAC_STATUS_UNINIT;
if (bwn_bfp != 0)
mac->mac_flags |= BWN_MAC_FLAG_BADFRAME_PREEMP;
TASK_INIT(&mac->mac_hwreset, 0, bwn_hwreset, mac);
TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac);
TASK_INIT(&mac->mac_txpower, 0, bwn_txpwr, mac);
error = bwn_attach_core(mac);
if (error)
goto fail0;
bwn_led_attach(mac);
device_printf(sc->sc_dev, "WLAN (chipid %#x rev %u) "
"PHY (analog %d type %d rev %d) RADIO (manuf %#x ver %#x rev %d)\n",
siba_get_chipid(sc->sc_dev), siba_get_revid(sc->sc_dev),
mac->mac_phy.analog, mac->mac_phy.type, mac->mac_phy.rev,
mac->mac_phy.rf_manuf, mac->mac_phy.rf_ver,
mac->mac_phy.rf_rev);
if (mac->mac_flags & BWN_MAC_FLAG_DMA)
device_printf(sc->sc_dev, "DMA (%d bits)\n",
mac->mac_method.dma.dmatype);
else
device_printf(sc->sc_dev, "PIO\n");
/*
* Set up PCI resources and the interrupt.
*/
if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
msic = pci_msi_count(dev);
if (bootverbose)
device_printf(sc->sc_dev, "MSI count : %d\n", msic);
} else
msic = 0;
mac->mac_intr_spec = bwn_res_spec_legacy;
if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0) {
if (pci_alloc_msi(dev, &msic) == 0) {
device_printf(sc->sc_dev,
"Using %d MSI messages\n", msic);
mac->mac_intr_spec = bwn_res_spec_msi;
mac->mac_msi = 1;
}
}
error = bus_alloc_resources(dev, mac->mac_intr_spec,
mac->mac_res_irq);
if (error) {
device_printf(sc->sc_dev,
"couldn't allocate IRQ resources (%d)\n", error);
goto fail1;
}
if (mac->mac_msi == 0)
error = bus_setup_intr(dev, mac->mac_res_irq[0],
INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac,
&mac->mac_intrhand[0]);
else {
for (i = 0; i < BWN_MSI_MESSAGES; i++) {
error = bus_setup_intr(dev, mac->mac_res_irq[i],
INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac,
&mac->mac_intrhand[i]);
if (error != 0) {
device_printf(sc->sc_dev,
"couldn't setup interrupt (%d)\n", error);
break;
}
}
}
TAILQ_INSERT_TAIL(&sc->sc_maclist, mac, mac_list);
/*
* Call the attach-post routine.
*/
if ((sc->sc_flags & BWN_FLAG_ATTACHED) != 0)
bwn_attach_post(sc);
return (0);
fail1:
if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0)
pci_release_msi(dev);
fail0:
free(mac, M_DEVBUF);
return (error);
}
static int
bwn_is_valid_ether_addr(uint8_t *addr)
{
char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN)))
return (FALSE);
return (TRUE);
}
static int
bwn_attach_post(struct bwn_softc *sc)
{
- struct ieee80211com *ic;
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
- ic = ifp->if_l2com;
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(sc->sc_dev);
/* XXX not right but it's not used anywhere important */
ic->ic_phytype = IEEE80211_T_OFDM;
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_caps =
IEEE80211_C_STA /* station mode supported */
| IEEE80211_C_MONITOR /* monitor mode */
| IEEE80211_C_AHDEMO /* adhoc demo mode */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_WME /* WME/WMM supported */
| IEEE80211_C_WPA /* capable of WPA1+WPA2 */
| IEEE80211_C_BGSCAN /* capable of bg scanning */
| IEEE80211_C_TXPMGT /* capable of txpow mgt */
;
ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; /* s/w bmiss */
- /* call MI attach routine. */
- ieee80211_ifattach(ic,
+ IEEE80211_ADDR_COPY(ic->ic_macaddr,
bwn_is_valid_ether_addr(siba_sprom_get_mac_80211a(sc->sc_dev)) ?
siba_sprom_get_mac_80211a(sc->sc_dev) :
siba_sprom_get_mac_80211bg(sc->sc_dev));
+ /* call MI attach routine. */
+ ieee80211_ifattach(ic);
+
ic->ic_headroom = sizeof(struct bwn_txhdr);
/* override default methods */
ic->ic_raw_xmit = bwn_raw_xmit;
ic->ic_updateslot = bwn_updateslot;
ic->ic_update_promisc = bwn_update_promisc;
ic->ic_wme.wme_update = bwn_wme_update;
-
ic->ic_scan_start = bwn_scan_start;
ic->ic_scan_end = bwn_scan_end;
ic->ic_set_channel = bwn_set_channel;
-
ic->ic_vap_create = bwn_vap_create;
ic->ic_vap_delete = bwn_vap_delete;
+ ic->ic_transmit = bwn_transmit;
+ ic->ic_parent = bwn_parent;
ieee80211_radiotap_attach(ic,
&sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
BWN_TX_RADIOTAP_PRESENT,
&sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
BWN_RX_RADIOTAP_PRESENT);
bwn_sysctl_node(sc);
if (bootverbose)
ieee80211_announce(ic);
return (0);
}
static void
bwn_phy_detach(struct bwn_mac *mac)
{
if (mac->mac_phy.detach != NULL)
mac->mac_phy.detach(mac);
}
static int
bwn_detach(device_t dev)
{
struct bwn_softc *sc = device_get_softc(dev);
struct bwn_mac *mac = sc->sc_curmac;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
int i;
sc->sc_flags |= BWN_FLAG_INVALID;
if (device_is_attached(sc->sc_dev)) {
- bwn_stop(sc, 1);
+ BWN_LOCK(sc);
+ bwn_stop(sc);
+ BWN_UNLOCK(sc);
bwn_dma_free(mac);
callout_drain(&sc->sc_led_blink_ch);
callout_drain(&sc->sc_rfswitch_ch);
callout_drain(&sc->sc_task_ch);
callout_drain(&sc->sc_watchdog_ch);
bwn_phy_detach(mac);
- if (ifp != NULL) {
- ieee80211_draintask(ic, &mac->mac_hwreset);
- ieee80211_draintask(ic, &mac->mac_txpower);
- ieee80211_ifdetach(ic);
- if_free(ifp);
- }
+ ieee80211_draintask(ic, &mac->mac_hwreset);
+ ieee80211_draintask(ic, &mac->mac_txpower);
+ ieee80211_ifdetach(ic);
}
taskqueue_drain(sc->sc_tq, &mac->mac_intrtask);
taskqueue_free(sc->sc_tq);
for (i = 0; i < BWN_MSI_MESSAGES; i++) {
if (mac->mac_intrhand[i] != NULL) {
bus_teardown_intr(dev, mac->mac_res_irq[i],
mac->mac_intrhand[i]);
mac->mac_intrhand[i] = NULL;
}
}
bus_release_resources(dev, mac->mac_intr_spec, mac->mac_res_irq);
if (mac->mac_msi != 0)
pci_release_msi(dev);
-
+ mbufq_drain(&sc->sc_snd);
BWN_LOCK_DESTROY(sc);
return (0);
}
-static int
+static void
bwn_attach_pre(struct bwn_softc *sc)
{
- struct ifnet *ifp;
- int error = 0;
BWN_LOCK_INIT(sc);
TAILQ_INIT(&sc->sc_maclist);
callout_init_mtx(&sc->sc_rfswitch_ch, &sc->sc_mtx, 0);
callout_init_mtx(&sc->sc_task_ch, &sc->sc_mtx, 0);
callout_init_mtx(&sc->sc_watchdog_ch, &sc->sc_mtx, 0);
-
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
sc->sc_tq = taskqueue_create_fast("bwn_taskq", M_NOWAIT,
taskqueue_thread_enqueue, &sc->sc_tq);
taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
"%s taskq", device_get_nameunit(sc->sc_dev));
-
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
-
- /* set these up early for if_printf use */
- if_initname(ifp, device_get_name(sc->sc_dev),
- device_get_unit(sc->sc_dev));
-
- ifp->if_softc = sc;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = bwn_init;
- ifp->if_ioctl = bwn_ioctl;
- ifp->if_start = bwn_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- return (0);
-
-fail: BWN_LOCK_DESTROY(sc);
- return (error);
}
static void
bwn_sprom_bugfixes(device_t dev)
{
#define BWN_ISDEV(_vendor, _device, _subvendor, _subdevice) \
((siba_get_pci_vendor(dev) == PCI_VENDOR_##_vendor) && \
(siba_get_pci_device(dev) == _device) && \
(siba_get_pci_subvendor(dev) == PCI_VENDOR_##_subvendor) && \
(siba_get_pci_subdevice(dev) == _subdevice))
if (siba_get_pci_subvendor(dev) == PCI_VENDOR_APPLE &&
siba_get_pci_subdevice(dev) == 0x4e &&
siba_get_pci_revid(dev) > 0x40)
siba_sprom_set_bf_lo(dev,
siba_sprom_get_bf_lo(dev) | BWN_BFL_PACTRL);
if (siba_get_pci_subvendor(dev) == SIBA_BOARDVENDOR_DELL &&
siba_get_chipid(dev) == 0x4301 && siba_get_pci_revid(dev) == 0x74)
siba_sprom_set_bf_lo(dev,
siba_sprom_get_bf_lo(dev) | BWN_BFL_BTCOEXIST);
if (siba_get_type(dev) == SIBA_TYPE_PCI) {
if (BWN_ISDEV(BROADCOM, 0x4318, ASUSTEK, 0x100f) ||
BWN_ISDEV(BROADCOM, 0x4320, DELL, 0x0003) ||
BWN_ISDEV(BROADCOM, 0x4320, HP, 0x12f8) ||
BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0013) ||
BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0014) ||
BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0015) ||
BWN_ISDEV(BROADCOM, 0x4320, MOTOROLA, 0x7010))
siba_sprom_set_bf_lo(dev,
siba_sprom_get_bf_lo(dev) & ~BWN_BFL_BTCOEXIST);
}
#undef BWN_ISDEV
}
-static int
-bwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+bwn_parent(struct ieee80211com *ic)
{
-#define IS_RUNNING(ifp) \
- ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
- struct bwn_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *)data;
- int error = 0, startall;
+ struct bwn_softc *sc = ic->ic_softc;
+ int startall = 0;
- switch (cmd) {
- case SIOCSIFFLAGS:
- startall = 0;
- if (IS_RUNNING(ifp)) {
- bwn_update_promisc(ic);
- } else if (ifp->if_flags & IFF_UP) {
- if ((sc->sc_flags & BWN_FLAG_INVALID) == 0) {
- bwn_init(sc);
- startall = 1;
- }
+ BWN_LOCK(sc);
+ if (ic->ic_nrunning > 0) {
+ if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) {
+ bwn_init(sc);
+ startall = 1;
} else
- bwn_stop(sc, 1);
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
- }
- return (error);
+ bwn_update_promisc(ic);
+ } else if (sc->sc_flags & BWN_FLAG_RUNNING)
+ bwn_stop(sc);
+ BWN_UNLOCK(sc);
+
+ if (startall)
+ ieee80211_start_all(ic);
}
-static void
-bwn_start(struct ifnet *ifp)
+static int
+bwn_transmit(struct ieee80211com *ic, struct mbuf *m)
{
- struct bwn_softc *sc = ifp->if_softc;
+ struct bwn_softc *sc = ic->ic_softc;
+ int error;
BWN_LOCK(sc);
- bwn_start_locked(ifp);
+ if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) {
+ BWN_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ BWN_UNLOCK(sc);
+ return (error);
+ }
+ bwn_start(sc);
BWN_UNLOCK(sc);
+ return (0);
}
static void
-bwn_start_locked(struct ifnet *ifp)
+bwn_start(struct bwn_softc *sc)
{
- struct bwn_softc *sc = ifp->if_softc;
struct bwn_mac *mac = sc->sc_curmac;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct ieee80211_key *k;
struct mbuf *m;
BWN_ASSERT_LOCKED(sc);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || mac == NULL ||
+ if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 || mac == NULL ||
mac->mac_status < BWN_MAC_STATUS_STARTED)
return;
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m); /* XXX: LOCK */
- if (m == NULL)
- break;
-
+ while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
if (bwn_tx_isfull(sc, m))
break;
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
if (ni == NULL) {
device_printf(sc->sc_dev, "unexpected NULL ni\n");
m_freem(m);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
continue;
}
- KASSERT(ni != NULL, ("%s:%d: fail", __func__, __LINE__));
wh = mtod(m, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
m_freem(m);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
continue;
}
}
wh = NULL; /* Catch any invalid use */
-
if (bwn_tx_start(sc, ni, m) != 0) {
- if (ni != NULL)
+ if (ni != NULL) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ }
continue;
}
-
sc->sc_watchdog_timer = 5;
}
}
static int
bwn_tx_isfull(struct bwn_softc *sc, struct mbuf *m)
{
struct bwn_dma_ring *dr;
struct bwn_mac *mac = sc->sc_curmac;
struct bwn_pio_txqueue *tq;
- struct ifnet *ifp = sc->sc_ifp;
int pktlen = roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4);
BWN_ASSERT_LOCKED(sc);
if (mac->mac_flags & BWN_MAC_FLAG_DMA) {
dr = bwn_dma_select(mac, M_WME_GETAC(m));
if (dr->dr_stop == 1 ||
bwn_dma_freeslot(dr) < BWN_TX_SLOTS_PER_FRAME) {
dr->dr_stop = 1;
goto full;
}
} else {
tq = bwn_pio_select(mac, M_WME_GETAC(m));
if (tq->tq_free == 0 || pktlen > tq->tq_size ||
- pktlen > (tq->tq_size - tq->tq_used)) {
- tq->tq_stop = 1;
+ pktlen > (tq->tq_size - tq->tq_used))
goto full;
- }
}
return (0);
full:
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ mbufq_prepend(&sc->sc_snd, m);
return (1);
}
static int
bwn_tx_start(struct bwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m)
{
struct bwn_mac *mac = sc->sc_curmac;
int error;
BWN_ASSERT_LOCKED(sc);
if (m->m_pkthdr.len < IEEE80211_MIN_LEN || mac == NULL) {
m_freem(m);
return (ENXIO);
}
error = (mac->mac_flags & BWN_MAC_FLAG_DMA) ?
bwn_dma_tx_start(mac, ni, m) : bwn_pio_tx_start(mac, ni, m);
if (error) {
m_freem(m);
return (error);
}
return (0);
}
static int
bwn_pio_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m)
{
struct bwn_pio_txpkt *tp;
struct bwn_pio_txqueue *tq = bwn_pio_select(mac, M_WME_GETAC(m));
struct bwn_softc *sc = mac->mac_sc;
struct bwn_txhdr txhdr;
struct mbuf *m_new;
uint32_t ctl32;
int error;
uint16_t ctl16;
BWN_ASSERT_LOCKED(sc);
/* XXX TODO send packets after DTIM */
KASSERT(!TAILQ_EMPTY(&tq->tq_pktlist), ("%s: fail", __func__));
tp = TAILQ_FIRST(&tq->tq_pktlist);
tp->tp_ni = ni;
tp->tp_m = m;
error = bwn_set_txhdr(mac, ni, m, &txhdr, BWN_PIO_COOKIE(tq, tp));
if (error) {
device_printf(sc->sc_dev, "tx fail\n");
return (error);
}
TAILQ_REMOVE(&tq->tq_pktlist, tp, tp_list);
tq->tq_used += roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4);
tq->tq_free--;
if (siba_get_revid(sc->sc_dev) >= 8) {
/*
* XXX please remove the m_defrag(9) call
*/
m_new = m_defrag(m, M_NOWAIT);
if (m_new == NULL) {
device_printf(sc->sc_dev,
"%s: can't defrag TX buffer\n",
__func__);
return (ENOBUFS);
}
if (m_new->m_next != NULL)
device_printf(sc->sc_dev,
"TODO: fragmented packets for PIO\n");
tp->tp_m = m_new;
/* send HEADER */
ctl32 = bwn_pio_write_multi_4(mac, tq,
(BWN_PIO_READ_4(mac, tq, BWN_PIO8_TXCTL) |
BWN_PIO8_TXCTL_FRAMEREADY) & ~BWN_PIO8_TXCTL_EOF,
(const uint8_t *)&txhdr, BWN_HDRSIZE(mac));
/* send BODY */
ctl32 = bwn_pio_write_multi_4(mac, tq, ctl32,
mtod(m_new, const void *), m_new->m_pkthdr.len);
bwn_pio_write_4(mac, tq, BWN_PIO_TXCTL,
ctl32 | BWN_PIO8_TXCTL_EOF);
} else {
ctl16 = bwn_pio_write_multi_2(mac, tq,
(bwn_pio_read_2(mac, tq, BWN_PIO_TXCTL) |
BWN_PIO_TXCTL_FRAMEREADY) & ~BWN_PIO_TXCTL_EOF,
(const uint8_t *)&txhdr, BWN_HDRSIZE(mac));
ctl16 = bwn_pio_write_mbuf_2(mac, tq, ctl16, m);
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL,
ctl16 | BWN_PIO_TXCTL_EOF);
}
return (0);
}
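/*
 * Map a WME priority to its PIO TX queue; fall back to the best-effort
 * queue when WME is disabled.
 */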
static struct bwn_pio_txqueue *
bwn_pio_select(struct bwn_mac *mac, uint8_t prio)
{
if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0)
return (&mac->mac_method.pio.wme[WME_AC_BE]);
switch (prio) {
case 0:
return (&mac->mac_method.pio.wme[WME_AC_BE]);
case 1:
return (&mac->mac_method.pio.wme[WME_AC_BK]);
case 2:
return (&mac->mac_method.pio.wme[WME_AC_VI]);
case 3:
return (&mac->mac_method.pio.wme[WME_AC_VO]);
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (NULL);
}
static int
bwn_dma_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m)
{
#define BWN_GET_TXHDRCACHE(slot) \
&(txhdr_cache[(slot / BWN_TX_SLOTS_PER_FRAME) * BWN_HDRSIZE(mac)])
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_dma_ring *dr = bwn_dma_select(mac, M_WME_GETAC(m));
struct bwn_dmadesc_generic *desc;
struct bwn_dmadesc_meta *mt;
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
uint8_t *txhdr_cache = (uint8_t *)dr->dr_txhdr_cache;
int error, slot, backup[2] = { dr->dr_curslot, dr->dr_usedslot };
BWN_ASSERT_LOCKED(sc);
KASSERT(!dr->dr_stop, ("%s:%d: fail", __func__, __LINE__));
/* XXX send after DTIM */
slot = bwn_dma_getslot(dr);
dr->getdesc(dr, slot, &desc, &mt);
KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_HEADER,
("%s:%d: fail", __func__, __LINE__));
error = bwn_set_txhdr(dr->dr_mac, ni, m,
(struct bwn_txhdr *)BWN_GET_TXHDRCACHE(slot),
BWN_DMA_COOKIE(dr, slot));
if (error)
goto fail;
error = bus_dmamap_load(dr->dr_txring_dtag, mt->mt_dmap,
BWN_GET_TXHDRCACHE(slot), BWN_HDRSIZE(mac), bwn_dma_ring_addr,
&mt->mt_paddr, BUS_DMA_NOWAIT);
if (error) {
- if_printf(ifp, "%s: can't load TX buffer (1) %d\n",
+ device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n",
__func__, error);
goto fail;
}
bus_dmamap_sync(dr->dr_txring_dtag, mt->mt_dmap,
BUS_DMASYNC_PREWRITE);
dr->setdesc(dr, desc, mt->mt_paddr, BWN_HDRSIZE(mac), 1, 0, 0);
bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap,
BUS_DMASYNC_PREWRITE);
slot = bwn_dma_getslot(dr);
dr->getdesc(dr, slot, &desc, &mt);
KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_BODY &&
mt->mt_islast == 1, ("%s:%d: fail", __func__, __LINE__));
mt->mt_m = m;
mt->mt_ni = ni;
error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m,
bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT);
if (error && error != EFBIG) {
- if_printf(ifp, "%s: can't load TX buffer (1) %d\n",
+ device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n",
__func__, error);
goto fail;
}
if (error) { /* error == EFBIG */
struct mbuf *m_new;
m_new = m_defrag(m, M_NOWAIT);
if (m_new == NULL) {
- if_printf(ifp, "%s: can't defrag TX buffer\n",
+ device_printf(sc->sc_dev,
+ "%s: can't defrag TX buffer\n",
__func__);
error = ENOBUFS;
goto fail;
} else {
m = m_new;
}
mt->mt_m = m;
error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap,
m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT);
if (error) {
- if_printf(ifp, "%s: can't load TX buffer (2) %d\n",
+ device_printf(sc->sc_dev,
+ "%s: can't load TX buffer (2) %d\n",
__func__, error);
goto fail;
}
}
bus_dmamap_sync(dma->txbuf_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE);
dr->setdesc(dr, desc, mt->mt_paddr, m->m_pkthdr.len, 0, 1, 1);
bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap,
BUS_DMASYNC_PREWRITE);
/* XXX send after DTIM */
dr->start_transfer(dr, bwn_dma_nextslot(dr, slot));
return (0);
fail:
dr->dr_curslot = backup[0];
dr->dr_usedslot = backup[1];
return (error);
#undef BWN_GET_TXHDRCACHE
}
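/*
 * Per-second watchdog: report a device timeout when a transmission
 * queued earlier has not completed in time.
 */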
static void
bwn_watchdog(void *arg)
{
struct bwn_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
if (sc->sc_watchdog_timer != 0 && --sc->sc_watchdog_timer == 0) {
- if_printf(ifp, "device timeout\n");
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ device_printf(sc->sc_dev, "device timeout\n");
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
}
callout_schedule(&sc->sc_watchdog_ch, hz);
}
static int
bwn_attach_core(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
int error, have_bg = 0, have_a = 0;
uint32_t high;
KASSERT(siba_get_revid(sc->sc_dev) >= 5,
("unsupported revision %d", siba_get_revid(sc->sc_dev)));
siba_powerup(sc->sc_dev, 0);
high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH);
bwn_reset_core(mac,
(high & BWN_TGSHIGH_HAVE_2GHZ) ? BWN_TGSLOW_SUPPORT_G : 0);
error = bwn_phy_getinfo(mac, high);
if (error)
goto fail;
have_a = (high & BWN_TGSHIGH_HAVE_5GHZ) ? 1 : 0;
have_bg = (high & BWN_TGSHIGH_HAVE_2GHZ) ? 1 : 0;
if (siba_get_pci_device(sc->sc_dev) != 0x4312 &&
siba_get_pci_device(sc->sc_dev) != 0x4319 &&
siba_get_pci_device(sc->sc_dev) != 0x4324) {
have_a = have_bg = 0;
if (mac->mac_phy.type == BWN_PHYTYPE_A)
have_a = 1;
else if (mac->mac_phy.type == BWN_PHYTYPE_G ||
mac->mac_phy.type == BWN_PHYTYPE_N ||
mac->mac_phy.type == BWN_PHYTYPE_LP)
have_bg = 1;
else
KASSERT(0 == 1, ("%s: unknown phy type (%d)", __func__,
mac->mac_phy.type));
}
/* XXX turn off PHY A; it's not supported */
if (mac->mac_phy.type != BWN_PHYTYPE_LP &&
mac->mac_phy.type != BWN_PHYTYPE_N) {
have_a = 0;
have_bg = 1;
}
if (mac->mac_phy.type == BWN_PHYTYPE_G) {
mac->mac_phy.attach = bwn_phy_g_attach;
mac->mac_phy.detach = bwn_phy_g_detach;
mac->mac_phy.prepare_hw = bwn_phy_g_prepare_hw;
mac->mac_phy.init_pre = bwn_phy_g_init_pre;
mac->mac_phy.init = bwn_phy_g_init;
mac->mac_phy.exit = bwn_phy_g_exit;
mac->mac_phy.phy_read = bwn_phy_g_read;
mac->mac_phy.phy_write = bwn_phy_g_write;
mac->mac_phy.rf_read = bwn_phy_g_rf_read;
mac->mac_phy.rf_write = bwn_phy_g_rf_write;
mac->mac_phy.use_hwpctl = bwn_phy_g_hwpctl;
mac->mac_phy.rf_onoff = bwn_phy_g_rf_onoff;
mac->mac_phy.switch_analog = bwn_phy_switch_analog;
mac->mac_phy.switch_channel = bwn_phy_g_switch_channel;
mac->mac_phy.get_default_chan = bwn_phy_g_get_default_chan;
mac->mac_phy.set_antenna = bwn_phy_g_set_antenna;
mac->mac_phy.set_im = bwn_phy_g_im;
mac->mac_phy.recalc_txpwr = bwn_phy_g_recalc_txpwr;
mac->mac_phy.set_txpwr = bwn_phy_g_set_txpwr;
mac->mac_phy.task_15s = bwn_phy_g_task_15s;
mac->mac_phy.task_60s = bwn_phy_g_task_60s;
} else if (mac->mac_phy.type == BWN_PHYTYPE_LP) {
mac->mac_phy.init_pre = bwn_phy_lp_init_pre;
mac->mac_phy.init = bwn_phy_lp_init;
mac->mac_phy.phy_read = bwn_phy_lp_read;
mac->mac_phy.phy_write = bwn_phy_lp_write;
mac->mac_phy.phy_maskset = bwn_phy_lp_maskset;
mac->mac_phy.rf_read = bwn_phy_lp_rf_read;
mac->mac_phy.rf_write = bwn_phy_lp_rf_write;
mac->mac_phy.rf_onoff = bwn_phy_lp_rf_onoff;
mac->mac_phy.switch_analog = bwn_phy_lp_switch_analog;
mac->mac_phy.switch_channel = bwn_phy_lp_switch_channel;
mac->mac_phy.get_default_chan = bwn_phy_lp_get_default_chan;
mac->mac_phy.set_antenna = bwn_phy_lp_set_antenna;
mac->mac_phy.task_60s = bwn_phy_lp_task_60s;
} else {
device_printf(sc->sc_dev, "unsupported PHY type (%d)\n",
mac->mac_phy.type);
error = ENXIO;
goto fail;
}
mac->mac_phy.gmode = have_bg;
if (mac->mac_phy.attach != NULL) {
error = mac->mac_phy.attach(mac);
if (error) {
device_printf(sc->sc_dev, "failed\n");
goto fail;
}
}
bwn_reset_core(mac, have_bg ? BWN_TGSLOW_SUPPORT_G : 0);
error = bwn_chiptest(mac);
if (error)
goto fail;
error = bwn_setup_channels(mac, have_bg, have_a);
if (error) {
device_printf(sc->sc_dev, "failed to setup channels\n");
goto fail;
}
if (sc->sc_curmac == NULL)
sc->sc_curmac = mac;
error = bwn_dma_attach(mac);
if (error != 0) {
device_printf(sc->sc_dev, "failed to initialize DMA\n");
goto fail;
}
mac->mac_phy.switch_analog(mac, 0);
siba_dev_down(sc->sc_dev, 0);
fail:
siba_powerdown(sc->sc_dev);
return (error);
}
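/*
 * Bring the 802.11 core out of reset: toggle the forced gated clock,
 * release the PHY from reset and set G-mode in MACCTL when requested.
 */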
static void
bwn_reset_core(struct bwn_mac *mac, uint32_t flags)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t low, ctl;
flags |= (BWN_TGSLOW_PHYCLOCK_ENABLE | BWN_TGSLOW_PHYRESET);
siba_dev_up(sc->sc_dev, flags);
DELAY(2000);
low = (siba_read_4(sc->sc_dev, SIBA_TGSLOW) | SIBA_TGSLOW_FGC) &
~BWN_TGSLOW_PHYRESET;
siba_write_4(sc->sc_dev, SIBA_TGSLOW, low);
siba_read_4(sc->sc_dev, SIBA_TGSLOW);
DELAY(1000);
siba_write_4(sc->sc_dev, SIBA_TGSLOW, low & ~SIBA_TGSLOW_FGC);
siba_read_4(sc->sc_dev, SIBA_TGSLOW);
DELAY(1000);
if (mac->mac_phy.switch_analog != NULL)
mac->mac_phy.switch_analog(mac, 1);
ctl = BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GMODE;
if (flags & BWN_TGSLOW_SUPPORT_G)
ctl |= BWN_MACCTL_GMODE;
BWN_WRITE_4(mac, BWN_MACCTL, ctl | BWN_MACCTL_IHR_ON);
}
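/*
 * Read the PHY version register and the radio ID and reject PHY/radio
 * combinations this driver does not support.
 */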
static int
bwn_phy_getinfo(struct bwn_mac *mac, int tgshigh)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_softc *sc = mac->mac_sc;
uint32_t tmp;
/* PHY */
tmp = BWN_READ_2(mac, BWN_PHYVER);
phy->gmode = (tgshigh & BWN_TGSHIGH_HAVE_2GHZ) ? 1 : 0;
phy->rf_on = 1;
phy->analog = (tmp & BWN_PHYVER_ANALOG) >> 12;
phy->type = (tmp & BWN_PHYVER_TYPE) >> 8;
phy->rev = (tmp & BWN_PHYVER_VERSION);
if ((phy->type == BWN_PHYTYPE_A && phy->rev >= 4) ||
(phy->type == BWN_PHYTYPE_B && phy->rev != 2 &&
phy->rev != 4 && phy->rev != 6 && phy->rev != 7) ||
(phy->type == BWN_PHYTYPE_G && phy->rev > 9) ||
(phy->type == BWN_PHYTYPE_N && phy->rev > 4) ||
(phy->type == BWN_PHYTYPE_LP && phy->rev > 2))
goto unsupphy;
/* RADIO */
if (siba_get_chipid(sc->sc_dev) == 0x4317) {
if (siba_get_chiprev(sc->sc_dev) == 0)
tmp = 0x3205017f;
else if (siba_get_chiprev(sc->sc_dev) == 1)
tmp = 0x4205017f;
else
tmp = 0x5205017f;
} else {
BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID);
tmp = BWN_READ_2(mac, BWN_RFDATALO);
BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID);
tmp |= (uint32_t)BWN_READ_2(mac, BWN_RFDATAHI) << 16;
}
phy->rf_rev = (tmp & 0xf0000000) >> 28;
phy->rf_ver = (tmp & 0x0ffff000) >> 12;
phy->rf_manuf = (tmp & 0x00000fff);
if (phy->rf_manuf != 0x17f) /* 0x17f is Broadcom */
goto unsupradio;
if ((phy->type == BWN_PHYTYPE_A && (phy->rf_ver != 0x2060 ||
phy->rf_rev != 1 || phy->rf_manuf != 0x17f)) ||
(phy->type == BWN_PHYTYPE_B && (phy->rf_ver & 0xfff0) != 0x2050) ||
(phy->type == BWN_PHYTYPE_G && phy->rf_ver != 0x2050) ||
(phy->type == BWN_PHYTYPE_N &&
phy->rf_ver != 0x2055 && phy->rf_ver != 0x2056) ||
(phy->type == BWN_PHYTYPE_LP &&
phy->rf_ver != 0x2062 && phy->rf_ver != 0x2063))
goto unsupradio;
return (0);
unsupphy:
device_printf(sc->sc_dev, "unsupported PHY (type %#x, rev %#x, "
"analog %#x)\n",
phy->type, phy->rev, phy->analog);
return (ENXIO);
unsupradio:
device_printf(sc->sc_dev, "unsupported radio (manuf %#x, ver %#x, "
"rev %#x)\n",
phy->rf_manuf, phy->rf_ver, phy->rf_rev);
return (ENXIO);
}
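/*
 * Sanity-check shared-memory and TSF register access before using the
 * core.
 */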
static int
bwn_chiptest(struct bwn_mac *mac)
{
#define TESTVAL0 0x55aaaa55
#define TESTVAL1 0xaa5555aa
struct bwn_softc *sc = mac->mac_sc;
uint32_t v, backup;
BWN_LOCK(sc);
backup = bwn_shm_read_4(mac, BWN_SHARED, 0);
bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL0);
if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL0)
goto error;
bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL1);
if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL1)
goto error;
bwn_shm_write_4(mac, BWN_SHARED, 0, backup);
if ((siba_get_revid(sc->sc_dev) >= 3) &&
(siba_get_revid(sc->sc_dev) <= 10)) {
BWN_WRITE_2(mac, BWN_TSF_CFP_START, 0xaaaa);
BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0xccccbbbb);
if (BWN_READ_2(mac, BWN_TSF_CFP_START_LOW) != 0xbbbb)
goto error;
if (BWN_READ_2(mac, BWN_TSF_CFP_START_HIGH) != 0xcccc)
goto error;
}
BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0);
v = BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_GMODE;
if (v != (BWN_MACCTL_GMODE | BWN_MACCTL_IHR_ON))
goto error;
BWN_UNLOCK(sc);
return (0);
error:
BWN_UNLOCK(sc);
device_printf(sc->sc_dev, "failed to validate the chipaccess\n");
return (ENODEV);
}
#define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT | IEEE80211_CHAN_G)
#define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT | IEEE80211_CHAN_A)
static int
bwn_setup_channels(struct bwn_mac *mac, int have_bg, int have_a)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
ic->ic_nchans = 0;
if (have_bg)
bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX,
&ic->ic_nchans, &bwn_chantable_bg, IEEE80211_CHAN_G);
if (mac->mac_phy.type == BWN_PHYTYPE_N) {
if (have_a)
bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX,
&ic->ic_nchans, &bwn_chantable_n,
IEEE80211_CHAN_HTA);
} else {
if (have_a)
bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX,
&ic->ic_nchans, &bwn_chantable_a,
IEEE80211_CHAN_A);
}
mac->mac_phy.supports_2ghz = have_bg;
mac->mac_phy.supports_5ghz = have_a;
return (ic->ic_nchans == 0 ? ENXIO : 0);
}
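/*
 * Shared-memory accessors. Accesses go through BWN_SHM_CONTROL and
 * BWN_SHM_DATA; offsets that are not 32-bit aligned are handled with
 * 16-bit accesses through BWN_SHM_DATA_UNALIGNED.
 */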
static uint32_t
bwn_shm_read_4(struct bwn_mac *mac, uint16_t way, uint16_t offset)
{
uint32_t ret;
BWN_ASSERT_LOCKED(mac->mac_sc);
if (way == BWN_SHARED) {
KASSERT((offset & 0x0001) == 0,
("%s:%d warn", __func__, __LINE__));
if (offset & 0x0003) {
bwn_shm_ctlword(mac, way, offset >> 2);
ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED);
ret <<= 16;
bwn_shm_ctlword(mac, way, (offset >> 2) + 1);
ret |= BWN_READ_2(mac, BWN_SHM_DATA);
goto out;
}
offset >>= 2;
}
bwn_shm_ctlword(mac, way, offset);
ret = BWN_READ_4(mac, BWN_SHM_DATA);
out:
return (ret);
}
static uint16_t
bwn_shm_read_2(struct bwn_mac *mac, uint16_t way, uint16_t offset)
{
uint16_t ret;
BWN_ASSERT_LOCKED(mac->mac_sc);
if (way == BWN_SHARED) {
KASSERT((offset & 0x0001) == 0,
("%s:%d warn", __func__, __LINE__));
if (offset & 0x0003) {
bwn_shm_ctlword(mac, way, offset >> 2);
ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED);
goto out;
}
offset >>= 2;
}
bwn_shm_ctlword(mac, way, offset);
ret = BWN_READ_2(mac, BWN_SHM_DATA);
out:
return (ret);
}
static void
bwn_shm_ctlword(struct bwn_mac *mac, uint16_t way,
uint16_t offset)
{
uint32_t control;
control = way;
control <<= 16;
control |= offset;
BWN_WRITE_4(mac, BWN_SHM_CONTROL, control);
}
static void
bwn_shm_write_4(struct bwn_mac *mac, uint16_t way, uint16_t offset,
uint32_t value)
{
BWN_ASSERT_LOCKED(mac->mac_sc);
if (way == BWN_SHARED) {
KASSERT((offset & 0x0001) == 0,
("%s:%d warn", __func__, __LINE__));
if (offset & 0x0003) {
bwn_shm_ctlword(mac, way, offset >> 2);
BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED,
(value >> 16) & 0xffff);
bwn_shm_ctlword(mac, way, (offset >> 2) + 1);
BWN_WRITE_2(mac, BWN_SHM_DATA, value & 0xffff);
return;
}
offset >>= 2;
}
bwn_shm_ctlword(mac, way, offset);
BWN_WRITE_4(mac, BWN_SHM_DATA, value);
}
static void
bwn_shm_write_2(struct bwn_mac *mac, uint16_t way, uint16_t offset,
uint16_t value)
{
BWN_ASSERT_LOCKED(mac->mac_sc);
if (way == BWN_SHARED) {
KASSERT((offset & 0x0001) == 0,
("%s:%d warn", __func__, __LINE__));
if (offset & 0x0003) {
bwn_shm_ctlword(mac, way, offset >> 2);
BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, value);
return;
}
offset >>= 2;
}
bwn_shm_ctlword(mac, way, offset);
BWN_WRITE_2(mac, BWN_SHM_DATA, value);
}
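/*
 * Fill in a single net80211 channel entry; ic_maxpower is kept in
 * half-dBm units, hence the factor of two.
 */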
static void
bwn_addchan(struct ieee80211_channel *c, int freq, int flags, int ieee,
int txpow)
{
c->ic_freq = freq;
c->ic_flags = flags;
c->ic_ieee = ieee;
c->ic_minpower = 0;
c->ic_maxpower = 2 * txpow;
c->ic_maxregpower = txpow;
}
static void
bwn_addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
const struct bwn_channelinfo *ci, int flags)
{
struct ieee80211_channel *c;
int i;
c = &chans[*nchans];
for (i = 0; i < ci->nchannels; i++) {
const struct bwn_channel *hc;
hc = &ci->channels[i];
if (*nchans >= maxchans)
break;
bwn_addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
c++, (*nchans)++;
if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
/* g channels have a separate b-only entry */
if (*nchans >= maxchans)
break;
c[0] = c[-1];
c[-1].ic_flags = IEEE80211_CHAN_B;
c++, (*nchans)++;
}
if (flags == IEEE80211_CHAN_HTG) {
/* HT g channels have a separate g-only entry */
if (*nchans >= maxchans)
break;
c[-1].ic_flags = IEEE80211_CHAN_G;
c[0] = c[-1];
c[0].ic_flags &= ~IEEE80211_CHAN_HT;
c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */
c++, (*nchans)++;
}
if (flags == IEEE80211_CHAN_HTA) {
/* HT a channels have a separate a-only entry */
if (*nchans >= maxchans)
break;
c[-1].ic_flags = IEEE80211_CHAN_A;
c[0] = c[-1];
c[0].ic_flags &= ~IEEE80211_CHAN_HT;
c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */
c++, (*nchans)++;
}
}
}
static int
bwn_phy_g_attach(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
unsigned int i;
int16_t pab0, pab1, pab2;
static int8_t bwn_phy_g_tssi2dbm_table[] = BWN_PHY_G_TSSI2DBM_TABLE;
int8_t bg;
bg = (int8_t)siba_sprom_get_tssi_bg(sc->sc_dev);
pab0 = (int16_t)siba_sprom_get_pa0b0(sc->sc_dev);
pab1 = (int16_t)siba_sprom_get_pa0b1(sc->sc_dev);
pab2 = (int16_t)siba_sprom_get_pa0b2(sc->sc_dev);
if ((siba_get_chipid(sc->sc_dev) == 0x4301) && (phy->rf_ver != 0x2050))
device_printf(sc->sc_dev, "not supported anymore\n");
pg->pg_flags = 0;
if (pab0 == 0 || pab1 == 0 || pab2 == 0 || pab0 == -1 || pab1 == -1 ||
pab2 == -1) {
pg->pg_idletssi = 52;
pg->pg_tssi2dbm = bwn_phy_g_tssi2dbm_table;
return (0);
}
pg->pg_idletssi = (bg == 0 || bg == -1) ? 62 : bg;
pg->pg_tssi2dbm = (uint8_t *)malloc(64, M_DEVBUF, M_NOWAIT | M_ZERO);
if (pg->pg_tssi2dbm == NULL) {
device_printf(sc->sc_dev, "failed to allocate buffer\n");
return (ENOMEM);
}
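/*
 * Derive the TSSI-to-dBm table from the SPROM PA coefficients by
 * fixed-point iteration; give up if it fails to converge within 16
 * steps.
 */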
for (i = 0; i < 64; i++) {
int32_t m1, m2, f, q, delta;
int8_t j = 0;
m1 = BWN_TSSI2DBM(16 * pab0 + i * pab1, 32);
m2 = MAX(BWN_TSSI2DBM(32768 + i * pab2, 256), 1);
f = 256;
do {
if (j > 15) {
device_printf(sc->sc_dev,
"failed to generate tssi2dBm\n");
free(pg->pg_tssi2dbm, M_DEVBUF);
return (ENOMEM);
}
q = BWN_TSSI2DBM(f * 4096 - BWN_TSSI2DBM(m2 * f, 16) *
f, 2048);
delta = abs(q - f);
f = q;
j++;
} while (delta >= 2);
pg->pg_tssi2dbm[i] = MIN(MAX(BWN_TSSI2DBM(m1 * f, 8192), -127),
128);
}
pg->pg_flags |= BWN_PHY_G_FLAG_TSSITABLE_ALLOC;
return (0);
}
static void
bwn_phy_g_detach(struct bwn_mac *mac)
{
struct bwn_phy_g *pg = &mac->mac_phy.phy_g;
if (pg->pg_flags & BWN_PHY_G_FLAG_TSSITABLE_ALLOC) {
free(pg->pg_tssi2dbm, M_DEVBUF);
pg->pg_tssi2dbm = NULL;
}
pg->pg_flags = 0;
}
static void
bwn_phy_g_init_pre(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
void *tssi2dbm;
int idletssi;
unsigned int i;
tssi2dbm = pg->pg_tssi2dbm;
idletssi = pg->pg_idletssi;
memset(pg, 0, sizeof(*pg));
pg->pg_tssi2dbm = tssi2dbm;
pg->pg_idletssi = idletssi;
memset(pg->pg_minlowsig, 0xff, sizeof(pg->pg_minlowsig));
for (i = 0; i < N(pg->pg_nrssi); i++)
pg->pg_nrssi[i] = -1000;
for (i = 0; i < N(pg->pg_nrssi_lt); i++)
pg->pg_nrssi_lt[i] = i;
pg->pg_lofcal = 0xffff;
pg->pg_initval = 0xffff;
pg->pg_immode = BWN_IMMODE_NONE;
pg->pg_ofdmtab_dir = BWN_OFDMTAB_DIR_UNKNOWN;
pg->pg_avgtssi = 0xff;
pg->pg_loctl.tx_bias = 0xff;
TAILQ_INIT(&pg->pg_loctl.calib_list);
}
static int
bwn_phy_g_prepare_hw(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_softc *sc = mac->mac_sc;
struct bwn_txpwr_loctl *lo = &pg->pg_loctl;
static const struct bwn_rfatt rfatt0[] = {
{ 3, 0 }, { 1, 0 }, { 5, 0 }, { 7, 0 }, { 9, 0 }, { 2, 0 },
{ 0, 0 }, { 4, 0 }, { 6, 0 }, { 8, 0 }, { 1, 1 }, { 2, 1 },
{ 3, 1 }, { 4, 1 }
};
static const struct bwn_rfatt rfatt1[] = {
{ 2, 1 }, { 4, 1 }, { 6, 1 }, { 8, 1 }, { 10, 1 }, { 12, 1 },
{ 14, 1 }
};
static const struct bwn_rfatt rfatt2[] = {
{ 0, 1 }, { 2, 1 }, { 4, 1 }, { 6, 1 }, { 8, 1 }, { 9, 1 },
{ 9, 1 }
};
static const struct bwn_bbatt bbatt_0[] = {
{ 0 }, { 1 }, { 2 }, { 3 }, { 4 }, { 5 }, { 6 }, { 7 }, { 8 }
};
KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__));
if (phy->rf_ver == 0x2050 && phy->rf_rev < 6)
pg->pg_bbatt.att = 0;
else
pg->pg_bbatt.att = 2;
/* prepare Radio Attenuation */
pg->pg_rfatt.padmix = 0;
if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM &&
siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BCM4309G) {
if (siba_get_pci_revid(sc->sc_dev) < 0x43) {
pg->pg_rfatt.att = 2;
goto done;
} else if (siba_get_pci_revid(sc->sc_dev) < 0x51) {
pg->pg_rfatt.att = 3;
goto done;
}
}
if (phy->type == BWN_PHYTYPE_A) {
pg->pg_rfatt.att = 0x60;
goto done;
}
switch (phy->rf_ver) {
case 0x2050:
switch (phy->rf_rev) {
case 0:
pg->pg_rfatt.att = 5;
goto done;
case 1:
if (phy->type == BWN_PHYTYPE_G) {
if (siba_get_pci_subvendor(sc->sc_dev) ==
SIBA_BOARDVENDOR_BCM &&
siba_get_pci_subdevice(sc->sc_dev) ==
SIBA_BOARD_BCM4309G &&
siba_get_pci_revid(sc->sc_dev) >= 30)
pg->pg_rfatt.att = 3;
else if (siba_get_pci_subvendor(sc->sc_dev) ==
SIBA_BOARDVENDOR_BCM &&
siba_get_pci_subdevice(sc->sc_dev) ==
SIBA_BOARD_BU4306)
pg->pg_rfatt.att = 3;
else
pg->pg_rfatt.att = 1;
} else {
if (siba_get_pci_subvendor(sc->sc_dev) ==
SIBA_BOARDVENDOR_BCM &&
siba_get_pci_subdevice(sc->sc_dev) ==
SIBA_BOARD_BCM4309G &&
siba_get_pci_revid(sc->sc_dev) >= 30)
pg->pg_rfatt.att = 7;
else
pg->pg_rfatt.att = 6;
}
goto done;
case 2:
if (phy->type == BWN_PHYTYPE_G) {
if (siba_get_pci_subvendor(sc->sc_dev) ==
SIBA_BOARDVENDOR_BCM &&
siba_get_pci_subdevice(sc->sc_dev) ==
SIBA_BOARD_BCM4309G &&
siba_get_pci_revid(sc->sc_dev) >= 30)
pg->pg_rfatt.att = 3;
else if (siba_get_pci_subvendor(sc->sc_dev) ==
SIBA_BOARDVENDOR_BCM &&
siba_get_pci_subdevice(sc->sc_dev) ==
SIBA_BOARD_BU4306)
pg->pg_rfatt.att = 5;
else if (siba_get_chipid(sc->sc_dev) == 0x4320)
pg->pg_rfatt.att = 4;
else
pg->pg_rfatt.att = 3;
} else
pg->pg_rfatt.att = 6;
goto done;
case 3:
pg->pg_rfatt.att = 5;
goto done;
case 4:
case 5:
pg->pg_rfatt.att = 1;
goto done;
case 6:
case 7:
pg->pg_rfatt.att = 5;
goto done;
case 8:
pg->pg_rfatt.att = 0xa;
pg->pg_rfatt.padmix = 1;
goto done;
case 9:
default:
pg->pg_rfatt.att = 5;
goto done;
}
break;
case 0x2053:
switch (phy->rf_rev) {
case 1:
pg->pg_rfatt.att = 6;
goto done;
}
break;
}
pg->pg_rfatt.att = 5;
done:
pg->pg_txctl = (bwn_phy_g_txctl(mac) << 4);
if (!bwn_has_hwpctl(mac)) {
lo->rfatt.array = rfatt0;
lo->rfatt.len = N(rfatt0);
lo->rfatt.min = 0;
lo->rfatt.max = 9;
goto genbbatt;
}
if (phy->rf_ver == 0x2050 && phy->rf_rev == 8) {
lo->rfatt.array = rfatt1;
lo->rfatt.len = N(rfatt1);
lo->rfatt.min = 0;
lo->rfatt.max = 14;
goto genbbatt;
}
lo->rfatt.array = rfatt2;
lo->rfatt.len = N(rfatt2);
lo->rfatt.min = 0;
lo->rfatt.max = 9;
genbbatt:
lo->bbatt.array = bbatt_0;
lo->bbatt.len = N(bbatt_0);
lo->bbatt.min = 0;
lo->bbatt.max = 8;
BWN_READ_4(mac, BWN_MACCTL);
if (phy->rev == 1) {
phy->gmode = 0;
bwn_reset_core(mac, 0);
bwn_phy_g_init_sub(mac);
phy->gmode = 1;
bwn_reset_core(mac, BWN_TGSLOW_SUPPORT_G);
}
return (0);
}
static uint16_t
bwn_phy_g_txctl(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
if (phy->rf_ver != 0x2050)
return (0);
if (phy->rf_rev == 1)
return (BWN_TXCTL_PA2DB | BWN_TXCTL_TXMIX);
if (phy->rf_rev < 6)
return (BWN_TXCTL_PA2DB);
if (phy->rf_rev == 8)
return (BWN_TXCTL_TXMIX);
return (0);
}
static int
bwn_phy_g_init(struct bwn_mac *mac)
{
bwn_phy_g_init_sub(mac);
return (0);
}
static void
bwn_phy_g_exit(struct bwn_mac *mac)
{
struct bwn_txpwr_loctl *lo = &mac->mac_phy.phy_g.pg_loctl;
struct bwn_lo_calib *cal, *tmp;
if (lo == NULL)
return;
TAILQ_FOREACH_SAFE(cal, &lo->calib_list, list, tmp) {
TAILQ_REMOVE(&lo->calib_list, cal, list);
free(cal, M_DEVBUF);
}
}
static uint16_t
bwn_phy_g_read(struct bwn_mac *mac, uint16_t reg)
{
BWN_WRITE_2(mac, BWN_PHYCTL, reg);
return (BWN_READ_2(mac, BWN_PHYDATA));
}
static void
bwn_phy_g_write(struct bwn_mac *mac, uint16_t reg, uint16_t value)
{
BWN_WRITE_2(mac, BWN_PHYCTL, reg);
BWN_WRITE_2(mac, BWN_PHYDATA, value);
}
static uint16_t
bwn_phy_g_rf_read(struct bwn_mac *mac, uint16_t reg)
{
KASSERT(reg != 1, ("%s:%d: fail", __func__, __LINE__));
BWN_WRITE_2(mac, BWN_RFCTL, reg | 0x80);
return (BWN_READ_2(mac, BWN_RFDATALO));
}
static void
bwn_phy_g_rf_write(struct bwn_mac *mac, uint16_t reg, uint16_t value)
{
KASSERT(reg != 1, ("%s:%d: fail", __func__, __LINE__));
BWN_WRITE_2(mac, BWN_RFCTL, reg);
BWN_WRITE_2(mac, BWN_RFDATALO, value);
}
static int
bwn_phy_g_hwpctl(struct bwn_mac *mac)
{
return (mac->mac_phy.rev >= 6);
}
static void
bwn_phy_g_rf_onoff(struct bwn_mac *mac, int on)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
unsigned int channel;
uint16_t rfover, rfoverval;
if (on) {
if (phy->rf_on)
return;
BWN_PHY_WRITE(mac, 0x15, 0x8000);
BWN_PHY_WRITE(mac, 0x15, 0xcc00);
BWN_PHY_WRITE(mac, 0x15, (phy->gmode ? 0xc0 : 0x0));
if (pg->pg_flags & BWN_PHY_G_FLAG_RADIOCTX_VALID) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVER,
pg->pg_radioctx_over);
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL,
pg->pg_radioctx_overval);
pg->pg_flags &= ~BWN_PHY_G_FLAG_RADIOCTX_VALID;
}
channel = phy->chan;
bwn_phy_g_switch_chan(mac, 6, 1);
bwn_phy_g_switch_chan(mac, channel, 0);
return;
}
rfover = BWN_PHY_READ(mac, BWN_PHY_RFOVER);
rfoverval = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL);
pg->pg_radioctx_over = rfover;
pg->pg_radioctx_overval = rfoverval;
pg->pg_flags |= BWN_PHY_G_FLAG_RADIOCTX_VALID;
BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, rfover | 0x008c);
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfoverval & 0xff73);
}
static int
bwn_phy_g_switch_channel(struct bwn_mac *mac, uint32_t newchan)
{
if ((newchan < 1) || (newchan > 14))
return (EINVAL);
bwn_phy_g_switch_chan(mac, newchan, 0);
return (0);
}
static uint32_t
bwn_phy_g_get_default_chan(struct bwn_mac *mac)
{
return (1);
}
static void
bwn_phy_g_set_antenna(struct bwn_mac *mac, int antenna)
{
struct bwn_phy *phy = &mac->mac_phy;
uint64_t hf;
int autodiv = 0;
uint16_t tmp;
if (antenna == BWN_ANTAUTO0 || antenna == BWN_ANTAUTO1)
autodiv = 1;
hf = bwn_hf_read(mac) & ~BWN_HF_UCODE_ANTDIV_HELPER;
bwn_hf_write(mac, hf);
BWN_PHY_WRITE(mac, BWN_PHY_BBANDCFG,
(BWN_PHY_READ(mac, BWN_PHY_BBANDCFG) & ~BWN_PHY_BBANDCFG_RXANT) |
((autodiv ? BWN_ANTAUTO1 : antenna)
<< BWN_PHY_BBANDCFG_RXANT_SHIFT));
if (autodiv) {
tmp = BWN_PHY_READ(mac, BWN_PHY_ANTDWELL);
if (antenna == BWN_ANTAUTO1)
tmp &= ~BWN_PHY_ANTDWELL_AUTODIV1;
else
tmp |= BWN_PHY_ANTDWELL_AUTODIV1;
BWN_PHY_WRITE(mac, BWN_PHY_ANTDWELL, tmp);
}
tmp = BWN_PHY_READ(mac, BWN_PHY_ANTWRSETT);
if (autodiv)
tmp |= BWN_PHY_ANTWRSETT_ARXDIV;
else
tmp &= ~BWN_PHY_ANTWRSETT_ARXDIV;
BWN_PHY_WRITE(mac, BWN_PHY_ANTWRSETT, tmp);
if (phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_OFDM61,
BWN_PHY_READ(mac, BWN_PHY_OFDM61) | BWN_PHY_OFDM61_10);
BWN_PHY_WRITE(mac, BWN_PHY_DIVSRCHGAINBACK,
(BWN_PHY_READ(mac, BWN_PHY_DIVSRCHGAINBACK) & 0xff00) |
0x15);
if (phy->rev == 2)
BWN_PHY_WRITE(mac, BWN_PHY_ADIVRELATED, 8);
else
BWN_PHY_WRITE(mac, BWN_PHY_ADIVRELATED,
(BWN_PHY_READ(mac, BWN_PHY_ADIVRELATED) & 0xff00) |
8);
}
if (phy->rev >= 6)
BWN_PHY_WRITE(mac, BWN_PHY_OFDM9B, 0xdc);
hf |= BWN_HF_UCODE_ANTDIV_HELPER;
bwn_hf_write(mac, hf);
}
static int
bwn_phy_g_im(struct bwn_mac *mac, int mode)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
KASSERT(phy->type == BWN_PHYTYPE_G, ("%s: fail", __func__));
KASSERT(mode == BWN_IMMODE_NONE, ("%s: fail", __func__));
if (phy->rev == 0 || !phy->gmode)
return (ENODEV);
pg->pg_aci_wlan_automatic = 0;
return (0);
}
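/*
 * Estimate the TX power adjustment from the averaged CCK/OFDM TSSI
 * readings and the SPROM maximum-power limit, and report whether the
 * attenuation settings need to change.
 */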
static int
bwn_phy_g_recalc_txpwr(struct bwn_mac *mac, int ignore_tssi)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_softc *sc = mac->mac_sc;
unsigned int tssi;
int cck, ofdm;
int power;
int rfatt, bbatt;
unsigned int max;
KASSERT(phy->type == BWN_PHYTYPE_G, ("%s: fail", __func__));
cck = bwn_phy_shm_tssi_read(mac, BWN_SHARED_TSSI_CCK);
ofdm = bwn_phy_shm_tssi_read(mac, BWN_SHARED_TSSI_OFDM_G);
if (cck < 0 && ofdm < 0) {
if (ignore_tssi == 0)
return (BWN_TXPWR_RES_DONE);
cck = 0;
ofdm = 0;
}
tssi = (cck < 0) ? ofdm : ((ofdm < 0) ? cck : (cck + ofdm) / 2);
if (pg->pg_avgtssi != 0xff)
tssi = (tssi + pg->pg_avgtssi) / 2;
pg->pg_avgtssi = tssi;
KASSERT(tssi < BWN_TSSI_MAX, ("%s:%d: fail", __func__, __LINE__));
max = siba_sprom_get_maxpwr_bg(sc->sc_dev);
if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL)
max -= 3;
if (max >= 120) {
device_printf(sc->sc_dev, "invalid max TX-power value\n");
max = 80;
siba_sprom_set_maxpwr_bg(sc->sc_dev, max);
}
power = MIN(MAX((phy->txpower < 0) ? 0 : (phy->txpower << 2), 0), max) -
(pg->pg_tssi2dbm[MIN(MAX(pg->pg_idletssi - pg->pg_curtssi +
tssi, 0x00), 0x3f)]);
if (power == 0)
return (BWN_TXPWR_RES_DONE);
rfatt = -((power + 7) / 8);
bbatt = (-(power / 2)) - (4 * rfatt);
if ((rfatt == 0) && (bbatt == 0))
return (BWN_TXPWR_RES_DONE);
pg->pg_bbatt_delta = bbatt;
pg->pg_rfatt_delta = rfatt;
return (BWN_TXPWR_RES_NEED_ADJUST);
}
static void
bwn_phy_g_set_txpwr(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_softc *sc = mac->mac_sc;
int rfatt, bbatt;
uint8_t txctl;
bwn_mac_suspend(mac);
BWN_ASSERT_LOCKED(sc);
bbatt = pg->pg_bbatt.att;
bbatt += pg->pg_bbatt_delta;
rfatt = pg->pg_rfatt.att;
rfatt += pg->pg_rfatt_delta;
bwn_phy_g_setatt(mac, &bbatt, &rfatt);
txctl = pg->pg_txctl;
if ((phy->rf_ver == 0x2050) && (phy->rf_rev == 2)) {
if (rfatt <= 1) {
if (txctl == 0) {
txctl = BWN_TXCTL_PA2DB | BWN_TXCTL_TXMIX;
rfatt += 2;
bbatt += 2;
} else if (siba_sprom_get_bf_lo(sc->sc_dev) &
BWN_BFL_PACTRL) {
bbatt += 4 * (rfatt - 2);
rfatt = 2;
}
} else if (rfatt > 4 && txctl) {
txctl = 0;
if (bbatt < 3) {
rfatt -= 3;
bbatt += 2;
} else {
rfatt -= 2;
bbatt -= 2;
}
}
}
pg->pg_txctl = txctl;
bwn_phy_g_setatt(mac, &bbatt, &rfatt);
pg->pg_rfatt.att = rfatt;
pg->pg_bbatt.att = bbatt;
DPRINTF(sc, BWN_DEBUG_TXPOW, "%s: adjust TX power\n", __func__);
bwn_phy_lock(mac);
bwn_rf_lock(mac);
bwn_phy_g_set_txpwr_sub(mac, &pg->pg_bbatt, &pg->pg_rfatt,
pg->pg_txctl);
bwn_rf_unlock(mac);
bwn_phy_unlock(mac);
bwn_mac_enable(mac);
}
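/*
 * 15 second maintenance task: with hardware power control, refresh the
 * LO power vector; otherwise expire stale LO calibration entries and
 * recalibrate for the current attenuation settings.
 */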
static void
bwn_phy_g_task_15s(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_softc *sc = mac->mac_sc;
struct bwn_txpwr_loctl *lo = &pg->pg_loctl;
unsigned long expire, now;
struct bwn_lo_calib *cal, *tmp;
uint8_t expired = 0;
bwn_mac_suspend(mac);
if (lo == NULL)
goto fail;
BWN_GETTIME(now);
if (bwn_has_hwpctl(mac)) {
expire = now - BWN_LO_PWRVEC_EXPIRE;
if (time_before(lo->pwr_vec_read_time, expire)) {
bwn_lo_get_powervector(mac);
bwn_phy_g_dc_lookup_init(mac, 0);
}
goto fail;
}
expire = now - BWN_LO_CALIB_EXPIRE;
TAILQ_FOREACH_SAFE(cal, &lo->calib_list, list, tmp) {
if (!time_before(cal->calib_time, expire))
continue;
if (BWN_BBATTCMP(&cal->bbatt, &pg->pg_bbatt) &&
BWN_RFATTCMP(&cal->rfatt, &pg->pg_rfatt)) {
KASSERT(!expired, ("%s:%d: fail", __func__, __LINE__));
expired = 1;
}
DPRINTF(sc, BWN_DEBUG_LO, "expired BB %u RF %u %u I %d Q %d\n",
cal->bbatt.att, cal->rfatt.att, cal->rfatt.padmix,
cal->ctl.i, cal->ctl.q);
TAILQ_REMOVE(&lo->calib_list, cal, list);
free(cal, M_DEVBUF);
}
if (expired || TAILQ_EMPTY(&lo->calib_list)) {
cal = bwn_lo_calibset(mac, &pg->pg_bbatt,
&pg->pg_rfatt);
if (cal == NULL) {
device_printf(sc->sc_dev,
"failed to recalibrate LO\n");
goto fail;
}
TAILQ_INSERT_TAIL(&lo->calib_list, cal, list);
bwn_lo_write(mac, &cal->ctl);
}
fail:
bwn_mac_enable(mac);
}
static void
bwn_phy_g_task_60s(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_softc *sc = mac->mac_sc;
uint8_t old = phy->chan;
if (!(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_RSSI))
return;
bwn_mac_suspend(mac);
bwn_nrssi_slope_11g(mac);
if ((phy->rf_ver == 0x2050) && (phy->rf_rev == 8)) {
bwn_switch_channel(mac, (old >= 8) ? 1 : 13);
bwn_switch_channel(mac, old);
}
bwn_mac_enable(mac);
}
static void
bwn_phy_switch_analog(struct bwn_mac *mac, int on)
{
BWN_WRITE_2(mac, BWN_PHY0, on ? 0 : 0xf4);
}
static int
bwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac = sc->sc_curmac;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+ if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 ||
mac->mac_status < BWN_MAC_STATUS_STARTED) {
ieee80211_free_node(ni);
m_freem(m);
return (ENETDOWN);
}
BWN_LOCK(sc);
if (bwn_tx_isfull(sc, m)) {
ieee80211_free_node(ni);
m_freem(m);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
BWN_UNLOCK(sc);
return (ENOBUFS);
}
if (bwn_tx_start(sc, ni, m) != 0) {
if (ni != NULL)
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
sc->sc_watchdog_timer = 5;
BWN_UNLOCK(sc);
return (0);
}
/*
* Callback from the 802.11 layer to update the slot time
* based on the current setting. We use it to notify the
* firmware of ERP changes and the f/w takes care of things
* like slot time and preamble.
*/
static void
bwn_updateslot(struct ieee80211com *ic)
{
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac;
BWN_LOCK(sc);
- if (ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (sc->sc_flags & BWN_FLAG_RUNNING) {
mac = (struct bwn_mac *)sc->sc_curmac;
bwn_set_slot_time(mac,
(ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20);
}
BWN_UNLOCK(sc);
}
/*
* Callback from the 802.11 layer after a promiscuous mode change.
* Note this interface does not check the operating mode as this
* is an internal callback and we are expected to honor the current
* state (e.g. this is used for setting the interface in promiscuous
* mode when operating in hostap mode to do ACS).
*/
static void
bwn_update_promisc(struct ieee80211com *ic)
{
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac = sc->sc_curmac;
BWN_LOCK(sc);
mac = sc->sc_curmac;
if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) {
- if (ic->ic_ifp->if_flags & IFF_PROMISC)
+ if (ic->ic_promisc > 0)
sc->sc_filters |= BWN_MACCTL_PROMISC;
else
sc->sc_filters &= ~BWN_MACCTL_PROMISC;
bwn_set_opmode(mac);
}
BWN_UNLOCK(sc);
}
/*
* Callback from the 802.11 layer to update WME parameters.
*/
static int
bwn_wme_update(struct ieee80211com *ic)
{
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac = sc->sc_curmac;
struct wmeParams *wmep;
int i;
BWN_LOCK(sc);
mac = sc->sc_curmac;
if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) {
bwn_mac_suspend(mac);
for (i = 0; i < N(sc->sc_wmeParams); i++) {
wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[i];
bwn_wme_loadparams(mac, wmep, bwn_wme_shm_offsets[i]);
}
bwn_mac_enable(mac);
}
BWN_UNLOCK(sc);
return (0);
}
static void
bwn_scan_start(struct ieee80211com *ic)
{
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac;
BWN_LOCK(sc);
mac = sc->sc_curmac;
if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) {
sc->sc_filters |= BWN_MACCTL_BEACON_PROMISC;
bwn_set_opmode(mac);
/* disable CFP update during scan */
bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_SKIP_CFP_UPDATE);
}
BWN_UNLOCK(sc);
}
static void
bwn_scan_end(struct ieee80211com *ic)
{
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac;
BWN_LOCK(sc);
mac = sc->sc_curmac;
if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) {
sc->sc_filters &= ~BWN_MACCTL_BEACON_PROMISC;
bwn_set_opmode(mac);
bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_SKIP_CFP_UPDATE);
}
BWN_UNLOCK(sc);
}
static void
bwn_set_channel(struct ieee80211com *ic)
{
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac = sc->sc_curmac;
struct bwn_phy *phy = &mac->mac_phy;
int chan, error;
BWN_LOCK(sc);
error = bwn_switch_band(sc, ic->ic_curchan);
if (error)
goto fail;
bwn_mac_suspend(mac);
bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG);
chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
if (chan != phy->chan)
bwn_switch_channel(mac, chan);
/* TX power level */
if (ic->ic_curchan->ic_maxpower != 0 &&
ic->ic_curchan->ic_maxpower != phy->txpower) {
phy->txpower = ic->ic_curchan->ic_maxpower / 2;
bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME |
BWN_TXPWR_IGNORE_TSSI);
}
bwn_set_txantenna(mac, BWN_ANT_DEFAULT);
if (phy->set_antenna)
phy->set_antenna(mac, BWN_ANT_DEFAULT);
if (sc->sc_rf_enabled != phy->rf_on) {
if (sc->sc_rf_enabled) {
bwn_rf_turnon(mac);
if (!(mac->mac_flags & BWN_MAC_FLAG_RADIO_ON))
device_printf(sc->sc_dev,
"please turn on the RF switch\n");
} else
bwn_rf_turnoff(mac);
}
bwn_mac_enable(mac);
fail:
/*
* Setup radio tap channel freq and flags
*/
sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq =
htole16(ic->ic_curchan->ic_freq);
sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags =
htole16(ic->ic_curchan->ic_flags & 0xffff);
BWN_UNLOCK(sc);
}
static struct ieee80211vap *
bwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
- const uint8_t mac0[IEEE80211_ADDR_LEN])
+ const uint8_t mac[IEEE80211_ADDR_LEN])
{
- struct bwn_softc *sc = ic->ic_softc;
struct ieee80211vap *vap;
struct bwn_vap *bvp;
- uint8_t mac[IEEE80211_ADDR_LEN];
- IEEE80211_ADDR_COPY(mac, mac0);
switch (opmode) {
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
case IEEE80211_M_STA:
case IEEE80211_M_WDS:
case IEEE80211_M_MONITOR:
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
break;
default:
return (NULL);
}
- IEEE80211_ADDR_COPY(sc->sc_macaddr, mac0);
-
- bvp = (struct bwn_vap *) malloc(sizeof(struct bwn_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (bvp == NULL) {
- device_printf(sc->sc_dev, "failed to allocate a buffer\n");
- return (NULL);
- }
+ bvp = malloc(sizeof(struct bwn_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &bvp->bv_vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
- IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
/* override with driver methods */
bvp->bv_newstate = vap->iv_newstate;
vap->iv_newstate = bwn_newstate;
/* override max aid so sta's cannot assoc when we're out of sta id's */
vap->iv_max_aid = BWN_STAID_MAX;
ieee80211_ratectl_init(vap);
/* complete setup */
ieee80211_vap_attach(vap, ieee80211_media_change,
- ieee80211_media_status);
+ ieee80211_media_status, mac);
return (vap);
}
static void
bwn_vap_delete(struct ieee80211vap *vap)
{
struct bwn_vap *bvp = BWN_VAP(vap);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(bvp, M_80211_VAP);
}
-static void
-bwn_init(void *arg)
-{
- struct bwn_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- int error = 0;
-
- DPRINTF(sc, BWN_DEBUG_ANY, "%s: if_flags 0x%x\n",
- __func__, ifp->if_flags);
-
- BWN_LOCK(sc);
- error = bwn_init_locked(sc);
- BWN_UNLOCK(sc);
-
- if (error == 0)
- ieee80211_start_all(ic); /* start all vap's */
-}
-
static int
-bwn_init_locked(struct bwn_softc *sc)
+bwn_init(struct bwn_softc *sc)
{
struct bwn_mac *mac;
- struct ifnet *ifp = sc->sc_ifp;
int error;
BWN_ASSERT_LOCKED(sc);
bzero(sc->sc_bssid, IEEE80211_ADDR_LEN);
sc->sc_flags |= BWN_FLAG_NEED_BEACON_TP;
sc->sc_filters = 0;
bwn_wme_clear(sc);
sc->sc_beacons[0] = sc->sc_beacons[1] = 0;
sc->sc_rf_enabled = 1;
mac = sc->sc_curmac;
if (mac->mac_status == BWN_MAC_STATUS_UNINIT) {
error = bwn_core_init(mac);
if (error != 0)
return (error);
}
if (mac->mac_status == BWN_MAC_STATUS_INITED)
bwn_core_start(mac);
bwn_set_opmode(mac);
bwn_set_pretbtt(mac);
bwn_spu_setdelay(mac, 0);
bwn_set_macaddr(mac);
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_flags |= BWN_FLAG_RUNNING;
callout_reset(&sc->sc_rfswitch_ch, hz, bwn_rfswitch, sc);
callout_reset(&sc->sc_watchdog_ch, hz, bwn_watchdog, sc);
return (0);
}
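/*
 * bwn_init() now runs with BWN_LOCK held and reports readiness through
 * BWN_FLAG_RUNNING instead of IFF_DRV_RUNNING.  A minimal sketch of the
 * calling pattern callers are assumed to follow (illustrative only, not
 * part of this function):
 *
 *	BWN_LOCK(sc);
 *	error = bwn_init(sc);
 *	BWN_UNLOCK(sc);
 *	if (error == 0)
 *		ieee80211_start_all(&sc->sc_ic);
 */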
static void
-bwn_stop(struct bwn_softc *sc, int statechg)
+bwn_stop(struct bwn_softc *sc)
{
-
- BWN_LOCK(sc);
- bwn_stop_locked(sc, statechg);
- BWN_UNLOCK(sc);
-}
-
-static void
-bwn_stop_locked(struct bwn_softc *sc, int statechg)
-{
struct bwn_mac *mac = sc->sc_curmac;
- struct ifnet *ifp = sc->sc_ifp;
BWN_ASSERT_LOCKED(sc);
if (mac->mac_status >= BWN_MAC_STATUS_INITED) {
/* XXX FIXME opmode not based on VAP */
bwn_set_opmode(mac);
bwn_set_macaddr(mac);
}
if (mac->mac_status >= BWN_MAC_STATUS_STARTED)
bwn_core_stop(mac);
callout_stop(&sc->sc_led_blink_ch);
sc->sc_led_blinking = 0;
bwn_core_exit(mac);
sc->sc_rf_enabled = 0;
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->sc_flags &= ~BWN_FLAG_RUNNING;
}
static void
bwn_wme_clear(struct bwn_softc *sc)
{
#define MS(_v, _f) (((_v) & _f) >> _f##_S)
struct wmeParams *p;
unsigned int i;
KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams),
("%s:%d: fail", __func__, __LINE__));
for (i = 0; i < N(sc->sc_wmeParams); i++) {
p = &(sc->sc_wmeParams[i]);
switch (bwn_wme_shm_offsets[i]) {
case BWN_WME_VOICE:
p->wmep_txopLimit = 0;
p->wmep_aifsn = 2;
/* XXX FIXME: log2(cwmin) */
p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN);
p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX);
break;
case BWN_WME_VIDEO:
p->wmep_txopLimit = 0;
p->wmep_aifsn = 2;
/* XXX FIXME: log2(cwmin) */
p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN);
p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX);
break;
case BWN_WME_BESTEFFORT:
p->wmep_txopLimit = 0;
p->wmep_aifsn = 3;
/* XXX FIXME: log2(cwmin) */
p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN);
p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX);
break;
case BWN_WME_BACKGROUND:
p->wmep_txopLimit = 0;
p->wmep_aifsn = 7;
/* XXX FIXME: log2(cwmin) */
p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN);
p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX);
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
}
}
static int
bwn_core_init(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint64_t hf;
int error;
KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT,
("%s:%d: fail", __func__, __LINE__));
siba_powerup(sc->sc_dev, 0);
if (!siba_dev_isup(sc->sc_dev))
bwn_reset_core(mac,
mac->mac_phy.gmode ? BWN_TGSLOW_SUPPORT_G : 0);
mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID;
mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON;
mac->mac_phy.hwpctl = (bwn_hwpctl) ? 1 : 0;
BWN_GETTIME(mac->mac_phy.nexttime);
mac->mac_phy.txerrors = BWN_TXERROR_MAX;
bzero(&mac->mac_stats, sizeof(mac->mac_stats));
mac->mac_stats.link_noise = -95;
mac->mac_reason_intr = 0;
bzero(mac->mac_reason, sizeof(mac->mac_reason));
mac->mac_intr_mask = BWN_INTR_MASKTEMPLATE;
#ifdef BWN_DEBUG
if (sc->sc_debug & BWN_DEBUG_XMIT)
mac->mac_intr_mask &= ~BWN_INTR_PHY_TXERR;
#endif
mac->mac_suspended = 1;
mac->mac_task_state = 0;
memset(&mac->mac_noise, 0, sizeof(mac->mac_noise));
mac->mac_phy.init_pre(mac);
siba_pcicore_intr(sc->sc_dev);
siba_fix_imcfglobug(sc->sc_dev);
bwn_bt_disable(mac);
if (mac->mac_phy.prepare_hw) {
error = mac->mac_phy.prepare_hw(mac);
if (error)
goto fail0;
}
error = bwn_chip_init(mac);
if (error)
goto fail0;
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_COREREV,
siba_get_revid(sc->sc_dev));
hf = bwn_hf_read(mac);
if (mac->mac_phy.type == BWN_PHYTYPE_G) {
hf |= BWN_HF_GPHY_SYM_WORKAROUND;
if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL)
hf |= BWN_HF_PAGAINBOOST_OFDM_ON;
if (mac->mac_phy.rev == 1)
hf |= BWN_HF_GPHY_DC_CANCELFILTER;
}
if (mac->mac_phy.rf_ver == 0x2050) {
if (mac->mac_phy.rf_rev < 6)
hf |= BWN_HF_FORCE_VCO_RECALC;
if (mac->mac_phy.rf_rev == 6)
hf |= BWN_HF_4318_TSSI;
}
if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW)
hf |= BWN_HF_SLOWCLOCK_REQ_OFF;
if ((siba_get_type(sc->sc_dev) == SIBA_TYPE_PCI) &&
(siba_get_pcicore_revid(sc->sc_dev) <= 10))
hf |= BWN_HF_PCI_SLOWCLOCK_WORKAROUND;
hf &= ~BWN_HF_SKIP_CFP_UPDATE;
bwn_hf_write(mac, hf);
bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SHORT_RETRY_FALLBACK, 3);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_LONG_RETRY_FALLBACK, 2);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_MAXTIME, 1);
bwn_rate_init(mac);
bwn_set_phytxctl(mac);
bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MIN,
(mac->mac_phy.type == BWN_PHYTYPE_B) ? 0x1f : 0xf);
bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MAX, 0x3ff);
if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0)
bwn_pio_init(mac);
else
bwn_dma_init(mac);
bwn_wme_init(mac);
bwn_spu_setdelay(mac, 1);
bwn_bt_enable(mac);
siba_powerup(sc->sc_dev,
!(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW));
bwn_set_macaddr(mac);
bwn_crypt_init(mac);
/* XXX LED initialization */
mac->mac_status = BWN_MAC_STATUS_INITED;
return (error);
fail0:
siba_powerdown(sc->sc_dev);
KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT,
("%s:%d: fail", __func__, __LINE__));
return (error);
}
static void
bwn_core_start(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t tmp;
KASSERT(mac->mac_status == BWN_MAC_STATUS_INITED,
("%s:%d: fail", __func__, __LINE__));
if (siba_get_revid(sc->sc_dev) < 5)
return;
while (1) {
tmp = BWN_READ_4(mac, BWN_XMITSTAT_0);
if (!(tmp & 0x00000001))
break;
tmp = BWN_READ_4(mac, BWN_XMITSTAT_1);
}
bwn_mac_enable(mac);
BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask);
callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac);
mac->mac_status = BWN_MAC_STATUS_STARTED;
}
static void
bwn_core_exit(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t macctl;
BWN_ASSERT_LOCKED(mac->mac_sc);
KASSERT(mac->mac_status <= BWN_MAC_STATUS_INITED,
("%s:%d: fail", __func__, __LINE__));
if (mac->mac_status != BWN_MAC_STATUS_INITED)
return;
mac->mac_status = BWN_MAC_STATUS_UNINIT;
macctl = BWN_READ_4(mac, BWN_MACCTL);
macctl &= ~BWN_MACCTL_MCODE_RUN;
macctl |= BWN_MACCTL_MCODE_JMP0;
BWN_WRITE_4(mac, BWN_MACCTL, macctl);
bwn_dma_stop(mac);
bwn_pio_stop(mac);
bwn_chip_exit(mac);
mac->mac_phy.switch_analog(mac, 0);
siba_dev_down(sc->sc_dev, 0);
siba_powerdown(sc->sc_dev);
}
static void
bwn_bt_disable(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
(void)sc;
/* XXX do nothing yet */
}
static int
bwn_chip_init(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy *phy = &mac->mac_phy;
uint32_t macctl;
int error;
macctl = BWN_MACCTL_IHR_ON | BWN_MACCTL_SHM_ON | BWN_MACCTL_STA;
if (phy->gmode)
macctl |= BWN_MACCTL_GMODE;
BWN_WRITE_4(mac, BWN_MACCTL, macctl);
error = bwn_fw_fillinfo(mac);
if (error)
return (error);
error = bwn_fw_loaducode(mac);
if (error)
return (error);
error = bwn_gpio_init(mac);
if (error)
return (error);
error = bwn_fw_loadinitvals(mac);
if (error) {
siba_gpio_set(sc->sc_dev, 0);
return (error);
}
phy->switch_analog(mac, 1);
error = bwn_phy_init(mac);
if (error) {
siba_gpio_set(sc->sc_dev, 0);
return (error);
}
if (phy->set_im)
phy->set_im(mac, BWN_IMMODE_NONE);
if (phy->set_antenna)
phy->set_antenna(mac, BWN_ANT_DEFAULT);
bwn_set_txantenna(mac, BWN_ANT_DEFAULT);
if (phy->type == BWN_PHYTYPE_B)
BWN_WRITE_2(mac, 0x005e, BWN_READ_2(mac, 0x005e) | 0x0004);
BWN_WRITE_4(mac, 0x0100, 0x01000000);
if (siba_get_revid(sc->sc_dev) < 5)
BWN_WRITE_4(mac, 0x010c, 0x01000000);
BWN_WRITE_4(mac, BWN_MACCTL,
BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_STA);
BWN_WRITE_4(mac, BWN_MACCTL,
BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_STA);
bwn_shm_write_2(mac, BWN_SHARED, 0x0074, 0x0000);
bwn_set_opmode(mac);
if (siba_get_revid(sc->sc_dev) < 3) {
BWN_WRITE_2(mac, 0x060e, 0x0000);
BWN_WRITE_2(mac, 0x0610, 0x8000);
BWN_WRITE_2(mac, 0x0604, 0x0000);
BWN_WRITE_2(mac, 0x0606, 0x0200);
} else {
BWN_WRITE_4(mac, 0x0188, 0x80000000);
BWN_WRITE_4(mac, 0x018c, 0x02000000);
}
BWN_WRITE_4(mac, BWN_INTR_REASON, 0x00004000);
BWN_WRITE_4(mac, BWN_DMA0_INTR_MASK, 0x0001dc00);
BWN_WRITE_4(mac, BWN_DMA1_INTR_MASK, 0x0000dc00);
BWN_WRITE_4(mac, BWN_DMA2_INTR_MASK, 0x0000dc00);
BWN_WRITE_4(mac, BWN_DMA3_INTR_MASK, 0x0001dc00);
BWN_WRITE_4(mac, BWN_DMA4_INTR_MASK, 0x0000dc00);
BWN_WRITE_4(mac, BWN_DMA5_INTR_MASK, 0x0000dc00);
siba_write_4(sc->sc_dev, SIBA_TGSLOW,
siba_read_4(sc->sc_dev, SIBA_TGSLOW) | 0x00100000);
BWN_WRITE_2(mac, BWN_POWERUP_DELAY, siba_get_cc_powerdelay(sc->sc_dev));
return (error);
}
/* read hostflags */
static uint64_t
bwn_hf_read(struct bwn_mac *mac)
{
uint64_t ret;
ret = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFHI);
ret <<= 16;
ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFMI);
ret <<= 16;
ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFLO);
return (ret);
}
static void
bwn_hf_write(struct bwn_mac *mac, uint64_t value)
{
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFLO,
(value & 0x00000000ffffull));
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFMI,
(value & 0x0000ffff0000ull) >> 16);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFHI,
(value & 0xffff00000000ULL) >> 32);
}
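/*
 * The host flags form a single 48-bit value split across three 16-bit
 * shared-memory words: HFLO carries bits 0-15, HFMI bits 16-31 and HFHI
 * bits 32-47.  For example, writing 0x123456789abc stores 0x9abc in
 * HFLO, 0x5678 in HFMI and 0x1234 in HFHI, and bwn_hf_read() reassembles
 * the same 48-bit value.
 */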
static void
bwn_set_txretry(struct bwn_mac *mac, int s, int l)
{
bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_SHORT_RETRY, MIN(s, 0xf));
bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_LONG_RETRY, MIN(l, 0xf));
}
static void
bwn_rate_init(struct bwn_mac *mac)
{
switch (mac->mac_phy.type) {
case BWN_PHYTYPE_A:
case BWN_PHYTYPE_G:
case BWN_PHYTYPE_LP:
case BWN_PHYTYPE_N:
bwn_rate_write(mac, BWN_OFDM_RATE_6MB, 1);
bwn_rate_write(mac, BWN_OFDM_RATE_12MB, 1);
bwn_rate_write(mac, BWN_OFDM_RATE_18MB, 1);
bwn_rate_write(mac, BWN_OFDM_RATE_24MB, 1);
bwn_rate_write(mac, BWN_OFDM_RATE_36MB, 1);
bwn_rate_write(mac, BWN_OFDM_RATE_48MB, 1);
bwn_rate_write(mac, BWN_OFDM_RATE_54MB, 1);
if (mac->mac_phy.type == BWN_PHYTYPE_A)
break;
/* FALLTHROUGH */
case BWN_PHYTYPE_B:
bwn_rate_write(mac, BWN_CCK_RATE_1MB, 0);
bwn_rate_write(mac, BWN_CCK_RATE_2MB, 0);
bwn_rate_write(mac, BWN_CCK_RATE_5MB, 0);
bwn_rate_write(mac, BWN_CCK_RATE_11MB, 0);
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
}
static void
bwn_rate_write(struct bwn_mac *mac, uint16_t rate, int ofdm)
{
uint16_t offset;
if (ofdm) {
offset = 0x480;
offset += (bwn_plcp_getofdm(rate) & 0x000f) * 2;
} else {
offset = 0x4c0;
offset += (bwn_plcp_getcck(rate) & 0x000f) * 2;
}
bwn_shm_write_2(mac, BWN_SHARED, offset + 0x20,
bwn_shm_read_2(mac, BWN_SHARED, offset));
}
static uint8_t
bwn_plcp_getcck(const uint8_t bitrate)
{
switch (bitrate) {
case BWN_CCK_RATE_1MB:
return (0x0a);
case BWN_CCK_RATE_2MB:
return (0x14);
case BWN_CCK_RATE_5MB:
return (0x37);
case BWN_CCK_RATE_11MB:
return (0x6e);
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (0);
}
static uint8_t
bwn_plcp_getofdm(const uint8_t bitrate)
{
switch (bitrate) {
case BWN_OFDM_RATE_6MB:
return (0xb);
case BWN_OFDM_RATE_9MB:
return (0xf);
case BWN_OFDM_RATE_12MB:
return (0xa);
case BWN_OFDM_RATE_18MB:
return (0xe);
case BWN_OFDM_RATE_24MB:
return (0x9);
case BWN_OFDM_RATE_36MB:
return (0xd);
case BWN_OFDM_RATE_48MB:
return (0x8);
case BWN_OFDM_RATE_54MB:
return (0xc);
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (0);
}
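/*
 * The values returned by the two helpers above are the on-air PLCP rate
 * codes: the CCK SIGNAL field carries the rate in units of 100 kbit/s
 * (0x0a = 1, 0x14 = 2, 0x37 = 5.5, 0x6e = 11 Mbit/s), while the OFDM
 * values are the 4-bit RATE codes of the 802.11a/g SIGNAL field with R1
 * as the least significant bit (0xb = 6 ... 0xc = 54 Mbit/s).
 */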
static void
bwn_set_phytxctl(struct bwn_mac *mac)
{
uint16_t ctl;
ctl = (BWN_TX_PHY_ENC_CCK | BWN_TX_PHY_ANT01AUTO |
BWN_TX_PHY_TXPWR);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_BEACON_PHYCTL, ctl);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, ctl);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, ctl);
}
static void
bwn_pio_init(struct bwn_mac *mac)
{
struct bwn_pio *pio = &mac->mac_method.pio;
BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL)
& ~BWN_MACCTL_BIGENDIAN);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_RX_PADOFFSET, 0);
bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BK], 0);
bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BE], 1);
bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VI], 2);
bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VO], 3);
bwn_pio_set_txqueue(mac, &pio->mcast, 4);
bwn_pio_setupqueue_rx(mac, &pio->rx, 0);
}
static void
bwn_pio_set_txqueue(struct bwn_mac *mac, struct bwn_pio_txqueue *tq,
int index)
{
struct bwn_pio_txpkt *tp;
struct bwn_softc *sc = mac->mac_sc;
unsigned int i;
tq->tq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_TXQOFFSET(mac);
tq->tq_index = index;
tq->tq_free = BWN_PIO_MAX_TXPACKETS;
if (siba_get_revid(sc->sc_dev) >= 8)
tq->tq_size = 1920;
else {
tq->tq_size = bwn_pio_read_2(mac, tq, BWN_PIO_TXQBUFSIZE);
tq->tq_size -= 80;
}
TAILQ_INIT(&tq->tq_pktlist);
for (i = 0; i < N(tq->tq_pkts); i++) {
tp = &(tq->tq_pkts[i]);
tp->tp_index = i;
tp->tp_queue = tq;
TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list);
}
}
static uint16_t
bwn_pio_idx2base(struct bwn_mac *mac, int index)
{
struct bwn_softc *sc = mac->mac_sc;
static const uint16_t bases[] = {
BWN_PIO_BASE0,
BWN_PIO_BASE1,
BWN_PIO_BASE2,
BWN_PIO_BASE3,
BWN_PIO_BASE4,
BWN_PIO_BASE5,
BWN_PIO_BASE6,
BWN_PIO_BASE7,
};
static const uint16_t bases_rev11[] = {
BWN_PIO11_BASE0,
BWN_PIO11_BASE1,
BWN_PIO11_BASE2,
BWN_PIO11_BASE3,
BWN_PIO11_BASE4,
BWN_PIO11_BASE5,
};
if (siba_get_revid(sc->sc_dev) >= 11) {
if (index >= N(bases_rev11))
device_printf(sc->sc_dev, "%s: warning\n", __func__);
return (bases_rev11[index]);
}
if (index >= N(bases))
device_printf(sc->sc_dev, "%s: warning\n", __func__);
return (bases[index]);
}
static void
bwn_pio_setupqueue_rx(struct bwn_mac *mac, struct bwn_pio_rxqueue *prq,
int index)
{
struct bwn_softc *sc = mac->mac_sc;
prq->prq_mac = mac;
prq->prq_rev = siba_get_revid(sc->sc_dev);
prq->prq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_RXQOFFSET(mac);
bwn_dma_rxdirectfifo(mac, index, 1);
}
static void
bwn_destroy_pioqueue_tx(struct bwn_pio_txqueue *tq)
{
if (tq == NULL)
return;
bwn_pio_cancel_tx_packets(tq);
}
static void
bwn_destroy_queue_tx(struct bwn_pio_txqueue *pio)
{
bwn_destroy_pioqueue_tx(pio);
}
static uint16_t
bwn_pio_read_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq,
uint16_t offset)
{
return (BWN_READ_2(mac, tq->tq_base + offset));
}
static void
bwn_dma_rxdirectfifo(struct bwn_mac *mac, int idx, uint8_t enable)
{
uint32_t ctl;
int type;
uint16_t base;
type = bwn_dma_mask2type(bwn_dma_mask(mac));
base = bwn_dma_base(type, idx);
if (type == BWN_DMA_64BIT) {
ctl = BWN_READ_4(mac, base + BWN_DMA64_RXCTL);
ctl &= ~BWN_DMA64_RXDIRECTFIFO;
if (enable)
ctl |= BWN_DMA64_RXDIRECTFIFO;
BWN_WRITE_4(mac, base + BWN_DMA64_RXCTL, ctl);
} else {
ctl = BWN_READ_4(mac, base + BWN_DMA32_RXCTL);
ctl &= ~BWN_DMA32_RXDIRECTFIFO;
if (enable)
ctl |= BWN_DMA32_RXDIRECTFIFO;
BWN_WRITE_4(mac, base + BWN_DMA32_RXCTL, ctl);
}
}
static uint64_t
bwn_dma_mask(struct bwn_mac *mac)
{
uint32_t tmp;
uint16_t base;
tmp = BWN_READ_4(mac, SIBA_TGSHIGH);
if (tmp & SIBA_TGSHIGH_DMA64)
return (BWN_DMA_BIT_MASK(64));
base = bwn_dma_base(0, 0);
BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK);
tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL);
if (tmp & BWN_DMA32_TXADDREXT_MASK)
return (BWN_DMA_BIT_MASK(32));
return (BWN_DMA_BIT_MASK(30));
}
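/*
 * The DMA width is probed rather than assumed: if SIBA_TGSHIGH advertises
 * a 64-bit engine the 64-bit mask is used.  Otherwise the address
 * extension bits are written to the 32-bit TX control register and read
 * back; if they stick the engine decodes full 32-bit addresses, and if
 * not it is treated as a 30-bit engine.
 */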
static int
bwn_dma_mask2type(uint64_t dmamask)
{
if (dmamask == BWN_DMA_BIT_MASK(30))
return (BWN_DMA_30BIT);
if (dmamask == BWN_DMA_BIT_MASK(32))
return (BWN_DMA_32BIT);
if (dmamask == BWN_DMA_BIT_MASK(64))
return (BWN_DMA_64BIT);
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (BWN_DMA_30BIT);
}
static void
bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *tq)
{
struct bwn_pio_txpkt *tp;
unsigned int i;
for (i = 0; i < N(tq->tq_pkts); i++) {
tp = &(tq->tq_pkts[i]);
if (tp->tp_m) {
m_freem(tp->tp_m);
tp->tp_m = NULL;
}
}
}
static uint16_t
bwn_dma_base(int type, int controller_idx)
{
static const uint16_t map64[] = {
BWN_DMA64_BASE0,
BWN_DMA64_BASE1,
BWN_DMA64_BASE2,
BWN_DMA64_BASE3,
BWN_DMA64_BASE4,
BWN_DMA64_BASE5,
};
static const uint16_t map32[] = {
BWN_DMA32_BASE0,
BWN_DMA32_BASE1,
BWN_DMA32_BASE2,
BWN_DMA32_BASE3,
BWN_DMA32_BASE4,
BWN_DMA32_BASE5,
};
if (type == BWN_DMA_64BIT) {
KASSERT(controller_idx >= 0 && controller_idx < N(map64),
("%s:%d: fail", __func__, __LINE__));
return (map64[controller_idx]);
}
KASSERT(controller_idx >= 0 && controller_idx < N(map32),
("%s:%d: fail", __func__, __LINE__));
return (map32[controller_idx]);
}
static void
bwn_dma_init(struct bwn_mac *mac)
{
struct bwn_dma *dma = &mac->mac_method.dma;
/* setup TX DMA channels. */
bwn_dma_setup(dma->wme[WME_AC_BK]);
bwn_dma_setup(dma->wme[WME_AC_BE]);
bwn_dma_setup(dma->wme[WME_AC_VI]);
bwn_dma_setup(dma->wme[WME_AC_VO]);
bwn_dma_setup(dma->mcast);
/* setup RX DMA channel. */
bwn_dma_setup(dma->rx);
}
static struct bwn_dma_ring *
bwn_dma_ringsetup(struct bwn_mac *mac, int controller_index,
int for_tx, int type)
{
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_dma_ring *dr;
struct bwn_dmadesc_generic *desc;
struct bwn_dmadesc_meta *mt;
struct bwn_softc *sc = mac->mac_sc;
int error, i;
dr = malloc(sizeof(*dr), M_DEVBUF, M_NOWAIT | M_ZERO);
if (dr == NULL)
goto out;
dr->dr_numslots = BWN_RXRING_SLOTS;
if (for_tx)
dr->dr_numslots = BWN_TXRING_SLOTS;
dr->dr_meta = malloc(dr->dr_numslots * sizeof(struct bwn_dmadesc_meta),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (dr->dr_meta == NULL)
goto fail0;
dr->dr_type = type;
dr->dr_mac = mac;
dr->dr_base = bwn_dma_base(type, controller_index);
dr->dr_index = controller_index;
if (type == BWN_DMA_64BIT) {
dr->getdesc = bwn_dma_64_getdesc;
dr->setdesc = bwn_dma_64_setdesc;
dr->start_transfer = bwn_dma_64_start_transfer;
dr->suspend = bwn_dma_64_suspend;
dr->resume = bwn_dma_64_resume;
dr->get_curslot = bwn_dma_64_get_curslot;
dr->set_curslot = bwn_dma_64_set_curslot;
} else {
dr->getdesc = bwn_dma_32_getdesc;
dr->setdesc = bwn_dma_32_setdesc;
dr->start_transfer = bwn_dma_32_start_transfer;
dr->suspend = bwn_dma_32_suspend;
dr->resume = bwn_dma_32_resume;
dr->get_curslot = bwn_dma_32_get_curslot;
dr->set_curslot = bwn_dma_32_set_curslot;
}
if (for_tx) {
dr->dr_tx = 1;
dr->dr_curslot = -1;
} else {
if (dr->dr_index == 0) {
dr->dr_rx_bufsize = BWN_DMA0_RX_BUFFERSIZE;
dr->dr_frameoffset = BWN_DMA0_RX_FRAMEOFFSET;
} else
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
error = bwn_dma_allocringmemory(dr);
if (error)
goto fail2;
if (for_tx) {
/*
 * Assumption: BWN_TXRING_SLOTS is evenly divisible by
 * BWN_TX_SLOTS_PER_FRAME
 */
KASSERT(BWN_TXRING_SLOTS % BWN_TX_SLOTS_PER_FRAME == 0,
("%s:%d: fail", __func__, __LINE__));
dr->dr_txhdr_cache =
malloc((dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) *
BWN_HDRSIZE(mac), M_DEVBUF, M_NOWAIT | M_ZERO);
KASSERT(dr->dr_txhdr_cache != NULL,
("%s:%d: fail", __func__, __LINE__));
/*
 * Create the TX ring DMA resources
 */
error = bus_dma_tag_create(dma->parent_dtag,
BWN_ALIGN, 0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
BWN_HDRSIZE(mac),
1,
BUS_SPACE_MAXSIZE_32BIT,
0,
NULL, NULL,
&dr->dr_txring_dtag);
if (error) {
device_printf(sc->sc_dev,
"can't create TX ring DMA tag: TODO frees\n");
goto fail1;
}
for (i = 0; i < dr->dr_numslots; i += 2) {
dr->getdesc(dr, i, &desc, &mt);
mt->mt_txtype = BWN_DMADESC_METATYPE_HEADER;
mt->mt_m = NULL;
mt->mt_ni = NULL;
mt->mt_islast = 0;
error = bus_dmamap_create(dr->dr_txring_dtag, 0,
&mt->mt_dmap);
if (error) {
device_printf(sc->sc_dev,
"can't create TX ring DMA map\n");
goto fail1;
}
dr->getdesc(dr, i + 1, &desc, &mt);
mt->mt_txtype = BWN_DMADESC_METATYPE_BODY;
mt->mt_m = NULL;
mt->mt_ni = NULL;
mt->mt_islast = 1;
error = bus_dmamap_create(dma->txbuf_dtag, 0,
&mt->mt_dmap);
if (error) {
device_printf(sc->sc_dev,
"can't create TX buf DMA map\n");
goto fail1;
}
}
} else {
error = bus_dmamap_create(dma->rxbuf_dtag, 0,
&dr->dr_spare_dmap);
if (error) {
device_printf(sc->sc_dev,
"can't create RX buf DMA map\n");
goto out; /* XXX wrong! */
}
for (i = 0; i < dr->dr_numslots; i++) {
dr->getdesc(dr, i, &desc, &mt);
error = bus_dmamap_create(dma->rxbuf_dtag, 0,
&mt->mt_dmap);
if (error) {
device_printf(sc->sc_dev,
"can't create RX buf DMA map\n");
goto out; /* XXX wrong! */
}
error = bwn_dma_newbuf(dr, desc, mt, 1);
if (error) {
device_printf(sc->sc_dev,
"failed to allocate RX buf\n");
goto out; /* XXX wrong! */
}
}
bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap,
BUS_DMASYNC_PREWRITE);
dr->dr_usedslot = dr->dr_numslots;
}
out:
return (dr);
fail2:
free(dr->dr_txhdr_cache, M_DEVBUF);
fail1:
free(dr->dr_meta, M_DEVBUF);
fail0:
free(dr, M_DEVBUF);
return (NULL);
}
static void
bwn_dma_ringfree(struct bwn_dma_ring **dr)
{
if (dr == NULL)
return;
bwn_dma_free_descbufs(*dr);
bwn_dma_free_ringmemory(*dr);
free((*dr)->dr_txhdr_cache, M_DEVBUF);
free((*dr)->dr_meta, M_DEVBUF);
free(*dr, M_DEVBUF);
*dr = NULL;
}
static void
bwn_dma_32_getdesc(struct bwn_dma_ring *dr, int slot,
struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta)
{
struct bwn_dmadesc32 *desc;
*meta = &(dr->dr_meta[slot]);
desc = dr->dr_ring_descbase;
desc = &(desc[slot]);
*gdesc = (struct bwn_dmadesc_generic *)desc;
}
static void
bwn_dma_32_setdesc(struct bwn_dma_ring *dr,
struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize,
int start, int end, int irq)
{
struct bwn_dmadesc32 *descbase = dr->dr_ring_descbase;
struct bwn_softc *sc = dr->dr_mac->mac_sc;
uint32_t addr, addrext, ctl;
int slot;
slot = (int)(&(desc->dma.dma32) - descbase);
KASSERT(slot >= 0 && slot < dr->dr_numslots,
("%s:%d: fail", __func__, __LINE__));
addr = (uint32_t) (dmaaddr & ~SIBA_DMA_TRANSLATION_MASK);
addrext = (uint32_t) (dmaaddr & SIBA_DMA_TRANSLATION_MASK) >> 30;
addr |= siba_dma_translation(sc->sc_dev);
ctl = bufsize & BWN_DMA32_DCTL_BYTECNT;
if (slot == dr->dr_numslots - 1)
ctl |= BWN_DMA32_DCTL_DTABLEEND;
if (start)
ctl |= BWN_DMA32_DCTL_FRAMESTART;
if (end)
ctl |= BWN_DMA32_DCTL_FRAMEEND;
if (irq)
ctl |= BWN_DMA32_DCTL_IRQ;
ctl |= (addrext << BWN_DMA32_DCTL_ADDREXT_SHIFT)
& BWN_DMA32_DCTL_ADDREXT_MASK;
desc->dma.dma32.control = htole32(ctl);
desc->dma.dma32.address = htole32(addr);
}
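/*
 * Example of the address split performed above (the >> 30 shift implies
 * SIBA_DMA_TRANSLATION_MASK covers bits 30-31): for a buffer at bus
 * address 0x76543210, bits 0-29 (0x36543210) are OR'd with the SiBA
 * translation value and stored in the address word, while bits 30-31
 * (0x1 here) become the ADDREXT field of the control word alongside the
 * byte count and the frame start/end/IRQ flags.
 */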
static void
bwn_dma_32_start_transfer(struct bwn_dma_ring *dr, int slot)
{
BWN_DMA_WRITE(dr, BWN_DMA32_TXINDEX,
(uint32_t)(slot * sizeof(struct bwn_dmadesc32)));
}
static void
bwn_dma_32_suspend(struct bwn_dma_ring *dr)
{
BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL,
BWN_DMA_READ(dr, BWN_DMA32_TXCTL) | BWN_DMA32_TXSUSPEND);
}
static void
bwn_dma_32_resume(struct bwn_dma_ring *dr)
{
BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL,
BWN_DMA_READ(dr, BWN_DMA32_TXCTL) & ~BWN_DMA32_TXSUSPEND);
}
static int
bwn_dma_32_get_curslot(struct bwn_dma_ring *dr)
{
uint32_t val;
val = BWN_DMA_READ(dr, BWN_DMA32_RXSTATUS);
val &= BWN_DMA32_RXDPTR;
return (val / sizeof(struct bwn_dmadesc32));
}
static void
bwn_dma_32_set_curslot(struct bwn_dma_ring *dr, int slot)
{
BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX,
(uint32_t) (slot * sizeof(struct bwn_dmadesc32)));
}
static void
bwn_dma_64_getdesc(struct bwn_dma_ring *dr, int slot,
struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta)
{
struct bwn_dmadesc64 *desc;
*meta = &(dr->dr_meta[slot]);
desc = dr->dr_ring_descbase;
desc = &(desc[slot]);
*gdesc = (struct bwn_dmadesc_generic *)desc;
}
static void
bwn_dma_64_setdesc(struct bwn_dma_ring *dr,
struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize,
int start, int end, int irq)
{
struct bwn_dmadesc64 *descbase = dr->dr_ring_descbase;
struct bwn_softc *sc = dr->dr_mac->mac_sc;
int slot;
uint32_t ctl0 = 0, ctl1 = 0;
uint32_t addrlo, addrhi;
uint32_t addrext;
slot = (int)(&(desc->dma.dma64) - descbase);
KASSERT(slot >= 0 && slot < dr->dr_numslots,
("%s:%d: fail", __func__, __LINE__));
addrlo = (uint32_t) (dmaaddr & 0xffffffff);
addrhi = (((uint64_t) dmaaddr >> 32) & ~SIBA_DMA_TRANSLATION_MASK);
addrext = (((uint64_t) dmaaddr >> 32) & SIBA_DMA_TRANSLATION_MASK) >>
30;
addrhi |= (siba_dma_translation(sc->sc_dev) << 1);
if (slot == dr->dr_numslots - 1)
ctl0 |= BWN_DMA64_DCTL0_DTABLEEND;
if (start)
ctl0 |= BWN_DMA64_DCTL0_FRAMESTART;
if (end)
ctl0 |= BWN_DMA64_DCTL0_FRAMEEND;
if (irq)
ctl0 |= BWN_DMA64_DCTL0_IRQ;
ctl1 |= bufsize & BWN_DMA64_DCTL1_BYTECNT;
ctl1 |= (addrext << BWN_DMA64_DCTL1_ADDREXT_SHIFT)
& BWN_DMA64_DCTL1_ADDREXT_MASK;
desc->dma.dma64.control0 = htole32(ctl0);
desc->dma.dma64.control1 = htole32(ctl1);
desc->dma.dma64.address_low = htole32(addrlo);
desc->dma.dma64.address_high = htole32(addrhi);
}
static void
bwn_dma_64_start_transfer(struct bwn_dma_ring *dr, int slot)
{
BWN_DMA_WRITE(dr, BWN_DMA64_TXINDEX,
(uint32_t)(slot * sizeof(struct bwn_dmadesc64)));
}
static void
bwn_dma_64_suspend(struct bwn_dma_ring *dr)
{
BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL,
BWN_DMA_READ(dr, BWN_DMA64_TXCTL) | BWN_DMA64_TXSUSPEND);
}
static void
bwn_dma_64_resume(struct bwn_dma_ring *dr)
{
BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL,
BWN_DMA_READ(dr, BWN_DMA64_TXCTL) & ~BWN_DMA64_TXSUSPEND);
}
static int
bwn_dma_64_get_curslot(struct bwn_dma_ring *dr)
{
uint32_t val;
val = BWN_DMA_READ(dr, BWN_DMA64_RXSTATUS);
val &= BWN_DMA64_RXSTATDPTR;
return (val / sizeof(struct bwn_dmadesc64));
}
static void
bwn_dma_64_set_curslot(struct bwn_dma_ring *dr, int slot)
{
BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX,
(uint32_t)(slot * sizeof(struct bwn_dmadesc64)));
}
static int
bwn_dma_allocringmemory(struct bwn_dma_ring *dr)
{
struct bwn_mac *mac = dr->dr_mac;
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_softc *sc = mac->mac_sc;
int error;
error = bus_dma_tag_create(dma->parent_dtag,
BWN_ALIGN, 0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
BWN_DMA_RINGMEMSIZE,
1,
BUS_SPACE_MAXSIZE_32BIT,
0,
NULL, NULL,
&dr->dr_ring_dtag);
if (error) {
device_printf(sc->sc_dev,
"can't create TX ring DMA tag: TODO frees\n");
return (-1);
}
error = bus_dmamem_alloc(dr->dr_ring_dtag,
&dr->dr_ring_descbase, BUS_DMA_WAITOK | BUS_DMA_ZERO,
&dr->dr_ring_dmap);
if (error) {
device_printf(sc->sc_dev,
"can't allocate DMA mem: TODO frees\n");
return (-1);
}
error = bus_dmamap_load(dr->dr_ring_dtag, dr->dr_ring_dmap,
dr->dr_ring_descbase, BWN_DMA_RINGMEMSIZE,
bwn_dma_ring_addr, &dr->dr_ring_dmabase, BUS_DMA_NOWAIT);
if (error) {
device_printf(sc->sc_dev,
"can't load DMA mem: TODO free\n");
return (-1);
}
return (0);
}
static void
bwn_dma_setup(struct bwn_dma_ring *dr)
{
struct bwn_softc *sc = dr->dr_mac->mac_sc;
uint64_t ring64;
uint32_t addrext, ring32, value;
uint32_t trans = siba_dma_translation(sc->sc_dev);
if (dr->dr_tx) {
dr->dr_curslot = -1;
if (dr->dr_type == BWN_DMA_64BIT) {
ring64 = (uint64_t)(dr->dr_ring_dmabase);
addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK)
>> 30;
value = BWN_DMA64_TXENABLE;
value |= (addrext << BWN_DMA64_TXADDREXT_SHIFT)
& BWN_DMA64_TXADDREXT_MASK;
BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, value);
BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO,
(ring64 & 0xffffffff));
BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI,
((ring64 >> 32) &
~SIBA_DMA_TRANSLATION_MASK) | (trans << 1));
} else {
ring32 = (uint32_t)(dr->dr_ring_dmabase);
addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30;
value = BWN_DMA32_TXENABLE;
value |= (addrext << BWN_DMA32_TXADDREXT_SHIFT)
& BWN_DMA32_TXADDREXT_MASK;
BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, value);
BWN_DMA_WRITE(dr, BWN_DMA32_TXRING,
(ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans);
}
return;
}
/*
 * Set up the RX ring.
 */
dr->dr_usedslot = dr->dr_numslots;
if (dr->dr_type == BWN_DMA_64BIT) {
ring64 = (uint64_t)(dr->dr_ring_dmabase);
addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30;
value = (dr->dr_frameoffset << BWN_DMA64_RXFROFF_SHIFT);
value |= BWN_DMA64_RXENABLE;
value |= (addrext << BWN_DMA64_RXADDREXT_SHIFT)
& BWN_DMA64_RXADDREXT_MASK;
BWN_DMA_WRITE(dr, BWN_DMA64_RXCTL, value);
BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, (ring64 & 0xffffffff));
BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI,
((ring64 >> 32) & ~SIBA_DMA_TRANSLATION_MASK)
| (trans << 1));
BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, dr->dr_numslots *
sizeof(struct bwn_dmadesc64));
} else {
ring32 = (uint32_t)(dr->dr_ring_dmabase);
addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30;
value = (dr->dr_frameoffset << BWN_DMA32_RXFROFF_SHIFT);
value |= BWN_DMA32_RXENABLE;
value |= (addrext << BWN_DMA32_RXADDREXT_SHIFT)
& BWN_DMA32_RXADDREXT_MASK;
BWN_DMA_WRITE(dr, BWN_DMA32_RXCTL, value);
BWN_DMA_WRITE(dr, BWN_DMA32_RXRING,
(ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans);
BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, dr->dr_numslots *
sizeof(struct bwn_dmadesc32));
}
}
static void
bwn_dma_free_ringmemory(struct bwn_dma_ring *dr)
{
bus_dmamap_unload(dr->dr_ring_dtag, dr->dr_ring_dmap);
bus_dmamem_free(dr->dr_ring_dtag, dr->dr_ring_descbase,
dr->dr_ring_dmap);
}
static void
bwn_dma_cleanup(struct bwn_dma_ring *dr)
{
if (dr->dr_tx) {
bwn_dma_tx_reset(dr->dr_mac, dr->dr_base, dr->dr_type);
if (dr->dr_type == BWN_DMA_64BIT) {
BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, 0);
BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, 0);
} else
BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, 0);
} else {
bwn_dma_rx_reset(dr->dr_mac, dr->dr_base, dr->dr_type);
if (dr->dr_type == BWN_DMA_64BIT) {
BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, 0);
BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, 0);
} else
BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, 0);
}
}
static void
bwn_dma_free_descbufs(struct bwn_dma_ring *dr)
{
struct bwn_dmadesc_generic *desc;
struct bwn_dmadesc_meta *meta;
struct bwn_mac *mac = dr->dr_mac;
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_softc *sc = mac->mac_sc;
int i;
if (!dr->dr_usedslot)
return;
for (i = 0; i < dr->dr_numslots; i++) {
dr->getdesc(dr, i, &desc, &meta);
if (meta->mt_m == NULL) {
if (!dr->dr_tx)
device_printf(sc->sc_dev, "%s: not TX?\n",
__func__);
continue;
}
if (dr->dr_tx) {
if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER)
bus_dmamap_unload(dr->dr_txring_dtag,
meta->mt_dmap);
else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY)
bus_dmamap_unload(dma->txbuf_dtag,
meta->mt_dmap);
} else
bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap);
bwn_dma_free_descbuf(dr, meta);
}
}
static int
bwn_dma_tx_reset(struct bwn_mac *mac, uint16_t base,
int type)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t value;
int i;
uint16_t offset;
for (i = 0; i < 10; i++) {
offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS :
BWN_DMA32_TXSTATUS;
value = BWN_READ_4(mac, base + offset);
if (type == BWN_DMA_64BIT) {
value &= BWN_DMA64_TXSTAT;
if (value == BWN_DMA64_TXSTAT_DISABLED ||
value == BWN_DMA64_TXSTAT_IDLEWAIT ||
value == BWN_DMA64_TXSTAT_STOPPED)
break;
} else {
value &= BWN_DMA32_TXSTATE;
if (value == BWN_DMA32_TXSTAT_DISABLED ||
value == BWN_DMA32_TXSTAT_IDLEWAIT ||
value == BWN_DMA32_TXSTAT_STOPPED)
break;
}
DELAY(1000);
}
offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXCTL : BWN_DMA32_TXCTL;
BWN_WRITE_4(mac, base + offset, 0);
for (i = 0; i < 10; i++) {
offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS :
BWN_DMA32_TXSTATUS;
value = BWN_READ_4(mac, base + offset);
if (type == BWN_DMA_64BIT) {
value &= BWN_DMA64_TXSTAT;
if (value == BWN_DMA64_TXSTAT_DISABLED) {
i = -1;
break;
}
} else {
value &= BWN_DMA32_TXSTATE;
if (value == BWN_DMA32_TXSTAT_DISABLED) {
i = -1;
break;
}
}
DELAY(1000);
}
if (i != -1) {
device_printf(sc->sc_dev, "%s: timed out\n", __func__);
return (ENODEV);
}
DELAY(1000);
return (0);
}
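/*
 * In the second polling loop above (and in bwn_dma_rx_reset() below),
 * i == -1 is a sentinel meaning the engine reached the disabled state
 * before the ten 1 ms polls were exhausted; leaving the loop with
 * i != -1 is therefore treated as a timeout and reported as ENODEV.
 */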
static int
bwn_dma_rx_reset(struct bwn_mac *mac, uint16_t base,
int type)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t value;
int i;
uint16_t offset;
offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXCTL : BWN_DMA32_RXCTL;
BWN_WRITE_4(mac, base + offset, 0);
for (i = 0; i < 10; i++) {
offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXSTATUS :
BWN_DMA32_RXSTATUS;
value = BWN_READ_4(mac, base + offset);
if (type == BWN_DMA_64BIT) {
value &= BWN_DMA64_RXSTAT;
if (value == BWN_DMA64_RXSTAT_DISABLED) {
i = -1;
break;
}
} else {
value &= BWN_DMA32_RXSTATE;
if (value == BWN_DMA32_RXSTAT_DISABLED) {
i = -1;
break;
}
}
DELAY(1000);
}
if (i != -1) {
device_printf(sc->sc_dev, "%s: timed out\n", __func__);
return (ENODEV);
}
return (0);
}
static void
bwn_dma_free_descbuf(struct bwn_dma_ring *dr,
struct bwn_dmadesc_meta *meta)
{
if (meta->mt_m != NULL) {
m_freem(meta->mt_m);
meta->mt_m = NULL;
}
if (meta->mt_ni != NULL) {
ieee80211_free_node(meta->mt_ni);
meta->mt_ni = NULL;
}
}
static void
bwn_dma_set_redzone(struct bwn_dma_ring *dr, struct mbuf *m)
{
struct bwn_rxhdr4 *rxhdr;
unsigned char *frame;
rxhdr = mtod(m, struct bwn_rxhdr4 *);
rxhdr->frame_len = 0;
KASSERT(dr->dr_rx_bufsize >= dr->dr_frameoffset +
sizeof(struct bwn_plcp6) + 2,
("%s:%d: fail", __func__, __LINE__));
frame = mtod(m, char *) + dr->dr_frameoffset;
memset(frame, 0xff, sizeof(struct bwn_plcp6) + 2 /* padding */);
}
static uint8_t
bwn_dma_check_redzone(struct bwn_dma_ring *dr, struct mbuf *m)
{
unsigned char *f = mtod(m, char *) + dr->dr_frameoffset;
return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7])
== 0xff);
}
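/*
 * The red zone acts as a completion marker: bwn_dma_set_redzone() zeroes
 * the RX header's frame_len and fills the PLCP-plus-padding area with
 * 0xff, and bwn_dma_check_redzone() returns non-zero only while the
 * first eight of those bytes are still 0xff, i.e. while the device has
 * not yet written a received frame into the buffer.
 */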
static void
bwn_wme_init(struct bwn_mac *mac)
{
bwn_wme_load(mac);
/* enable WME support. */
bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_EDCF);
BWN_WRITE_2(mac, BWN_IFSCTL, BWN_READ_2(mac, BWN_IFSCTL) |
BWN_IFSCTL_USE_EDCF);
}
static void
bwn_spu_setdelay(struct bwn_mac *mac, int idle)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t delay; /* microsec */
delay = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 3700 : 1050;
if (ic->ic_opmode == IEEE80211_M_IBSS || idle)
delay = 500;
if ((mac->mac_phy.rf_ver == 0x2050) && (mac->mac_phy.rf_rev == 8))
delay = max(delay, (uint16_t)2400);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SPU_WAKEUP, delay);
}
static void
bwn_bt_enable(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint64_t hf;
if (bwn_bluetooth == 0)
return;
if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCOEXIST) == 0)
return;
if (mac->mac_phy.type != BWN_PHYTYPE_B && !mac->mac_phy.gmode)
return;
hf = bwn_hf_read(mac);
if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCMOD)
hf |= BWN_HF_BT_COEXISTALT;
else
hf |= BWN_HF_BT_COEXIST;
bwn_hf_write(mac, hf);
}
static void
bwn_set_macaddr(struct bwn_mac *mac)
{
bwn_mac_write_bssid(mac);
- bwn_mac_setfilter(mac, BWN_MACFILTER_SELF, mac->mac_sc->sc_macaddr);
+ bwn_mac_setfilter(mac, BWN_MACFILTER_SELF,
+ mac->mac_sc->sc_ic.ic_macaddr);
}
static void
bwn_clear_keys(struct bwn_mac *mac)
{
int i;
for (i = 0; i < mac->mac_max_nr_keys; i++) {
KASSERT(i >= 0 && i < mac->mac_max_nr_keys,
("%s:%d: fail", __func__, __LINE__));
bwn_key_dowrite(mac, i, BWN_SEC_ALGO_NONE,
NULL, BWN_SEC_KEYSIZE, NULL);
if ((i <= 3) && !BWN_SEC_NEWAPI(mac)) {
bwn_key_dowrite(mac, i + 4, BWN_SEC_ALGO_NONE,
NULL, BWN_SEC_KEYSIZE, NULL);
}
mac->mac_key[i].keyconf = NULL;
}
}
static void
bwn_crypt_init(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
mac->mac_max_nr_keys = (siba_get_revid(sc->sc_dev) >= 5) ? 58 : 20;
KASSERT(mac->mac_max_nr_keys <= N(mac->mac_key),
("%s:%d: fail", __func__, __LINE__));
mac->mac_ktp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_KEY_TABLEP);
mac->mac_ktp *= 2;
if (siba_get_revid(sc->sc_dev) >= 5)
BWN_WRITE_2(mac, BWN_RCMTA_COUNT, mac->mac_max_nr_keys - 8);
bwn_clear_keys(mac);
}
static void
bwn_chip_exit(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
bwn_phy_exit(mac);
siba_gpio_set(sc->sc_dev, 0);
}
static int
bwn_fw_fillinfo(struct bwn_mac *mac)
{
int error;
error = bwn_fw_gets(mac, BWN_FWTYPE_DEFAULT);
if (error == 0)
return (0);
error = bwn_fw_gets(mac, BWN_FWTYPE_OPENSOURCE);
if (error == 0)
return (0);
return (error);
}
static int
bwn_gpio_init(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t mask = 0x1f, set = 0xf, value;
BWN_WRITE_4(mac, BWN_MACCTL,
BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GPOUT_MASK);
BWN_WRITE_2(mac, BWN_GPIO_MASK,
BWN_READ_2(mac, BWN_GPIO_MASK) | 0x000f);
if (siba_get_chipid(sc->sc_dev) == 0x4301) {
mask |= 0x0060;
set |= 0x0060;
}
if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) {
BWN_WRITE_2(mac, BWN_GPIO_MASK,
BWN_READ_2(mac, BWN_GPIO_MASK) | 0x0200);
mask |= 0x0200;
set |= 0x0200;
}
if (siba_get_revid(sc->sc_dev) >= 2)
mask |= 0x0010;
value = siba_gpio_get(sc->sc_dev);
if (value == -1)
return (0);
siba_gpio_set(sc->sc_dev, (value & mask) | set);
return (0);
}
static int
bwn_fw_loadinitvals(struct bwn_mac *mac)
{
#define GETFWOFFSET(fwp, offset) \
((const struct bwn_fwinitvals *)((const char *)fwp.fw->data + offset))
const size_t hdr_len = sizeof(struct bwn_fwhdr);
const struct bwn_fwhdr *hdr;
struct bwn_fw *fw = &mac->mac_fw;
int error;
hdr = (const struct bwn_fwhdr *)(fw->initvals.fw->data);
error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals, hdr_len),
be32toh(hdr->size), fw->initvals.fw->datasize - hdr_len);
if (error)
return (error);
if (fw->initvals_band.fw) {
hdr = (const struct bwn_fwhdr *)(fw->initvals_band.fw->data);
error = bwn_fwinitvals_write(mac,
GETFWOFFSET(fw->initvals_band, hdr_len),
be32toh(hdr->size),
fw->initvals_band.fw->datasize - hdr_len);
}
return (error);
#undef GETFWOFFSET
}
static int
bwn_phy_init(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
int error;
mac->mac_phy.chan = mac->mac_phy.get_default_chan(mac);
mac->mac_phy.rf_onoff(mac, 1);
error = mac->mac_phy.init(mac);
if (error) {
device_printf(sc->sc_dev, "PHY init failed\n");
goto fail0;
}
error = bwn_switch_channel(mac,
mac->mac_phy.get_default_chan(mac));
if (error) {
device_printf(sc->sc_dev,
"failed to switch default channel\n");
goto fail1;
}
return (0);
fail1:
if (mac->mac_phy.exit)
mac->mac_phy.exit(mac);
fail0:
mac->mac_phy.rf_onoff(mac, 0);
return (error);
}
static void
bwn_set_txantenna(struct bwn_mac *mac, int antenna)
{
uint16_t ant;
uint16_t tmp;
ant = bwn_ant2phy(antenna);
/* For ACK/CTS */
tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL);
tmp = (tmp & ~BWN_TX_PHY_ANT) | ant;
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, tmp);
/* For Probe Responses */
tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL);
tmp = (tmp & ~BWN_TX_PHY_ANT) | ant;
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, tmp);
}
static void
bwn_set_opmode(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t ctl;
uint16_t cfp_pretbtt;
ctl = BWN_READ_4(mac, BWN_MACCTL);
ctl &= ~(BWN_MACCTL_HOSTAP | BWN_MACCTL_PASS_CTL |
BWN_MACCTL_PASS_BADPLCP | BWN_MACCTL_PASS_BADFCS |
BWN_MACCTL_PROMISC | BWN_MACCTL_BEACON_PROMISC);
ctl |= BWN_MACCTL_STA;
if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
ic->ic_opmode == IEEE80211_M_MBSS)
ctl |= BWN_MACCTL_HOSTAP;
else if (ic->ic_opmode == IEEE80211_M_IBSS)
ctl &= ~BWN_MACCTL_STA;
ctl |= sc->sc_filters;
if (siba_get_revid(sc->sc_dev) <= 4)
ctl |= BWN_MACCTL_PROMISC;
BWN_WRITE_4(mac, BWN_MACCTL, ctl);
cfp_pretbtt = 2;
if ((ctl & BWN_MACCTL_STA) && !(ctl & BWN_MACCTL_HOSTAP)) {
if (siba_get_chipid(sc->sc_dev) == 0x4306 &&
siba_get_chiprev(sc->sc_dev) == 3)
cfp_pretbtt = 100;
else
cfp_pretbtt = 50;
}
BWN_WRITE_2(mac, 0x612, cfp_pretbtt);
}
static int
bwn_dma_gettype(struct bwn_mac *mac)
{
uint32_t tmp;
uint16_t base;
tmp = BWN_READ_4(mac, SIBA_TGSHIGH);
if (tmp & SIBA_TGSHIGH_DMA64)
return (BWN_DMA_64BIT);
base = bwn_dma_base(0, 0);
BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK);
tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL);
if (tmp & BWN_DMA32_TXADDREXT_MASK)
return (BWN_DMA_32BIT);
return (BWN_DMA_30BIT);
}
static void
bwn_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
if (!error) {
KASSERT(nseg == 1, ("too many segments(%d)\n", nseg));
*((bus_addr_t *)arg) = seg->ds_addr;
}
}
static void
bwn_phy_g_init_sub(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_softc *sc = mac->mac_sc;
uint16_t i, tmp;
if (phy->rev == 1)
bwn_phy_init_b5(mac);
else
bwn_phy_init_b6(mac);
if (phy->rev >= 2 || phy->gmode)
bwn_phy_init_a(mac);
if (phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, 0);
BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, 0);
}
if (phy->rev == 2) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0);
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xc0);
}
if (phy->rev > 5) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0x400);
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xc0);
}
if (phy->gmode || phy->rev >= 2) {
tmp = BWN_PHY_READ(mac, BWN_PHY_VERSION_OFDM);
tmp &= BWN_PHYVER_VERSION;
if (tmp == 3 || tmp == 5) {
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc2), 0x1816);
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc3), 0x8006);
}
if (tmp == 5) {
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xcc), 0x00ff,
0x1f00);
}
}
if ((phy->rev <= 2 && phy->gmode) || phy->rev >= 2)
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x7e), 0x78);
if (phy->rf_rev == 8) {
BWN_PHY_SET(mac, BWN_PHY_EXTG(0x01), 0x80);
BWN_PHY_SET(mac, BWN_PHY_OFDM(0x3e), 0x4);
}
if (BWN_HAS_LOOPBACK(phy))
bwn_loopback_calcgain(mac);
if (phy->rf_rev != 8) {
if (pg->pg_initval == 0xffff)
pg->pg_initval = bwn_rf_init_bcm2050(mac);
else
BWN_RF_WRITE(mac, 0x0078, pg->pg_initval);
}
bwn_lo_g_init(mac);
if (BWN_HAS_TXMAG(phy)) {
BWN_RF_WRITE(mac, 0x52,
(BWN_RF_READ(mac, 0x52) & 0xff00)
| pg->pg_loctl.tx_bias |
pg->pg_loctl.tx_magn);
} else {
BWN_RF_SETMASK(mac, 0x52, 0xfff0, pg->pg_loctl.tx_bias);
}
if (phy->rev >= 6) {
BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x36), 0x0fff,
(pg->pg_loctl.tx_bias << 12));
}
if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL)
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x8075);
else
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x807f);
if (phy->rev < 2)
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x101);
else
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x202);
if (phy->gmode || phy->rev >= 2) {
bwn_lo_g_adjust(mac);
BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8078);
}
if (!(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_RSSI)) {
for (i = 0; i < 64; i++) {
BWN_PHY_WRITE(mac, BWN_PHY_NRSSI_CTRL, i);
BWN_PHY_WRITE(mac, BWN_PHY_NRSSI_DATA,
(uint16_t)MIN(MAX(bwn_nrssi_read(mac, i) - 0xffff,
-32), 31));
}
bwn_nrssi_threshold(mac);
} else if (phy->gmode || phy->rev >= 2) {
if (pg->pg_nrssi[0] == -1000) {
KASSERT(pg->pg_nrssi[1] == -1000,
("%s:%d: fail", __func__, __LINE__));
bwn_nrssi_slope_11g(mac);
} else
bwn_nrssi_threshold(mac);
}
if (phy->rf_rev == 8)
BWN_PHY_WRITE(mac, BWN_PHY_EXTG(0x05), 0x3230);
bwn_phy_hwpctl_init(mac);
if ((siba_get_chipid(sc->sc_dev) == 0x4306
&& siba_get_chippkg(sc->sc_dev) == 2) || 0) {
BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0xbfff);
BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xc3), 0x7fff);
}
}
static uint8_t
bwn_has_hwpctl(struct bwn_mac *mac)
{
if (mac->mac_phy.hwpctl == 0 || mac->mac_phy.use_hwpctl == NULL)
return (0);
return (mac->mac_phy.use_hwpctl(mac));
}
static void
bwn_phy_init_b5(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_softc *sc = mac->mac_sc;
uint16_t offset, value;
uint8_t old_channel;
if (phy->analog == 1)
BWN_RF_SET(mac, 0x007a, 0x0050);
if ((siba_get_pci_subvendor(sc->sc_dev) != SIBA_BOARDVENDOR_BCM) &&
(siba_get_pci_subdevice(sc->sc_dev) != SIBA_BOARD_BU4306)) {
value = 0x2120;
for (offset = 0x00a8; offset < 0x00c7; offset++) {
BWN_PHY_WRITE(mac, offset, value);
value += 0x202;
}
}
BWN_PHY_SETMASK(mac, 0x0035, 0xf0ff, 0x0700);
if (phy->rf_ver == 0x2050)
BWN_PHY_WRITE(mac, 0x0038, 0x0667);
if (phy->gmode || phy->rev >= 2) {
if (phy->rf_ver == 0x2050) {
BWN_RF_SET(mac, 0x007a, 0x0020);
BWN_RF_SET(mac, 0x0051, 0x0004);
}
BWN_WRITE_2(mac, BWN_PHY_RADIO, 0x0000);
BWN_PHY_SET(mac, 0x0802, 0x0100);
BWN_PHY_SET(mac, 0x042b, 0x2000);
BWN_PHY_WRITE(mac, 0x001c, 0x186a);
BWN_PHY_SETMASK(mac, 0x0013, 0x00ff, 0x1900);
BWN_PHY_SETMASK(mac, 0x0035, 0xffc0, 0x0064);
BWN_PHY_SETMASK(mac, 0x005d, 0xff80, 0x000a);
}
if (mac->mac_flags & BWN_MAC_FLAG_BADFRAME_PREEMP)
BWN_PHY_SET(mac, BWN_PHY_RADIO_BITFIELD, (1 << 11));
if (phy->analog == 1) {
BWN_PHY_WRITE(mac, 0x0026, 0xce00);
BWN_PHY_WRITE(mac, 0x0021, 0x3763);
BWN_PHY_WRITE(mac, 0x0022, 0x1bc3);
BWN_PHY_WRITE(mac, 0x0023, 0x06f9);
BWN_PHY_WRITE(mac, 0x0024, 0x037e);
} else
BWN_PHY_WRITE(mac, 0x0026, 0xcc00);
BWN_PHY_WRITE(mac, 0x0030, 0x00c6);
BWN_WRITE_2(mac, 0x03ec, 0x3f22);
if (phy->analog == 1)
BWN_PHY_WRITE(mac, 0x0020, 0x3e1c);
else
BWN_PHY_WRITE(mac, 0x0020, 0x301c);
if (phy->analog == 0)
BWN_WRITE_2(mac, 0x03e4, 0x3000);
old_channel = phy->chan;
bwn_phy_g_switch_chan(mac, 7, 0);
if (phy->rf_ver != 0x2050) {
BWN_RF_WRITE(mac, 0x0075, 0x0080);
BWN_RF_WRITE(mac, 0x0079, 0x0081);
}
BWN_RF_WRITE(mac, 0x0050, 0x0020);
BWN_RF_WRITE(mac, 0x0050, 0x0023);
if (phy->rf_ver == 0x2050) {
BWN_RF_WRITE(mac, 0x0050, 0x0020);
BWN_RF_WRITE(mac, 0x005a, 0x0070);
}
BWN_RF_WRITE(mac, 0x005b, 0x007b);
BWN_RF_WRITE(mac, 0x005c, 0x00b0);
BWN_RF_SET(mac, 0x007a, 0x0007);
bwn_phy_g_switch_chan(mac, old_channel, 0);
BWN_PHY_WRITE(mac, 0x0014, 0x0080);
BWN_PHY_WRITE(mac, 0x0032, 0x00ca);
BWN_PHY_WRITE(mac, 0x002a, 0x88a3);
bwn_phy_g_set_txpwr_sub(mac, &pg->pg_bbatt, &pg->pg_rfatt,
pg->pg_txctl);
if (phy->rf_ver == 0x2050)
BWN_RF_WRITE(mac, 0x005d, 0x000d);
BWN_WRITE_2(mac, 0x03e4, (BWN_READ_2(mac, 0x03e4) & 0xffc0) | 0x0004);
}
static void
bwn_loopback_calcgain(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_softc *sc = mac->mac_sc;
uint16_t backup_phy[16] = { 0 };
uint16_t backup_radio[3];
uint16_t backup_bband;
uint16_t i, j, loop_i_max;
uint16_t trsw_rx;
uint16_t loop1_outer_done, loop1_inner_done;
backup_phy[0] = BWN_PHY_READ(mac, BWN_PHY_CRS0);
backup_phy[1] = BWN_PHY_READ(mac, BWN_PHY_CCKBBANDCFG);
backup_phy[2] = BWN_PHY_READ(mac, BWN_PHY_RFOVER);
backup_phy[3] = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL);
if (phy->rev != 1) {
backup_phy[4] = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVER);
backup_phy[5] = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVERVAL);
}
backup_phy[6] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x5a));
backup_phy[7] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x59));
backup_phy[8] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x58));
backup_phy[9] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x0a));
backup_phy[10] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x03));
backup_phy[11] = BWN_PHY_READ(mac, BWN_PHY_LO_MASK);
backup_phy[12] = BWN_PHY_READ(mac, BWN_PHY_LO_CTL);
backup_phy[13] = BWN_PHY_READ(mac, BWN_PHY_CCK(0x2b));
backup_phy[14] = BWN_PHY_READ(mac, BWN_PHY_PGACTL);
backup_phy[15] = BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE);
backup_bband = pg->pg_bbatt.att;
backup_radio[0] = BWN_RF_READ(mac, 0x52);
backup_radio[1] = BWN_RF_READ(mac, 0x43);
backup_radio[2] = BWN_RF_READ(mac, 0x7a);
BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0x3fff);
BWN_PHY_SET(mac, BWN_PHY_CCKBBANDCFG, 0x8000);
BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0002);
BWN_PHY_MASK(mac, BWN_PHY_RFOVERVAL, 0xfffd);
BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0001);
BWN_PHY_MASK(mac, BWN_PHY_RFOVERVAL, 0xfffe);
if (phy->rev != 1) {
BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0001);
BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffe);
BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0002);
BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffd);
}
BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x000c);
BWN_PHY_SET(mac, BWN_PHY_RFOVERVAL, 0x000c);
BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0030);
BWN_PHY_SETMASK(mac, BWN_PHY_RFOVERVAL, 0xffcf, 0x10);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), 0x0780);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), 0xc810);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0x000d);
BWN_PHY_SET(mac, BWN_PHY_CCK(0x0a), 0x2000);
if (phy->rev != 1) {
BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0004);
BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffb);
}
BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x03), 0xff9f, 0x40);
if (phy->rf_rev == 8)
BWN_RF_WRITE(mac, 0x43, 0x000f);
else {
BWN_RF_WRITE(mac, 0x52, 0);
BWN_RF_SETMASK(mac, 0x43, 0xfff0, 0x9);
}
bwn_phy_g_set_bbatt(mac, 11);
if (phy->rev >= 3)
BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0xc020);
else
BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8020);
BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, 0);
BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x2b), 0xffc0, 0x01);
BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x2b), 0xc0ff, 0x800);
BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0100);
BWN_PHY_MASK(mac, BWN_PHY_RFOVERVAL, 0xcfff);
if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA) {
if (phy->rev >= 7) {
BWN_PHY_SET(mac, BWN_PHY_RFOVER, 0x0800);
BWN_PHY_SET(mac, BWN_PHY_RFOVERVAL, 0x8000);
}
}
BWN_RF_MASK(mac, 0x7a, 0x00f7);
j = 0;
loop_i_max = (phy->rf_rev == 8) ? 15 : 9;
for (i = 0; i < loop_i_max; i++) {
for (j = 0; j < 16; j++) {
BWN_RF_WRITE(mac, 0x43, i);
BWN_PHY_SETMASK(mac, BWN_PHY_RFOVERVAL, 0xf0ff,
(j << 8));
BWN_PHY_SETMASK(mac, BWN_PHY_PGACTL, 0x0fff, 0xa000);
BWN_PHY_SET(mac, BWN_PHY_PGACTL, 0xf000);
DELAY(20);
if (BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE) >= 0xdfc)
goto done0;
}
}
done0:
loop1_outer_done = i;
loop1_inner_done = j;
if (j >= 8) {
BWN_PHY_SET(mac, BWN_PHY_RFOVERVAL, 0x30);
trsw_rx = 0x1b;
for (j = j - 8; j < 16; j++) {
BWN_PHY_SETMASK(mac, BWN_PHY_RFOVERVAL, 0xf0ff, j << 8);
BWN_PHY_SETMASK(mac, BWN_PHY_PGACTL, 0x0fff, 0xa000);
BWN_PHY_SET(mac, BWN_PHY_PGACTL, 0xf000);
DELAY(20);
trsw_rx -= 3;
if (BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE) >= 0xdfc)
goto done1;
}
} else
trsw_rx = 0x18;
done1:
if (phy->rev != 1) {
BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, backup_phy[4]);
BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL, backup_phy[5]);
}
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), backup_phy[6]);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), backup_phy[7]);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), backup_phy[8]);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x0a), backup_phy[9]);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x03), backup_phy[10]);
BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, backup_phy[11]);
BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, backup_phy[12]);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), backup_phy[13]);
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, backup_phy[14]);
bwn_phy_g_set_bbatt(mac, backup_bband);
BWN_RF_WRITE(mac, 0x52, backup_radio[0]);
BWN_RF_WRITE(mac, 0x43, backup_radio[1]);
BWN_RF_WRITE(mac, 0x7a, backup_radio[2]);
BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, backup_phy[2] | 0x0003);
DELAY(10);
BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, backup_phy[2]);
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, backup_phy[3]);
BWN_PHY_WRITE(mac, BWN_PHY_CRS0, backup_phy[0]);
BWN_PHY_WRITE(mac, BWN_PHY_CCKBBANDCFG, backup_phy[1]);
pg->pg_max_lb_gain =
((loop1_inner_done * 6) - (loop1_outer_done * 4)) - 11;
pg->pg_trsw_rx_gain = trsw_rx * 2;
}
static uint16_t
bwn_rf_init_bcm2050(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
uint32_t tmp1 = 0, tmp2 = 0;
uint16_t rcc, i, j, pgactl, cck0, cck1, cck2, cck3, rfover, rfoverval,
analogover, analogoverval, crs0, classctl, lomask, loctl, syncctl,
radio0, radio1, radio2, reg0, reg1, reg2, radio78, reg, index;
static const uint8_t rcc_table[] = {
0x02, 0x03, 0x01, 0x0f,
0x06, 0x07, 0x05, 0x0f,
0x0a, 0x0b, 0x09, 0x0f,
0x0e, 0x0f, 0x0d, 0x0f,
};
loctl = lomask = reg0 = classctl = crs0 = analogoverval = analogover =
rfoverval = rfover = cck3 = 0;
radio0 = BWN_RF_READ(mac, 0x43);
radio1 = BWN_RF_READ(mac, 0x51);
radio2 = BWN_RF_READ(mac, 0x52);
pgactl = BWN_PHY_READ(mac, BWN_PHY_PGACTL);
cck0 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x5a));
cck1 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x59));
cck2 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x58));
if (phy->type == BWN_PHYTYPE_B) {
cck3 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x30));
reg0 = BWN_READ_2(mac, 0x3ec);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), 0xff);
BWN_WRITE_2(mac, 0x3ec, 0x3f3f);
} else if (phy->gmode || phy->rev >= 2) {
rfover = BWN_PHY_READ(mac, BWN_PHY_RFOVER);
rfoverval = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL);
analogover = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVER);
analogoverval = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVERVAL);
crs0 = BWN_PHY_READ(mac, BWN_PHY_CRS0);
classctl = BWN_PHY_READ(mac, BWN_PHY_CLASSCTL);
BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0003);
BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffc);
BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0x7fff);
BWN_PHY_MASK(mac, BWN_PHY_CLASSCTL, 0xfffc);
if (BWN_HAS_LOOPBACK(phy)) {
lomask = BWN_PHY_READ(mac, BWN_PHY_LO_MASK);
loctl = BWN_PHY_READ(mac, BWN_PHY_LO_CTL);
if (phy->rev >= 3)
BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0xc020);
else
BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8020);
BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, 0);
}
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL,
bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL,
BWN_LPD(0, 1, 1)));
BWN_PHY_WRITE(mac, BWN_PHY_RFOVER,
bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVER, 0));
}
BWN_WRITE_2(mac, 0x3e2, BWN_READ_2(mac, 0x3e2) | 0x8000);
syncctl = BWN_PHY_READ(mac, BWN_PHY_SYNCCTL);
BWN_PHY_MASK(mac, BWN_PHY_SYNCCTL, 0xff7f);
reg1 = BWN_READ_2(mac, 0x3e6);
reg2 = BWN_READ_2(mac, 0x3f4);
if (phy->analog == 0)
BWN_WRITE_2(mac, 0x03e6, 0x0122);
else {
if (phy->analog >= 2)
BWN_PHY_SETMASK(mac, BWN_PHY_CCK(0x03), 0xffbf, 0x40);
BWN_WRITE_2(mac, BWN_CHANNEL_EXT,
(BWN_READ_2(mac, BWN_CHANNEL_EXT) | 0x2000));
}
reg = BWN_RF_READ(mac, 0x60);
index = (reg & 0x001e) >> 1;
rcc = (((rcc_table[index] << 1) | (reg & 0x0001)) | 0x0020);
if (phy->type == BWN_PHYTYPE_B)
BWN_RF_WRITE(mac, 0x78, 0x26);
if (phy->gmode || phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL,
bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL,
BWN_LPD(0, 1, 1)));
}
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xbfaf);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), 0x1403);
if (phy->gmode || phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL,
bwn_rf_2050_rfoverval(mac, BWN_PHY_RFOVERVAL,
BWN_LPD(0, 0, 1)));
}
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xbfa0);
BWN_RF_SET(mac, 0x51, 0x0004);
if (phy->rf_rev == 8)
BWN_RF_WRITE(mac, 0x43, 0x1f);
else {
BWN_RF_WRITE(mac, 0x52, 0);
BWN_RF_SETMASK(mac, 0x43, 0xfff0, 0x0009);
}
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0);
for (i = 0; i < 16; i++) {
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), 0x0480);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), 0xc810);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0x000d);
if (phy->gmode || phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL,
bwn_rf_2050_rfoverval(mac,
BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1)));
}
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0);
DELAY(10);
if (phy->gmode || phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL,
bwn_rf_2050_rfoverval(mac,
BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1)));
}
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xefb0);
DELAY(10);
if (phy->gmode || phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL,
bwn_rf_2050_rfoverval(mac,
BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 0)));
}
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xfff0);
DELAY(20);
tmp1 += BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0);
if (phy->gmode || phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL,
bwn_rf_2050_rfoverval(mac,
BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1)));
}
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0);
}
DELAY(10);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0);
tmp1++;
tmp1 >>= 9;
for (i = 0; i < 16; i++) {
radio78 = (BWN_BITREV4(i) << 1) | 0x0020;
BWN_RF_WRITE(mac, 0x78, radio78);
DELAY(10);
for (j = 0; j < 16; j++) {
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), 0x0d80);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), 0xc810);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0x000d);
if (phy->gmode || phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL,
bwn_rf_2050_rfoverval(mac,
BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1)));
}
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0);
DELAY(10);
if (phy->gmode || phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL,
bwn_rf_2050_rfoverval(mac,
BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1)));
}
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xefb0);
DELAY(10);
if (phy->gmode || phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL,
bwn_rf_2050_rfoverval(mac,
BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 0)));
}
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xfff0);
DELAY(10);
tmp2 += BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), 0);
if (phy->gmode || phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL,
bwn_rf_2050_rfoverval(mac,
BWN_PHY_RFOVERVAL, BWN_LPD(1, 0, 1)));
}
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xafb0);
}
tmp2++;
tmp2 >>= 8;
if (tmp1 < tmp2)
break;
}
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pgactl);
BWN_RF_WRITE(mac, 0x51, radio1);
BWN_RF_WRITE(mac, 0x52, radio2);
BWN_RF_WRITE(mac, 0x43, radio0);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x5a), cck0);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x59), cck1);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x58), cck2);
BWN_WRITE_2(mac, 0x3e6, reg1);
if (phy->analog != 0)
BWN_WRITE_2(mac, 0x3f4, reg2);
BWN_PHY_WRITE(mac, BWN_PHY_SYNCCTL, syncctl);
bwn_spu_workaround(mac, phy->chan);
if (phy->type == BWN_PHYTYPE_B) {
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), cck3);
BWN_WRITE_2(mac, 0x3ec, reg0);
} else if (phy->gmode) {
BWN_WRITE_2(mac, BWN_PHY_RADIO,
BWN_READ_2(mac, BWN_PHY_RADIO)
& 0x7fff);
BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, rfover);
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfoverval);
BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, analogover);
BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL,
analogoverval);
BWN_PHY_WRITE(mac, BWN_PHY_CRS0, crs0);
BWN_PHY_WRITE(mac, BWN_PHY_CLASSCTL, classctl);
if (BWN_HAS_LOOPBACK(phy)) {
BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, lomask);
BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, loctl);
}
}
return ((i > 15) ? radio78 : rcc);
}
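/*
 * "B6" PHY init path shared by the B and G PHYs: program the 2050
 * radio registers according to the radio revision, fill the PHY
 * table registers at 0x88-0xc7, and re-apply the current channel
 * and TX power settings.
 */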
static void
bwn_phy_init_b6(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_softc *sc = mac->mac_sc;
uint16_t offset, val;
uint8_t old_channel;
KASSERT(!(phy->rf_rev == 6 || phy->rf_rev == 7),
("%s:%d: fail", __func__, __LINE__));
BWN_PHY_WRITE(mac, 0x003e, 0x817a);
BWN_RF_WRITE(mac, 0x007a, BWN_RF_READ(mac, 0x007a) | 0x0058);
if (phy->rf_rev == 4 || phy->rf_rev == 5) {
BWN_RF_WRITE(mac, 0x51, 0x37);
BWN_RF_WRITE(mac, 0x52, 0x70);
BWN_RF_WRITE(mac, 0x53, 0xb3);
BWN_RF_WRITE(mac, 0x54, 0x9b);
BWN_RF_WRITE(mac, 0x5a, 0x88);
BWN_RF_WRITE(mac, 0x5b, 0x88);
BWN_RF_WRITE(mac, 0x5d, 0x88);
BWN_RF_WRITE(mac, 0x5e, 0x88);
BWN_RF_WRITE(mac, 0x7d, 0x88);
bwn_hf_write(mac,
bwn_hf_read(mac) | BWN_HF_TSSI_RESET_PSM_WORKAROUN);
}
if (phy->rf_rev == 8) {
BWN_RF_WRITE(mac, 0x51, 0);
BWN_RF_WRITE(mac, 0x52, 0x40);
BWN_RF_WRITE(mac, 0x53, 0xb7);
BWN_RF_WRITE(mac, 0x54, 0x98);
BWN_RF_WRITE(mac, 0x5a, 0x88);
BWN_RF_WRITE(mac, 0x5b, 0x6b);
BWN_RF_WRITE(mac, 0x5c, 0x0f);
if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_ALTIQ) {
BWN_RF_WRITE(mac, 0x5d, 0xfa);
BWN_RF_WRITE(mac, 0x5e, 0xd8);
} else {
BWN_RF_WRITE(mac, 0x5d, 0xf5);
BWN_RF_WRITE(mac, 0x5e, 0xb8);
}
BWN_RF_WRITE(mac, 0x0073, 0x0003);
BWN_RF_WRITE(mac, 0x007d, 0x00a8);
BWN_RF_WRITE(mac, 0x007c, 0x0001);
BWN_RF_WRITE(mac, 0x007e, 0x0008);
}
for (val = 0x1e1f, offset = 0x0088; offset < 0x0098; offset++) {
BWN_PHY_WRITE(mac, offset, val);
val -= 0x0202;
}
for (val = 0x3e3f, offset = 0x0098; offset < 0x00a8; offset++) {
BWN_PHY_WRITE(mac, offset, val);
val -= 0x0202;
}
for (val = 0x2120, offset = 0x00a8; offset < 0x00c8; offset++) {
BWN_PHY_WRITE(mac, offset, (val & 0x3f3f));
val += 0x0202;
}
if (phy->type == BWN_PHYTYPE_G) {
BWN_RF_SET(mac, 0x007a, 0x0020);
BWN_RF_SET(mac, 0x0051, 0x0004);
BWN_PHY_SET(mac, 0x0802, 0x0100);
BWN_PHY_SET(mac, 0x042b, 0x2000);
BWN_PHY_WRITE(mac, 0x5b, 0);
BWN_PHY_WRITE(mac, 0x5c, 0);
}
old_channel = phy->chan;
bwn_phy_g_switch_chan(mac, (old_channel >= 8) ? 1 : 13, 0);
BWN_RF_WRITE(mac, 0x0050, 0x0020);
BWN_RF_WRITE(mac, 0x0050, 0x0023);
DELAY(40);
if (phy->rf_rev < 6 || phy->rf_rev == 8) {
BWN_RF_WRITE(mac, 0x7c, BWN_RF_READ(mac, 0x7c) | 0x0002);
BWN_RF_WRITE(mac, 0x50, 0x20);
}
if (phy->rf_rev <= 2) {
BWN_RF_WRITE(mac, 0x7c, 0x20);
BWN_RF_WRITE(mac, 0x5a, 0x70);
BWN_RF_WRITE(mac, 0x5b, 0x7b);
BWN_RF_WRITE(mac, 0x5c, 0xb0);
}
BWN_RF_SETMASK(mac, 0x007a, 0x00f8, 0x0007);
bwn_phy_g_switch_chan(mac, old_channel, 0);
BWN_PHY_WRITE(mac, 0x0014, 0x0200);
if (phy->rf_rev >= 6)
BWN_PHY_WRITE(mac, 0x2a, 0x88c2);
else
BWN_PHY_WRITE(mac, 0x2a, 0x8ac0);
BWN_PHY_WRITE(mac, 0x0038, 0x0668);
bwn_phy_g_set_txpwr_sub(mac, &pg->pg_bbatt, &pg->pg_rfatt,
pg->pg_txctl);
if (phy->rf_rev <= 5)
BWN_PHY_SETMASK(mac, 0x5d, 0xff80, 0x0003);
if (phy->rf_rev <= 2)
BWN_RF_WRITE(mac, 0x005d, 0x000d);
if (phy->analog == 4) {
BWN_WRITE_2(mac, 0x3e4, 9);
BWN_PHY_MASK(mac, 0x61, 0x0fff);
} else
BWN_PHY_SETMASK(mac, 0x0002, 0xffc0, 0x0004);
if (phy->type == BWN_PHYTYPE_B)
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
else if (phy->type == BWN_PHYTYPE_G)
BWN_WRITE_2(mac, 0x03e6, 0x0);
}
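/*
 * Common OFDM init tail for the A and G PHY paths: apply the Encore
 * handling for PHY rev >= 6, run the workaround tables through
 * bwn_wa_init(), and set the PA-control mask on boards that have
 * BWN_BFL_PACTRL.
 */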
static void
bwn_phy_init_a(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_softc *sc = mac->mac_sc;
KASSERT(phy->type == BWN_PHYTYPE_A || phy->type == BWN_PHYTYPE_G,
("%s:%d: fail", __func__, __LINE__));
if (phy->rev >= 6) {
if (phy->type == BWN_PHYTYPE_A)
BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x1b), ~0x1000);
if (BWN_PHY_READ(mac, BWN_PHY_ENCORE) & BWN_PHY_ENCORE_EN)
BWN_PHY_SET(mac, BWN_PHY_ENCORE, 0x0010);
else
BWN_PHY_MASK(mac, BWN_PHY_ENCORE, ~0x1010);
}
bwn_wa_init(mac);
if (phy->type == BWN_PHYTYPE_G &&
(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL))
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x6e), 0xe000, 0x3cf);
}
static void
bwn_wa_write_noisescale(struct bwn_mac *mac, const uint16_t *nst)
{
int i;
for (i = 0; i < BWN_TAB_NOISESCALE_SIZE; i++)
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_NOISESCALE, i, nst[i]);
}
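/*
 * Program the AGC OFDM tables (rev 1 PHYs use the *_R1 layout) and
 * the related gain/threshold PHY registers.
 */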
static void
bwn_wa_agc(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
if (phy->rev == 1) {
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 0, 254);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 1, 13);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 2, 19);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1_R1, 3, 25);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 0, 0x2710);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 1, 0x9b83);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 2, 0x9b83);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, 3, 0x0f8d);
BWN_PHY_WRITE(mac, BWN_PHY_LMS, 4);
} else {
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 0, 254);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 1, 13);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 2, 19);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC1, 3, 25);
}
BWN_PHY_SETMASK(mac, BWN_PHY_CCKSHIFTBITS_WA, (uint16_t)~0xff00,
0x5700);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x1a), ~0x007f, 0x000f);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x1a), ~0x3f80, 0x2b80);
BWN_PHY_SETMASK(mac, BWN_PHY_ANTWRSETT, 0xf0ff, 0x0300);
BWN_RF_SET(mac, 0x7a, 0x0008);
BWN_PHY_SETMASK(mac, BWN_PHY_N1P1GAIN, ~0x000f, 0x0008);
BWN_PHY_SETMASK(mac, BWN_PHY_P1P2GAIN, ~0x0f00, 0x0600);
BWN_PHY_SETMASK(mac, BWN_PHY_N1N2GAIN, ~0x0f00, 0x0700);
BWN_PHY_SETMASK(mac, BWN_PHY_N1P1GAIN, ~0x0f00, 0x0100);
if (phy->rev == 1)
BWN_PHY_SETMASK(mac, BWN_PHY_N1N2GAIN, ~0x000f, 0x0007);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x88), ~0x00ff, 0x001c);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x88), ~0x3f00, 0x0200);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x96), ~0x00ff, 0x001c);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x89), ~0x00ff, 0x0020);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x89), ~0x3f00, 0x0200);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x82), ~0x00ff, 0x002e);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x96), (uint16_t)~0xff00, 0x1a00);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x81), ~0x00ff, 0x0028);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x81), (uint16_t)~0xff00, 0x2c00);
if (phy->rev == 1) {
BWN_PHY_WRITE(mac, BWN_PHY_PEAK_COUNT, 0x092b);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x1b), ~0x001e, 0x0002);
} else {
BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x1b), ~0x001e);
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x1f), 0x287a);
BWN_PHY_SETMASK(mac, BWN_PHY_LPFGAINCTL, ~0x000f, 0x0004);
if (phy->rev >= 6) {
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x22), 0x287a);
BWN_PHY_SETMASK(mac, BWN_PHY_LPFGAINCTL,
(uint16_t)~0xf000, 0x3000);
}
}
BWN_PHY_SETMASK(mac, BWN_PHY_DIVSRCHIDX, 0x8080, 0x7874);
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x8e), 0x1c00);
if (phy->rev == 1) {
BWN_PHY_SETMASK(mac, BWN_PHY_DIVP1P2GAIN, ~0x0f00, 0x0600);
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x8b), 0x005e);
BWN_PHY_SETMASK(mac, BWN_PHY_ANTWRSETT, ~0x00ff, 0x001e);
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x8d), 0x0002);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 0, 0);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 1, 7);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 2, 16);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3_R1, 3, 28);
} else {
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 0, 0);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 1, 7);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 2, 16);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC3, 3, 28);
}
if (phy->rev >= 6) {
BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x26), ~0x0003);
BWN_PHY_MASK(mac, BWN_PHY_OFDM(0x26), ~0x1000);
}
BWN_PHY_READ(mac, BWN_PHY_VERSION_OFDM);
}
static void
bwn_wa_grev1(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
int i;
static const uint16_t bwn_tab_finefreqg[] = BWN_TAB_FINEFREQ_G;
static const uint32_t bwn_tab_retard[] = BWN_TAB_RETARD;
static const uint32_t bwn_tab_rotor[] = BWN_TAB_ROTOR;
KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__));
/* init CRSTHRES and ANTDWELL */
if (phy->rev == 1) {
BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1_R1, 0x4f19);
} else if (phy->rev == 2) {
BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x1861);
BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0271);
BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800);
} else {
BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x0098);
BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0070);
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc9), 0x0080);
BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800);
}
BWN_PHY_SETMASK(mac, BWN_PHY_CRS0, ~0x03c0, 0xd000);
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0x2c), 0x005a);
BWN_PHY_WRITE(mac, BWN_PHY_CCKSHIFTBITS, 0x0026);
/* XXX support PHY-A??? */
for (i = 0; i < N(bwn_tab_finefreqg); i++)
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DACRFPABB, i,
bwn_tab_finefreqg[i]);
/* XXX support PHY-A??? */
if (phy->rev == 1)
for (i = 0; i < N(bwn_tab_noise_g1); i++)
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i,
bwn_tab_noise_g1[i]);
else
for (i = 0; i < N(bwn_tab_noise_g2); i++)
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i,
bwn_tab_noise_g2[i]);
for (i = 0; i < N(bwn_tab_rotor); i++)
bwn_ofdmtab_write_4(mac, BWN_OFDMTAB_ROTOR, i,
bwn_tab_rotor[i]);
/* XXX support PHY-A??? */
if (phy->rev >= 6) {
if (BWN_PHY_READ(mac, BWN_PHY_ENCORE) &
BWN_PHY_ENCORE_EN)
bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g3);
else
bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g2);
} else
bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g1);
for (i = 0; i < N(bwn_tab_retard); i++)
bwn_ofdmtab_write_4(mac, BWN_OFDMTAB_ADVRETARD, i,
bwn_tab_retard[i]);
if (phy->rev == 1) {
for (i = 0; i < 16; i++)
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI_R1,
i, 0x0020);
} else {
for (i = 0; i < 32; i++)
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI, i, 0x0820);
}
bwn_wa_agc(mac);
}
static void
bwn_wa_grev26789(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
int i;
static const uint16_t bwn_tab_sigmasqr2[] = BWN_TAB_SIGMASQR2;
uint16_t ofdmrev;
KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__));
bwn_gtab_write(mac, BWN_GTAB_ORIGTR, 0, 0xc480);
/* init CRSTHRES and ANTDWELL */
if (phy->rev == 1)
BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1_R1, 0x4f19);
else if (phy->rev == 2) {
BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x1861);
BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0271);
BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800);
} else {
BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES1, 0x0098);
BWN_PHY_WRITE(mac, BWN_PHY_CRSTHRES2, 0x0070);
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xc9), 0x0080);
BWN_PHY_SET(mac, BWN_PHY_ANTDWELL, 0x0800);
}
for (i = 0; i < 64; i++)
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_RSSI, i, i);
/* XXX support PHY-A??? */
if (phy->rev == 1)
for (i = 0; i < N(bwn_tab_noise_g1); i++)
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i,
bwn_tab_noise_g1[i]);
else
for (i = 0; i < N(bwn_tab_noise_g2); i++)
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_AGC2, i,
bwn_tab_noise_g2[i]);
/* XXX support PHY-A??? */
if (phy->rev >= 6) {
if (BWN_PHY_READ(mac, BWN_PHY_ENCORE) &
BWN_PHY_ENCORE_EN)
bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g3);
else
bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g2);
} else
bwn_wa_write_noisescale(mac, bwn_tab_noisescale_g1);
for (i = 0; i < N(bwn_tab_sigmasqr2); i++)
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_MINSIGSQ, i,
bwn_tab_sigmasqr2[i]);
if (phy->rev == 1) {
for (i = 0; i < 16; i++)
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI_R1, i,
0x0020);
} else {
for (i = 0; i < 32; i++)
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_WRSSI, i, 0x0820);
}
bwn_wa_agc(mac);
ofdmrev = BWN_PHY_READ(mac, BWN_PHY_VERSION_OFDM) & BWN_PHYVER_VERSION;
if (ofdmrev > 2) {
if (phy->type == BWN_PHYTYPE_A)
BWN_PHY_WRITE(mac, BWN_PHY_PWRDOWN, 0x1808);
else
BWN_PHY_WRITE(mac, BWN_PHY_PWRDOWN, 0x1000);
} else {
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DAC, 3, 0x1044);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DAC, 4, 0x7201);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_DAC, 6, 0x0040);
}
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_0F, 2, 15);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_0F, 3, 20);
}
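/*
 * Apply the G PHY workarounds for the given PHY revision and the
 * board-specific table fixups (BU4306, external LNA, FEM).
 */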
static void
bwn_wa_init(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_softc *sc = mac->mac_sc;
KASSERT(phy->type == BWN_PHYTYPE_G, ("%s fail", __func__));
switch (phy->rev) {
case 1:
bwn_wa_grev1(mac);
break;
case 2:
case 6:
case 7:
case 8:
case 9:
bwn_wa_grev26789(mac);
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
if (siba_get_pci_subvendor(sc->sc_dev) != SIBA_BOARDVENDOR_BCM ||
siba_get_pci_subdevice(sc->sc_dev) != SIBA_BOARD_BU4306 ||
siba_get_pci_revid(sc->sc_dev) != 0x17) {
if (phy->rev < 2) {
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX_R1, 1,
0x0002);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX_R1, 2,
0x0001);
} else {
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 1, 0x0002);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX, 2, 0x0001);
if ((siba_sprom_get_bf_lo(sc->sc_dev) &
BWN_BFL_EXTLNA) &&
(phy->rev >= 7)) {
BWN_PHY_MASK(mac, BWN_PHY_EXTG(0x11), 0xf7ff);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX,
0x0020, 0x0001);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX,
0x0021, 0x0001);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX,
0x0022, 0x0001);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX,
0x0023, 0x0000);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX,
0x0000, 0x0000);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_GAINX,
0x0003, 0x0002);
}
}
}
if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM) {
BWN_PHY_WRITE(mac, BWN_PHY_GTABCTL, 0x3120);
BWN_PHY_WRITE(mac, BWN_PHY_GTABDATA, 0xc480);
}
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_11, 0, 0);
bwn_ofdmtab_write_2(mac, BWN_OFDMTAB_UNKNOWN_11, 1, 0);
}
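/*
 * Write a 16-bit OFDM table entry. The table address register is
 * reloaded only when the cached access direction or address indicates
 * that the write is not sequential; bwn_ofdmtab_write_4() does the
 * same for 32-bit entries.
 */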
static void
bwn_ofdmtab_write_2(struct bwn_mac *mac, uint16_t table, uint16_t offset,
uint16_t value)
{
struct bwn_phy_g *pg = &mac->mac_phy.phy_g;
uint16_t addr;
addr = table + offset;
if ((pg->pg_ofdmtab_dir != BWN_OFDMTAB_DIR_WRITE) ||
(addr - 1 != pg->pg_ofdmtab_addr)) {
BWN_PHY_WRITE(mac, BWN_PHY_OTABLECTL, addr);
pg->pg_ofdmtab_dir = BWN_OFDMTAB_DIR_WRITE;
}
pg->pg_ofdmtab_addr = addr;
BWN_PHY_WRITE(mac, BWN_PHY_OTABLEI, value);
}
static void
bwn_ofdmtab_write_4(struct bwn_mac *mac, uint16_t table, uint16_t offset,
uint32_t value)
{
struct bwn_phy_g *pg = &mac->mac_phy.phy_g;
uint16_t addr;
addr = table + offset;
if ((pg->pg_ofdmtab_dir != BWN_OFDMTAB_DIR_WRITE) ||
(addr - 1 != pg->pg_ofdmtab_addr)) {
BWN_PHY_WRITE(mac, BWN_PHY_OTABLECTL, addr);
pg->pg_ofdmtab_dir = BWN_OFDMTAB_DIR_WRITE;
}
pg->pg_ofdmtab_addr = addr;
BWN_PHY_WRITE(mac, BWN_PHY_OTABLEI, value);
BWN_PHY_WRITE(mac, BWN_PHY_OTABLEQ, (value >> 16));
}
static void
bwn_gtab_write(struct bwn_mac *mac, uint16_t table, uint16_t offset,
uint16_t value)
{
BWN_PHY_WRITE(mac, BWN_PHY_GTABCTL, table + offset);
BWN_PHY_WRITE(mac, BWN_PHY_GTABDATA, value);
}
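/*
 * Transmit a dummy frame (OFDM or CCK template) from template RAM and
 * poll the TX status registers until it has gone out; used by the
 * calibration routines.
 */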
static void
bwn_dummy_transmission(struct bwn_mac *mac, int ofdm, int paon)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_softc *sc = mac->mac_sc;
unsigned int i, max_loop;
uint16_t value;
uint32_t buffer[5] = {
0x00000000, 0x00d40000, 0x00000000, 0x01000000, 0x00000000
};
if (ofdm) {
max_loop = 0x1e;
buffer[0] = 0x000201cc;
} else {
max_loop = 0xfa;
buffer[0] = 0x000b846e;
}
BWN_ASSERT_LOCKED(mac->mac_sc);
for (i = 0; i < 5; i++)
bwn_ram_write(mac, i * 4, buffer[i]);
BWN_WRITE_2(mac, 0x0568, 0x0000);
BWN_WRITE_2(mac, 0x07c0,
(siba_get_revid(sc->sc_dev) < 11) ? 0x0000 : 0x0100);
value = ((phy->type == BWN_PHYTYPE_A) ? 0x41 : 0x40);
BWN_WRITE_2(mac, 0x050c, value);
if (phy->type == BWN_PHYTYPE_LP)
BWN_WRITE_2(mac, 0x0514, 0x1a02);
BWN_WRITE_2(mac, 0x0508, 0x0000);
BWN_WRITE_2(mac, 0x050a, 0x0000);
BWN_WRITE_2(mac, 0x054c, 0x0000);
BWN_WRITE_2(mac, 0x056a, 0x0014);
BWN_WRITE_2(mac, 0x0568, 0x0826);
BWN_WRITE_2(mac, 0x0500, 0x0000);
if (phy->type == BWN_PHYTYPE_LP)
BWN_WRITE_2(mac, 0x0502, 0x0050);
else
BWN_WRITE_2(mac, 0x0502, 0x0030);
if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5)
BWN_RF_WRITE(mac, 0x0051, 0x0017);
for (i = 0x00; i < max_loop; i++) {
value = BWN_READ_2(mac, 0x050e);
if (value & 0x0080)
break;
DELAY(10);
}
for (i = 0x00; i < 0x0a; i++) {
value = BWN_READ_2(mac, 0x050e);
if (value & 0x0400)
break;
DELAY(10);
}
for (i = 0x00; i < 0x19; i++) {
value = BWN_READ_2(mac, 0x0690);
if (!(value & 0x0100))
break;
DELAY(10);
}
if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5)
BWN_RF_WRITE(mac, 0x0051, 0x0037);
}
static void
bwn_ram_write(struct bwn_mac *mac, uint16_t offset, uint32_t val)
{
uint32_t macctl;
KASSERT(offset % 4 == 0, ("%s:%d: fail", __func__, __LINE__));
macctl = BWN_READ_4(mac, BWN_MACCTL);
if (macctl & BWN_MACCTL_BIGENDIAN)
printf("TODO: need swap\n");
BWN_WRITE_4(mac, BWN_RAM_CONTROL, offset);
BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE);
BWN_WRITE_4(mac, BWN_RAM_DATA, val);
}
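/*
 * Write an I/Q local oscillator control pair to BWN_PHY_LO_CTL
 * (q in the low byte, i in the high byte).
 */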
static void
bwn_lo_write(struct bwn_mac *mac, struct bwn_loctl *ctl)
{
uint16_t value;
KASSERT(mac->mac_phy.type == BWN_PHYTYPE_G,
("%s:%d: fail", __func__, __LINE__));
value = (uint8_t) (ctl->q);
value |= ((uint8_t) (ctl->i)) << 8;
BWN_PHY_WRITE(mac, BWN_PHY_LO_CTL, value);
}
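/*
 * Apply the given LNA/PGA/TRSW-RX gain settings through the RF
 * override registers (PGA control only on non-gmode PHYs) and return
 * the LO feedthrough measured via BWN_PHY_LO_LEAKAGE.
 */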
static uint16_t
bwn_lo_calcfeed(struct bwn_mac *mac,
uint16_t lna, uint16_t pga, uint16_t trsw_rx)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_softc *sc = mac->mac_sc;
uint16_t rfover;
uint16_t feedthrough;
if (phy->gmode) {
lna <<= BWN_PHY_RFOVERVAL_LNA_SHIFT;
pga <<= BWN_PHY_RFOVERVAL_PGA_SHIFT;
KASSERT((lna & ~BWN_PHY_RFOVERVAL_LNA) == 0,
("%s:%d: fail", __func__, __LINE__));
KASSERT((pga & ~BWN_PHY_RFOVERVAL_PGA) == 0,
("%s:%d: fail", __func__, __LINE__));
trsw_rx &= (BWN_PHY_RFOVERVAL_TRSWRX | BWN_PHY_RFOVERVAL_BW);
rfover = BWN_PHY_RFOVERVAL_UNK | pga | lna | trsw_rx;
if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA) &&
phy->rev > 6)
rfover |= BWN_PHY_RFOVERVAL_EXTLNA;
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xe300);
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfover);
DELAY(10);
rfover |= BWN_PHY_RFOVERVAL_BW_LBW;
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfover);
DELAY(10);
rfover |= BWN_PHY_RFOVERVAL_BW_LPF;
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, rfover);
DELAY(10);
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xf300);
} else {
pga |= BWN_PHY_PGACTL_UNKNOWN;
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pga);
DELAY(10);
pga |= BWN_PHY_PGACTL_LOWBANDW;
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pga);
DELAY(10);
pga |= BWN_PHY_PGACTL_LPF;
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, pga);
}
DELAY(21);
feedthrough = BWN_PHY_READ(mac, BWN_PHY_LO_LEAKAGE);
return (feedthrough);
}
static uint16_t
bwn_lo_txctl_regtable(struct bwn_mac *mac,
uint16_t *value, uint16_t *pad_mix_gain)
{
struct bwn_phy *phy = &mac->mac_phy;
uint16_t reg, v, padmix;
if (phy->type == BWN_PHYTYPE_B) {
v = 0x30;
if (phy->rf_rev <= 5) {
reg = 0x43;
padmix = 0;
} else {
reg = 0x52;
padmix = 5;
}
} else {
if (phy->rev >= 2 && phy->rf_rev == 8) {
reg = 0x43;
v = 0x10;
padmix = 2;
} else {
reg = 0x52;
v = 0x30;
padmix = 5;
}
}
if (value)
*value = v;
if (pad_mix_gain)
*pad_mix_gain = padmix;
return (reg);
}
static void
bwn_lo_measure_txctl_values(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_txpwr_loctl *lo = &pg->pg_loctl;
uint16_t reg, mask;
uint16_t trsw_rx, pga;
uint16_t rf_pctl_reg;
static const uint8_t tx_bias_values[] = {
0x09, 0x08, 0x0a, 0x01, 0x00,
0x02, 0x05, 0x04, 0x06,
};
static const uint8_t tx_magn_values[] = {
0x70, 0x40,
};
if (!BWN_HAS_LOOPBACK(phy)) {
rf_pctl_reg = 6;
trsw_rx = 2;
pga = 0;
} else {
int lb_gain;
trsw_rx = 0;
lb_gain = pg->pg_max_lb_gain / 2;
if (lb_gain > 10) {
rf_pctl_reg = 0;
pga = abs(10 - lb_gain) / 6;
pga = MIN(MAX(pga, 0), 15);
} else {
int cmp_val;
int tmp;
pga = 0;
cmp_val = 0x24;
if ((phy->rev >= 2) &&
(phy->rf_ver == 0x2050) && (phy->rf_rev == 8))
cmp_val = 0x3c;
tmp = lb_gain;
if ((10 - lb_gain) < cmp_val)
tmp = (10 - lb_gain);
if (tmp < 0)
tmp += 6;
else
tmp += 3;
cmp_val /= 4;
tmp /= 4;
if (tmp >= cmp_val)
rf_pctl_reg = cmp_val;
else
rf_pctl_reg = tmp;
}
}
BWN_RF_SETMASK(mac, 0x43, 0xfff0, rf_pctl_reg);
bwn_phy_g_set_bbatt(mac, 2);
reg = bwn_lo_txctl_regtable(mac, &mask, NULL);
mask = ~mask;
BWN_RF_MASK(mac, reg, mask);
if (BWN_HAS_TXMAG(phy)) {
int i, j;
int feedthrough;
int min_feedth = 0xffff;
uint8_t tx_magn, tx_bias;
for (i = 0; i < N(tx_magn_values); i++) {
tx_magn = tx_magn_values[i];
BWN_RF_SETMASK(mac, 0x52, 0xff0f, tx_magn);
for (j = 0; j < N(tx_bias_values); j++) {
tx_bias = tx_bias_values[j];
BWN_RF_SETMASK(mac, 0x52, 0xfff0, tx_bias);
feedthrough = bwn_lo_calcfeed(mac, 0, pga,
trsw_rx);
if (feedthrough < min_feedth) {
lo->tx_bias = tx_bias;
lo->tx_magn = tx_magn;
min_feedth = feedthrough;
}
if (lo->tx_bias == 0)
break;
}
BWN_RF_WRITE(mac, 0x52,
    (BWN_RF_READ(mac, 0x52) & 0xff00) | lo->tx_bias | lo->tx_magn);
}
} else {
lo->tx_magn = 0;
lo->tx_bias = 0;
BWN_RF_MASK(mac, 0x52, 0xfff0);
}
BWN_GETTIME(lo->txctl_measured_time);
}
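/*
 * Read the 64-bit LO power vector from shared memory at offset 0x310,
 * clearing each word after it has been read.
 */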
static void
bwn_lo_get_powervector(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_txpwr_loctl *lo = &pg->pg_loctl;
int i;
uint64_t tmp;
uint64_t power_vector = 0;
for (i = 0; i < 8; i += 2) {
tmp = bwn_shm_read_2(mac, BWN_SHARED, 0x310 + i);
power_vector |= (tmp << (i * 8));
bwn_shm_write_2(mac, BWN_SHARED, 0x310 + i, 0);
}
if (power_vector)
lo->power_vector = power_vector;
BWN_GETTIME(lo->pwr_vec_read_time);
}
static void
bwn_lo_measure_gain_values(struct bwn_mac *mac, int16_t max_rx_gain,
int use_trsw_rx)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
uint16_t tmp;
if (max_rx_gain < 0)
max_rx_gain = 0;
if (BWN_HAS_LOOPBACK(phy)) {
int trsw_rx = 0;
int trsw_rx_gain;
if (use_trsw_rx) {
trsw_rx_gain = pg->pg_trsw_rx_gain / 2;
if (max_rx_gain >= trsw_rx_gain) {
trsw_rx_gain = max_rx_gain - trsw_rx_gain;
trsw_rx = 0x20;
}
} else
trsw_rx_gain = max_rx_gain;
if (trsw_rx_gain < 9) {
pg->pg_lna_lod_gain = 0;
} else {
pg->pg_lna_lod_gain = 1;
trsw_rx_gain -= 8;
}
trsw_rx_gain = MIN(MAX(trsw_rx_gain, 0), 0x2d);
pg->pg_pga_gain = trsw_rx_gain / 3;
if (pg->pg_pga_gain >= 5) {
pg->pg_pga_gain -= 5;
pg->pg_lna_gain = 2;
} else
pg->pg_lna_gain = 0;
} else {
pg->pg_lna_gain = 0;
pg->pg_trsw_rx_gain = 0x20;
if (max_rx_gain >= 0x14) {
pg->pg_lna_lod_gain = 1;
pg->pg_pga_gain = 2;
} else if (max_rx_gain >= 0x12) {
pg->pg_lna_lod_gain = 1;
pg->pg_pga_gain = 1;
} else if (max_rx_gain >= 0xf) {
pg->pg_lna_lod_gain = 1;
pg->pg_pga_gain = 0;
} else {
pg->pg_lna_lod_gain = 0;
pg->pg_pga_gain = 0;
}
}
tmp = BWN_RF_READ(mac, 0x7a);
if (pg->pg_lna_lod_gain == 0)
tmp &= ~0x0008;
else
tmp |= 0x0008;
BWN_RF_WRITE(mac, 0x7a, tmp);
}
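/*
 * Save the PHY/RF registers that the LO calibration is about to
 * clobber and switch the hardware into the measurement configuration
 * (overrides enabled, channel 6 selected, TX control values
 * re-measured if the cached ones have expired).
 */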
static void
bwn_lo_save(struct bwn_mac *mac, struct bwn_lo_g_value *sav)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_softc *sc = mac->mac_sc;
struct bwn_txpwr_loctl *lo = &pg->pg_loctl;
struct timespec ts;
uint16_t tmp;
if (bwn_has_hwpctl(mac)) {
sav->phy_lomask = BWN_PHY_READ(mac, BWN_PHY_LO_MASK);
sav->phy_extg = BWN_PHY_READ(mac, BWN_PHY_EXTG(0x01));
sav->phy_dacctl_hwpctl = BWN_PHY_READ(mac, BWN_PHY_DACCTL);
sav->phy_cck4 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x14));
sav->phy_hpwr_tssictl = BWN_PHY_READ(mac, BWN_PHY_HPWR_TSSICTL);
BWN_PHY_SET(mac, BWN_PHY_HPWR_TSSICTL, 0x100);
BWN_PHY_SET(mac, BWN_PHY_EXTG(0x01), 0x40);
BWN_PHY_SET(mac, BWN_PHY_DACCTL, 0x40);
BWN_PHY_SET(mac, BWN_PHY_CCK(0x14), 0x200);
}
if (phy->type == BWN_PHYTYPE_B &&
phy->rf_ver == 0x2050 && phy->rf_rev < 6) {
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x16), 0x410);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x17), 0x820);
}
if (phy->rev >= 2) {
sav->phy_analogover = BWN_PHY_READ(mac, BWN_PHY_ANALOGOVER);
sav->phy_analogoverval =
BWN_PHY_READ(mac, BWN_PHY_ANALOGOVERVAL);
sav->phy_rfover = BWN_PHY_READ(mac, BWN_PHY_RFOVER);
sav->phy_rfoverval = BWN_PHY_READ(mac, BWN_PHY_RFOVERVAL);
sav->phy_classctl = BWN_PHY_READ(mac, BWN_PHY_CLASSCTL);
sav->phy_cck3 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x3e));
sav->phy_crs0 = BWN_PHY_READ(mac, BWN_PHY_CRS0);
BWN_PHY_MASK(mac, BWN_PHY_CLASSCTL, 0xfffc);
BWN_PHY_MASK(mac, BWN_PHY_CRS0, 0x7fff);
BWN_PHY_SET(mac, BWN_PHY_ANALOGOVER, 0x0003);
BWN_PHY_MASK(mac, BWN_PHY_ANALOGOVERVAL, 0xfffc);
if (phy->type == BWN_PHYTYPE_G) {
if ((phy->rev >= 7) &&
(siba_sprom_get_bf_lo(sc->sc_dev) &
BWN_BFL_EXTLNA)) {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0x933);
} else {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0x133);
}
} else {
BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, 0);
}
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x3e), 0);
}
sav->reg0 = BWN_READ_2(mac, 0x3f4);
sav->reg1 = BWN_READ_2(mac, 0x3e2);
sav->rf0 = BWN_RF_READ(mac, 0x43);
sav->rf1 = BWN_RF_READ(mac, 0x7a);
sav->phy_pgactl = BWN_PHY_READ(mac, BWN_PHY_PGACTL);
sav->phy_cck2 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x2a));
sav->phy_syncctl = BWN_PHY_READ(mac, BWN_PHY_SYNCCTL);
sav->phy_dacctl = BWN_PHY_READ(mac, BWN_PHY_DACCTL);
if (!BWN_HAS_TXMAG(phy)) {
sav->rf2 = BWN_RF_READ(mac, 0x52);
sav->rf2 &= 0x00f0;
}
if (phy->type == BWN_PHYTYPE_B) {
sav->phy_cck0 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x30));
sav->phy_cck1 = BWN_PHY_READ(mac, BWN_PHY_CCK(0x06));
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), 0x00ff);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x06), 0x3f3f);
} else {
BWN_WRITE_2(mac, 0x3e2, BWN_READ_2(mac, 0x3e2)
| 0x8000);
}
BWN_WRITE_2(mac, 0x3f4, BWN_READ_2(mac, 0x3f4)
& 0xf000);
tmp =
(phy->type == BWN_PHYTYPE_G) ? BWN_PHY_LO_MASK : BWN_PHY_CCK(0x2e);
BWN_PHY_WRITE(mac, tmp, 0x007f);
tmp = sav->phy_syncctl;
BWN_PHY_WRITE(mac, BWN_PHY_SYNCCTL, tmp & 0xff7f);
tmp = sav->rf1;
BWN_RF_WRITE(mac, 0x007a, tmp & 0xfff0);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2a), 0x8a3);
if (phy->type == BWN_PHYTYPE_G ||
(phy->type == BWN_PHYTYPE_B &&
phy->rf_ver == 0x2050 && phy->rf_rev >= 6)) {
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), 0x1003);
} else
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2b), 0x0802);
if (phy->rev >= 2)
bwn_dummy_transmission(mac, 0, 1);
bwn_phy_g_switch_chan(mac, 6, 0);
BWN_RF_READ(mac, 0x51);
if (phy->type == BWN_PHYTYPE_G)
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0);
nanouptime(&ts);
if (time_before(lo->txctl_measured_time,
(ts.tv_nsec / 1000000 + ts.tv_sec * 1000) - BWN_LO_TXCTL_EXPIRE))
bwn_lo_measure_txctl_values(mac);
if (phy->type == BWN_PHYTYPE_G && phy->rev >= 3)
BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0xc078);
else {
if (phy->type == BWN_PHYTYPE_B)
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x8078);
else
BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, 0x8078);
}
}
static void
bwn_lo_restore(struct bwn_mac *mac, struct bwn_lo_g_value *sav)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
uint16_t tmp;
if (phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, 0xe300);
tmp = (pg->pg_pga_gain << 8);
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, tmp | 0xa0);
DELAY(5);
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, tmp | 0xa2);
DELAY(2);
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, tmp | 0xa3);
} else {
tmp = (pg->pg_pga_gain | 0xefa0);
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, tmp);
}
if (phy->type == BWN_PHYTYPE_G) {
if (phy->rev >= 3)
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0xc078);
else
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2e), 0x8078);
if (phy->rev >= 2)
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x0202);
else
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2f), 0x0101);
}
BWN_WRITE_2(mac, 0x3f4, sav->reg0);
BWN_PHY_WRITE(mac, BWN_PHY_PGACTL, sav->phy_pgactl);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x2a), sav->phy_cck2);
BWN_PHY_WRITE(mac, BWN_PHY_SYNCCTL, sav->phy_syncctl);
BWN_PHY_WRITE(mac, BWN_PHY_DACCTL, sav->phy_dacctl);
BWN_RF_WRITE(mac, 0x43, sav->rf0);
BWN_RF_WRITE(mac, 0x7a, sav->rf1);
if (!BWN_HAS_TXMAG(phy)) {
tmp = sav->rf2;
BWN_RF_SETMASK(mac, 0x52, 0xff0f, tmp);
}
BWN_WRITE_2(mac, 0x3e2, sav->reg1);
if (phy->type == BWN_PHYTYPE_B &&
phy->rf_ver == 0x2050 && phy->rf_rev <= 5) {
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x30), sav->phy_cck0);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x06), sav->phy_cck1);
}
if (phy->rev >= 2) {
BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVER, sav->phy_analogover);
BWN_PHY_WRITE(mac, BWN_PHY_ANALOGOVERVAL,
sav->phy_analogoverval);
BWN_PHY_WRITE(mac, BWN_PHY_CLASSCTL, sav->phy_classctl);
BWN_PHY_WRITE(mac, BWN_PHY_RFOVER, sav->phy_rfover);
BWN_PHY_WRITE(mac, BWN_PHY_RFOVERVAL, sav->phy_rfoverval);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x3e), sav->phy_cck3);
BWN_PHY_WRITE(mac, BWN_PHY_CRS0, sav->phy_crs0);
}
if (bwn_has_hwpctl(mac)) {
tmp = (sav->phy_lomask & 0xbfff);
BWN_PHY_WRITE(mac, BWN_PHY_LO_MASK, tmp);
BWN_PHY_WRITE(mac, BWN_PHY_EXTG(0x01), sav->phy_extg);
BWN_PHY_WRITE(mac, BWN_PHY_DACCTL, sav->phy_dacctl_hwpctl);
BWN_PHY_WRITE(mac, BWN_PHY_CCK(0x14), sav->phy_cck4);
BWN_PHY_WRITE(mac, BWN_PHY_HPWR_TSSICTL, sav->phy_hpwr_tssictl);
}
bwn_phy_g_switch_chan(mac, sav->old_channel, 1);
}
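/*
 * Probe the I/Q control points neighbouring *probe (stepped by
 * d->multipler) and keep the one with the lowest measured feedthrough.
 * Returns non-zero when a better point than the current one was found.
 */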
static int
bwn_lo_probe_loctl(struct bwn_mac *mac,
struct bwn_loctl *probe, struct bwn_lo_g_sm *d)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_loctl orig, test;
struct bwn_loctl prev = { -100, -100 };
static const struct bwn_loctl modifiers[] = {
{ 1, 1,}, { 1, 0,}, { 1, -1,}, { 0, -1,},
{ -1, -1,}, { -1, 0,}, { -1, 1,}, { 0, 1,}
};
int begin, end, lower = 0, i;
uint16_t feedth;
if (d->curstate == 0) {
begin = 1;
end = 8;
} else if (d->curstate % 2 == 0) {
begin = d->curstate - 1;
end = d->curstate + 1;
} else {
begin = d->curstate - 2;
end = d->curstate + 2;
}
if (begin < 1)
begin += 8;
if (end > 8)
end -= 8;
memcpy(&orig, probe, sizeof(struct bwn_loctl));
i = begin;
d->curstate = i;
while (1) {
KASSERT(i >= 1 && i <= 8, ("%s:%d: fail", __func__, __LINE__));
memcpy(&test, &orig, sizeof(struct bwn_loctl));
test.i += modifiers[i - 1].i * d->multipler;
test.q += modifiers[i - 1].q * d->multipler;
if ((test.i != prev.i || test.q != prev.q) &&
(abs(test.i) <= 16 && abs(test.q) <= 16)) {
bwn_lo_write(mac, &test);
feedth = bwn_lo_calcfeed(mac, pg->pg_lna_gain,
pg->pg_pga_gain, pg->pg_trsw_rx_gain);
if (feedth < d->feedth) {
memcpy(probe, &test,
sizeof(struct bwn_loctl));
lower = 1;
d->feedth = feedth;
if (d->nmeasure < 2 && !BWN_HAS_LOOPBACK(phy))
break;
}
}
memcpy(&prev, &test, sizeof(prev));
if (i == end)
break;
if (i == 8)
i = 1;
else
i++;
d->curstate = i;
}
return (lower);
}
static void
bwn_lo_probe_sm(struct bwn_mac *mac, struct bwn_loctl *loctl, int *rxgain)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_lo_g_sm d;
struct bwn_loctl probe;
int lower, repeat, cnt = 0;
uint16_t feedth;
d.nmeasure = 0;
d.multipler = 1;
if (BWN_HAS_LOOPBACK(phy))
d.multipler = 3;
memcpy(&d.loctl, loctl, sizeof(struct bwn_loctl));
repeat = (BWN_HAS_LOOPBACK(phy)) ? 4 : 1;
do {
bwn_lo_write(mac, &d.loctl);
feedth = bwn_lo_calcfeed(mac, pg->pg_lna_gain,
pg->pg_pga_gain, pg->pg_trsw_rx_gain);
if (feedth < 0x258) {
if (feedth >= 0x12c)
*rxgain += 6;
else
*rxgain += 3;
feedth = bwn_lo_calcfeed(mac, pg->pg_lna_gain,
pg->pg_pga_gain, pg->pg_trsw_rx_gain);
}
d.feedth = feedth;
d.curstate = 0;
do {
KASSERT(d.curstate >= 0 && d.curstate <= 8,
("%s:%d: fail", __func__, __LINE__));
memcpy(&probe, &d.loctl,
sizeof(struct bwn_loctl));
lower = bwn_lo_probe_loctl(mac, &probe, &d);
if (!lower)
break;
if ((probe.i == d.loctl.i) && (probe.q == d.loctl.q))
break;
memcpy(&d.loctl, &probe, sizeof(struct bwn_loctl));
d.nmeasure++;
} while (d.nmeasure < 24);
memcpy(loctl, &d.loctl, sizeof(struct bwn_loctl));
if (BWN_HAS_LOOPBACK(phy)) {
if (d.feedth > 0x1194)
*rxgain -= 6;
else if (d.feedth < 0x5dc)
*rxgain += 3;
if (cnt == 0) {
if (d.feedth <= 0x5dc) {
d.multipler = 1;
cnt++;
} else
d.multipler = 2;
} else if (cnt == 2)
d.multipler = 1;
}
bwn_lo_measure_gain_values(mac, *rxgain, BWN_HAS_LOOPBACK(phy));
} while (++cnt < repeat);
}
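/*
 * Measure the LO calibration for one baseband/RF attenuation pair:
 * suspend the MAC, save the hardware state, run the probe state
 * machine and return a freshly allocated bwn_lo_calib entry (NULL on
 * allocation failure).
 */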
static struct bwn_lo_calib *
bwn_lo_calibset(struct bwn_mac *mac,
const struct bwn_bbatt *bbatt, const struct bwn_rfatt *rfatt)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_loctl loctl = { 0, 0 };
struct bwn_lo_calib *cal;
struct bwn_lo_g_value sval = { 0 };
int rxgain;
uint16_t pad, reg, value;
sval.old_channel = phy->chan;
bwn_mac_suspend(mac);
bwn_lo_save(mac, &sval);
reg = bwn_lo_txctl_regtable(mac, &value, &pad);
BWN_RF_SETMASK(mac, 0x43, 0xfff0, rfatt->att);
BWN_RF_SETMASK(mac, reg, ~value, (rfatt->padmix ? value :0));
rxgain = (rfatt->att * 2) + (bbatt->att / 2);
if (rfatt->padmix)
rxgain -= pad;
if (BWN_HAS_LOOPBACK(phy))
rxgain += pg->pg_max_lb_gain;
bwn_lo_measure_gain_values(mac, rxgain, BWN_HAS_LOOPBACK(phy));
bwn_phy_g_set_bbatt(mac, bbatt->att);
bwn_lo_probe_sm(mac, &loctl, &rxgain);
bwn_lo_restore(mac, &sval);
bwn_mac_enable(mac);
cal = malloc(sizeof(*cal), M_DEVBUF, M_NOWAIT | M_ZERO);
if (!cal) {
device_printf(mac->mac_sc->sc_dev, "out of memory\n");
return (NULL);
}
memcpy(&cal->bbatt, bbatt, sizeof(*bbatt));
memcpy(&cal->rfatt, rfatt, sizeof(*rfatt));
memcpy(&cal->ctl, &loctl, sizeof(loctl));
BWN_GETTIME(cal->calib_time);
return (cal);
}
static struct bwn_lo_calib *
bwn_lo_get_calib(struct bwn_mac *mac, const struct bwn_bbatt *bbatt,
const struct bwn_rfatt *rfatt)
{
struct bwn_txpwr_loctl *lo = &mac->mac_phy.phy_g.pg_loctl;
struct bwn_lo_calib *c;
TAILQ_FOREACH(c, &lo->calib_list, list) {
if (!BWN_BBATTCMP(&c->bbatt, bbatt))
continue;
if (!BWN_RFATTCMP(&c->rfatt, rfatt))
continue;
return (c);
}
c = bwn_lo_calibset(mac, bbatt, rfatt);
if (!c)
return (NULL);
TAILQ_INSERT_TAIL(&lo->calib_list, c, list);
return (c);
}
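/*
 * (Re)build the hardware power control DC lookup table: calibrate the
 * LO for every attenuation combination flagged in the power vector
 * (all of them when 'update' is set) and write the packed I/Q values
 * to the PHY registers at 0x3a0.
 */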
static void
bwn_phy_g_dc_lookup_init(struct bwn_mac *mac, uint8_t update)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_softc *sc = mac->mac_sc;
struct bwn_txpwr_loctl *lo = &pg->pg_loctl;
const struct bwn_rfatt *rfatt;
const struct bwn_bbatt *bbatt;
uint64_t pvector;
int i;
int rf_offset, bb_offset;
uint8_t changed = 0;
KASSERT(BWN_DC_LT_SIZE == 32, ("%s:%d: fail", __func__, __LINE__));
KASSERT(lo->rfatt.len * lo->bbatt.len <= 64,
("%s:%d: fail", __func__, __LINE__));
pvector = lo->power_vector;
if (!update && !pvector)
return;
bwn_mac_suspend(mac);
for (i = 0; i < BWN_DC_LT_SIZE * 2; i++) {
struct bwn_lo_calib *cal;
int idx;
uint16_t val;
if (!update && !(pvector & (((uint64_t)1ULL) << i)))
continue;
bb_offset = i / lo->rfatt.len;
rf_offset = i % lo->rfatt.len;
bbatt = &(lo->bbatt.array[bb_offset]);
rfatt = &(lo->rfatt.array[rf_offset]);
cal = bwn_lo_calibset(mac, bbatt, rfatt);
if (!cal) {
device_printf(sc->sc_dev, "LO: Could not "
"calibrate DC table entry\n");
continue;
}
val = (uint8_t)(cal->ctl.q);
val |= ((uint8_t)(cal->ctl.i)) << 4;
free(cal, M_DEVBUF);
idx = i / 2;
if (i % 2)
lo->dc_lt[idx] = (lo->dc_lt[idx] & 0x00ff)
| ((val & 0x00ff) << 8);
else
lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xff00)
| (val & 0x00ff);
changed = 1;
}
if (changed) {
for (i = 0; i < BWN_DC_LT_SIZE; i++)
BWN_PHY_WRITE(mac, 0x3a0 + i, lo->dc_lt[i]);
}
bwn_mac_enable(mac);
}
static void
bwn_lo_fixup_rfatt(struct bwn_rfatt *rf)
{
if (!rf->padmix)
return;
if ((rf->att != 1) && (rf->att != 2) && (rf->att != 3))
rf->att = 4;
}
static void
bwn_lo_g_adjust(struct bwn_mac *mac)
{
struct bwn_phy_g *pg = &mac->mac_phy.phy_g;
struct bwn_lo_calib *cal;
struct bwn_rfatt rf;
memcpy(&rf, &pg->pg_rfatt, sizeof(rf));
bwn_lo_fixup_rfatt(&rf);
cal = bwn_lo_get_calib(mac, &pg->pg_bbatt, &rf);
if (!cal)
return;
bwn_lo_write(mac, &cal->ctl);
}
static void
bwn_lo_g_init(struct bwn_mac *mac)
{
if (!bwn_has_hwpctl(mac))
return;
bwn_lo_get_powervector(mac);
bwn_phy_g_dc_lookup_init(mac, 1);
}
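/*
 * Suspend the MAC (reference counted; bwn_mac_enable() is the
 * counterpart). On the first call the MACCTL enable bit is cleared
 * and BWN_INTR_REASON is polled until the microcode reports
 * BWN_INTR_MAC_SUSPENDED.
 */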
static void
bwn_mac_suspend(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
int i;
uint32_t tmp;
KASSERT(mac->mac_suspended >= 0,
("%s:%d: fail", __func__, __LINE__));
if (mac->mac_suspended == 0) {
bwn_psctl(mac, BWN_PS_AWAKE);
BWN_WRITE_4(mac, BWN_MACCTL,
BWN_READ_4(mac, BWN_MACCTL)
& ~BWN_MACCTL_ON);
BWN_READ_4(mac, BWN_MACCTL);
for (i = 35; i; i--) {
tmp = BWN_READ_4(mac, BWN_INTR_REASON);
if (tmp & BWN_INTR_MAC_SUSPENDED)
goto out;
DELAY(10);
}
for (i = 40; i; i--) {
tmp = BWN_READ_4(mac, BWN_INTR_REASON);
if (tmp & BWN_INTR_MAC_SUSPENDED)
goto out;
DELAY(1000);
}
device_printf(sc->sc_dev, "MAC suspend failed\n");
}
out:
mac->mac_suspended++;
}
static void
bwn_mac_enable(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint16_t state;
state = bwn_shm_read_2(mac, BWN_SHARED,
BWN_SHARED_UCODESTAT);
if (state != BWN_SHARED_UCODESTAT_SUSPEND &&
state != BWN_SHARED_UCODESTAT_SLEEP)
device_printf(sc->sc_dev, "warn: firmware state (%d)\n", state);
mac->mac_suspended--;
KASSERT(mac->mac_suspended >= 0,
("%s:%d: fail", __func__, __LINE__));
if (mac->mac_suspended == 0) {
BWN_WRITE_4(mac, BWN_MACCTL,
BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_ON);
BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_MAC_SUSPENDED);
BWN_READ_4(mac, BWN_MACCTL);
BWN_READ_4(mac, BWN_INTR_REASON);
bwn_psctl(mac, 0);
}
}
static void
bwn_psctl(struct bwn_mac *mac, uint32_t flags)
{
struct bwn_softc *sc = mac->mac_sc;
int i;
uint16_t ucstat;
KASSERT(!((flags & BWN_PS_ON) && (flags & BWN_PS_OFF)),
("%s:%d: fail", __func__, __LINE__));
KASSERT(!((flags & BWN_PS_AWAKE) && (flags & BWN_PS_ASLEEP)),
("%s:%d: fail", __func__, __LINE__));
/* XXX forcibly awake and hwps-off */
BWN_WRITE_4(mac, BWN_MACCTL,
(BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_AWAKE) &
~BWN_MACCTL_HWPS);
BWN_READ_4(mac, BWN_MACCTL);
if (siba_get_revid(sc->sc_dev) >= 5) {
for (i = 0; i < 100; i++) {
ucstat = bwn_shm_read_2(mac, BWN_SHARED,
BWN_SHARED_UCODESTAT);
if (ucstat != BWN_SHARED_UCODESTAT_SLEEP)
break;
DELAY(10);
}
}
}
static int16_t
bwn_nrssi_read(struct bwn_mac *mac, uint16_t offset)
{
BWN_PHY_WRITE(mac, BWN_PHY_NRSSI_CTRL, offset);
return ((int16_t)BWN_PHY_READ(mac, BWN_PHY_NRSSI_DATA));
}
static void
bwn_nrssi_threshold(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_softc *sc = mac->mac_sc;
int32_t a, b;
int16_t tmp16;
uint16_t tmpu16;
KASSERT(phy->type == BWN_PHYTYPE_G, ("%s: fail", __func__));
if (phy->gmode && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_RSSI)) {
if (!pg->pg_aci_wlan_automatic && pg->pg_aci_enable) {
a = 0x13;
b = 0x12;
} else {
a = 0xe;
b = 0x11;
}
a = a * (pg->pg_nrssi[1] - pg->pg_nrssi[0]);
a += (pg->pg_nrssi[0] << 6);
a += (a < 32) ? 31 : 32;
a = a >> 6;
a = MIN(MAX(a, -31), 31);
b = b * (pg->pg_nrssi[1] - pg->pg_nrssi[0]);
b += (pg->pg_nrssi[0] << 6);
if (b < 32)
b += 31;
else
b += 32;
b = b >> 6;
b = MIN(MAX(b, -31), 31);
tmpu16 = BWN_PHY_READ(mac, 0x048a) & 0xf000;
tmpu16 |= ((uint32_t)b & 0x0000003f);
tmpu16 |= (((uint32_t)a & 0x0000003f) << 6);
BWN_PHY_WRITE(mac, 0x048a, tmpu16);
return;
}
tmp16 = bwn_nrssi_read(mac, 0x20);
if (tmp16 >= 0x20)
tmp16 -= 0x40;
BWN_PHY_SETMASK(mac, 0x048a, 0xf000, (tmp16 < 3) ? 0x09eb : 0x0aed);
}
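/*
 * Measure the narrow RSSI slope on the G PHY: save the affected RF/PHY
 * registers, take two NRSSI readings under different gain settings,
 * derive pg_nrssi_slope and the 64-entry lookup table, then restore
 * the saved state and recompute the NRSSI threshold.
 */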
static void
bwn_nrssi_slope_11g(struct bwn_mac *mac)
{
#define SAVE_RF_MAX 3
#define SAVE_PHY_COMM_MAX 4
#define SAVE_PHY3_MAX 8
static const uint16_t save_rf_regs[SAVE_RF_MAX] =
{ 0x7a, 0x52, 0x43 };
static const uint16_t save_phy_comm_regs[SAVE_PHY_COMM_MAX] =
{ 0x15, 0x5a, 0x59, 0x58 };
static const uint16_t save_phy3_regs[SAVE_PHY3_MAX] = {
0x002e, 0x002f, 0x080f, BWN_PHY_G_LOCTL,
0x0801, 0x0060, 0x0014, 0x0478
};
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
int32_t i, tmp32, phy3_idx = 0;
uint16_t delta, tmp;
uint16_t save_rf[SAVE_RF_MAX];
uint16_t save_phy_comm[SAVE_PHY_COMM_MAX];
uint16_t save_phy3[SAVE_PHY3_MAX];
uint16_t ant_div, phy0, chan_ex;
int16_t nrssi0, nrssi1;
KASSERT(phy->type == BWN_PHYTYPE_G,
("%s:%d: fail", __func__, __LINE__));
if (phy->rf_rev >= 9)
return;
if (phy->rf_rev == 8)
bwn_nrssi_offset(mac);
BWN_PHY_MASK(mac, BWN_PHY_G_CRS, 0x7fff);
BWN_PHY_MASK(mac, 0x0802, 0xfffc);
/*
* Save RF/PHY registers for later restoration
*/
ant_div = BWN_READ_2(mac, 0x03e2);
BWN_WRITE_2(mac, 0x03e2, BWN_READ_2(mac, 0x03e2) | 0x8000);
for (i = 0; i < SAVE_RF_MAX; ++i)
save_rf[i] = BWN_RF_READ(mac, save_rf_regs[i]);
for (i = 0; i < SAVE_PHY_COMM_MAX; ++i)
save_phy_comm[i] = BWN_PHY_READ(mac, save_phy_comm_regs[i]);
phy0 = BWN_READ_2(mac, BWN_PHY0);
chan_ex = BWN_READ_2(mac, BWN_CHANNEL_EXT);
if (phy->rev >= 3) {
for (i = 0; i < SAVE_PHY3_MAX; ++i)
save_phy3[i] = BWN_PHY_READ(mac, save_phy3_regs[i]);
BWN_PHY_WRITE(mac, 0x002e, 0);
BWN_PHY_WRITE(mac, BWN_PHY_G_LOCTL, 0);
switch (phy->rev) {
case 4:
case 6:
case 7:
BWN_PHY_SET(mac, 0x0478, 0x0100);
BWN_PHY_SET(mac, 0x0801, 0x0040);
break;
case 3:
case 5:
BWN_PHY_MASK(mac, 0x0801, 0xffbf);
break;
}
BWN_PHY_SET(mac, 0x0060, 0x0040);
BWN_PHY_SET(mac, 0x0014, 0x0200);
}
/*
* Calculate nrssi0
*/
BWN_RF_SET(mac, 0x007a, 0x0070);
bwn_set_all_gains(mac, 0, 8, 0);
BWN_RF_MASK(mac, 0x007a, 0x00f7);
if (phy->rev >= 2) {
BWN_PHY_SETMASK(mac, 0x0811, 0xffcf, 0x0030);
BWN_PHY_SETMASK(mac, 0x0812, 0xffcf, 0x0010);
}
BWN_RF_SET(mac, 0x007a, 0x0080);
DELAY(20);
nrssi0 = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f);
if (nrssi0 >= 0x0020)
nrssi0 -= 0x0040;
/*
* Calculate nrssi1
*/
BWN_RF_MASK(mac, 0x007a, 0x007f);
if (phy->rev >= 2)
BWN_PHY_SETMASK(mac, 0x0003, 0xff9f, 0x0040);
BWN_WRITE_2(mac, BWN_CHANNEL_EXT,
BWN_READ_2(mac, BWN_CHANNEL_EXT) | 0x2000);
BWN_RF_SET(mac, 0x007a, 0x000f);
BWN_PHY_WRITE(mac, 0x0015, 0xf330);
if (phy->rev >= 2) {
BWN_PHY_SETMASK(mac, 0x0812, 0xffcf, 0x0020);
BWN_PHY_SETMASK(mac, 0x0811, 0xffcf, 0x0020);
}
bwn_set_all_gains(mac, 3, 0, 1);
if (phy->rf_rev == 8) {
BWN_RF_WRITE(mac, 0x0043, 0x001f);
} else {
tmp = BWN_RF_READ(mac, 0x0052) & 0xff0f;
BWN_RF_WRITE(mac, 0x0052, tmp | 0x0060);
tmp = BWN_RF_READ(mac, 0x0043) & 0xfff0;
BWN_RF_WRITE(mac, 0x0043, tmp | 0x0009);
}
BWN_PHY_WRITE(mac, 0x005a, 0x0480);
BWN_PHY_WRITE(mac, 0x0059, 0x0810);
BWN_PHY_WRITE(mac, 0x0058, 0x000d);
DELAY(20);
nrssi1 = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f);
/*
* Install calculated narrow RSSI values
*/
if (nrssi1 >= 0x0020)
nrssi1 -= 0x0040;
if (nrssi0 == nrssi1)
pg->pg_nrssi_slope = 0x00010000;
else
pg->pg_nrssi_slope = 0x00400000 / (nrssi0 - nrssi1);
if (nrssi0 >= -4) {
pg->pg_nrssi[0] = nrssi1;
pg->pg_nrssi[1] = nrssi0;
}
/*
* Restore saved RF/PHY registers
*/
if (phy->rev >= 3) {
for (phy3_idx = 0; phy3_idx < 4; ++phy3_idx) {
BWN_PHY_WRITE(mac, save_phy3_regs[phy3_idx],
save_phy3[phy3_idx]);
}
}
if (phy->rev >= 2) {
BWN_PHY_MASK(mac, 0x0812, 0xffcf);
BWN_PHY_MASK(mac, 0x0811, 0xffcf);
}
for (i = 0; i < SAVE_RF_MAX; ++i)
BWN_RF_WRITE(mac, save_rf_regs[i], save_rf[i]);
BWN_WRITE_2(mac, 0x03e2, ant_div);
BWN_WRITE_2(mac, 0x03e6, phy0);
BWN_WRITE_2(mac, BWN_CHANNEL_EXT, chan_ex);
for (i = 0; i < SAVE_PHY_COMM_MAX; ++i)
BWN_PHY_WRITE(mac, save_phy_comm_regs[i], save_phy_comm[i]);
bwn_spu_workaround(mac, phy->chan);
BWN_PHY_SET(mac, 0x0802, (0x0001 | 0x0002));
bwn_set_original_gains(mac);
BWN_PHY_SET(mac, BWN_PHY_G_CRS, 0x8000);
if (phy->rev >= 3) {
for (; phy3_idx < SAVE_PHY3_MAX; ++phy3_idx) {
BWN_PHY_WRITE(mac, save_phy3_regs[phy3_idx],
save_phy3[phy3_idx]);
}
}
delta = 0x1f - pg->pg_nrssi[0];
for (i = 0; i < 64; i++) {
tmp32 = (((i - delta) * pg->pg_nrssi_slope) / 0x10000) + 0x3a;
tmp32 = MIN(MAX(tmp32, 0), 0x3f);
pg->pg_nrssi_lt[i] = tmp32;
}
bwn_nrssi_threshold(mac);
#undef SAVE_RF_MAX
#undef SAVE_PHY_COMM_MAX
#undef SAVE_PHY3_MAX
}
static void
bwn_nrssi_offset(struct bwn_mac *mac)
{
#define SAVE_RF_MAX 2
#define SAVE_PHY_COMM_MAX 10
#define SAVE_PHY6_MAX 8
static const uint16_t save_rf_regs[SAVE_RF_MAX] =
{ 0x7a, 0x43 };
static const uint16_t save_phy_comm_regs[SAVE_PHY_COMM_MAX] = {
0x0001, 0x0811, 0x0812, 0x0814,
0x0815, 0x005a, 0x0059, 0x0058,
0x000a, 0x0003
};
static const uint16_t save_phy6_regs[SAVE_PHY6_MAX] = {
0x002e, 0x002f, 0x080f, 0x0810,
0x0801, 0x0060, 0x0014, 0x0478
};
struct bwn_phy *phy = &mac->mac_phy;
int i, phy6_idx = 0;
uint16_t save_rf[SAVE_RF_MAX];
uint16_t save_phy_comm[SAVE_PHY_COMM_MAX];
uint16_t save_phy6[SAVE_PHY6_MAX];
int16_t nrssi;
uint16_t saved = 0xffff;
for (i = 0; i < SAVE_PHY_COMM_MAX; ++i)
save_phy_comm[i] = BWN_PHY_READ(mac, save_phy_comm_regs[i]);
for (i = 0; i < SAVE_RF_MAX; ++i)
save_rf[i] = BWN_RF_READ(mac, save_rf_regs[i]);
BWN_PHY_MASK(mac, 0x0429, 0x7fff);
BWN_PHY_SETMASK(mac, 0x0001, 0x3fff, 0x4000);
BWN_PHY_SET(mac, 0x0811, 0x000c);
BWN_PHY_SETMASK(mac, 0x0812, 0xfff3, 0x0004);
BWN_PHY_MASK(mac, 0x0802, ~(0x1 | 0x2));
if (phy->rev >= 6) {
for (i = 0; i < SAVE_PHY6_MAX; ++i)
save_phy6[i] = BWN_PHY_READ(mac, save_phy6_regs[i]);
BWN_PHY_WRITE(mac, 0x002e, 0);
BWN_PHY_WRITE(mac, 0x002f, 0);
BWN_PHY_WRITE(mac, 0x080f, 0);
BWN_PHY_WRITE(mac, 0x0810, 0);
BWN_PHY_SET(mac, 0x0478, 0x0100);
BWN_PHY_SET(mac, 0x0801, 0x0040);
BWN_PHY_SET(mac, 0x0060, 0x0040);
BWN_PHY_SET(mac, 0x0014, 0x0200);
}
BWN_RF_SET(mac, 0x007a, 0x0070);
BWN_RF_SET(mac, 0x007a, 0x0080);
DELAY(30);
nrssi = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f);
if (nrssi >= 0x20)
nrssi -= 0x40;
if (nrssi == 31) {
for (i = 7; i >= 4; i--) {
BWN_RF_WRITE(mac, 0x007b, i);
DELAY(20);
nrssi = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) &
0x003f);
if (nrssi >= 0x20)
nrssi -= 0x40;
if (nrssi < 31 && saved == 0xffff)
saved = i;
}
if (saved == 0xffff)
saved = 4;
} else {
BWN_RF_MASK(mac, 0x007a, 0x007f);
if (phy->rev != 1) {
BWN_PHY_SET(mac, 0x0814, 0x0001);
BWN_PHY_MASK(mac, 0x0815, 0xfffe);
}
BWN_PHY_SET(mac, 0x0811, 0x000c);
BWN_PHY_SET(mac, 0x0812, 0x000c);
BWN_PHY_SET(mac, 0x0811, 0x0030);
BWN_PHY_SET(mac, 0x0812, 0x0030);
BWN_PHY_WRITE(mac, 0x005a, 0x0480);
BWN_PHY_WRITE(mac, 0x0059, 0x0810);
BWN_PHY_WRITE(mac, 0x0058, 0x000d);
if (phy->rev == 0)
BWN_PHY_WRITE(mac, 0x0003, 0x0122);
else
BWN_PHY_SET(mac, 0x000a, 0x2000);
if (phy->rev != 1) {
BWN_PHY_SET(mac, 0x0814, 0x0004);
BWN_PHY_MASK(mac, 0x0815, 0xfffb);
}
BWN_PHY_SETMASK(mac, 0x0003, 0xff9f, 0x0040);
BWN_RF_SET(mac, 0x007a, 0x000f);
bwn_set_all_gains(mac, 3, 0, 1);
BWN_RF_SETMASK(mac, 0x0043, 0x00f0, 0x000f);
DELAY(30);
nrssi = (int16_t) ((BWN_PHY_READ(mac, 0x047f) >> 8) & 0x003f);
if (nrssi >= 0x20)
nrssi -= 0x40;
if (nrssi == -32) {
for (i = 0; i < 4; i++) {
BWN_RF_WRITE(mac, 0x007b, i);
DELAY(20);
nrssi = (int16_t)((BWN_PHY_READ(mac,
0x047f) >> 8) & 0x003f);
if (nrssi >= 0x20)
nrssi -= 0x40;
if (nrssi > -31 && saved == 0xffff)
saved = i;
}
if (saved == 0xffff)
saved = 3;
} else
saved = 0;
}
BWN_RF_WRITE(mac, 0x007b, saved);
/*
* Restore saved RF/PHY registers
*/
if (phy->rev >= 6) {
for (phy6_idx = 0; phy6_idx < 4; ++phy6_idx) {
BWN_PHY_WRITE(mac, save_phy6_regs[phy6_idx],
save_phy6[phy6_idx]);
}
}
if (phy->rev != 1) {
for (i = 3; i < 5; i++)
BWN_PHY_WRITE(mac, save_phy_comm_regs[i],
save_phy_comm[i]);
}
for (i = 5; i < SAVE_PHY_COMM_MAX; i++)
BWN_PHY_WRITE(mac, save_phy_comm_regs[i], save_phy_comm[i]);
for (i = SAVE_RF_MAX - 1; i >= 0; --i)
BWN_RF_WRITE(mac, save_rf_regs[i], save_rf[i]);
BWN_PHY_WRITE(mac, 0x0802, BWN_PHY_READ(mac, 0x0802) | 0x1 | 0x2);
BWN_PHY_SET(mac, 0x0429, 0x8000);
bwn_set_original_gains(mac);
if (phy->rev >= 6) {
for (; phy6_idx < SAVE_PHY6_MAX; ++phy6_idx) {
BWN_PHY_WRITE(mac, save_phy6_regs[phy6_idx],
save_phy6[phy6_idx]);
}
}
BWN_PHY_WRITE(mac, save_phy_comm_regs[0], save_phy_comm[0]);
BWN_PHY_WRITE(mac, save_phy_comm_regs[2], save_phy_comm[2]);
BWN_PHY_WRITE(mac, save_phy_comm_regs[1], save_phy_comm[1]);
}
static void
bwn_set_all_gains(struct bwn_mac *mac, int16_t first, int16_t second,
int16_t third)
{
struct bwn_phy *phy = &mac->mac_phy;
uint16_t i;
uint16_t start = 0x08, end = 0x18;
uint16_t tmp;
uint16_t table;
if (phy->rev <= 1) {
start = 0x10;
end = 0x20;
}
table = BWN_OFDMTAB_GAINX;
if (phy->rev <= 1)
table = BWN_OFDMTAB_GAINX_R1;
for (i = 0; i < 4; i++)
bwn_ofdmtab_write_2(mac, table, i, first);
for (i = start; i < end; i++)
bwn_ofdmtab_write_2(mac, table, i, second);
if (third != -1) {
tmp = ((uint16_t) third << 14) | ((uint16_t) third << 6);
BWN_PHY_SETMASK(mac, 0x04a0, 0xbfbf, tmp);
BWN_PHY_SETMASK(mac, 0x04a1, 0xbfbf, tmp);
BWN_PHY_SETMASK(mac, 0x04a2, 0xbfbf, tmp);
}
bwn_dummy_transmission(mac, 0, 1);
}
static void
bwn_set_original_gains(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
uint16_t i, tmp;
uint16_t table;
uint16_t start = 0x0008, end = 0x0018;
if (phy->rev <= 1) {
start = 0x0010;
end = 0x0020;
}
table = BWN_OFDMTAB_GAINX;
if (phy->rev <= 1)
table = BWN_OFDMTAB_GAINX_R1;
for (i = 0; i < 4; i++) {
tmp = (i & 0xfffc);
tmp |= (i & 0x0001) << 1;
tmp |= (i & 0x0002) >> 1;
bwn_ofdmtab_write_2(mac, table, i, tmp);
}
for (i = start; i < end; i++)
bwn_ofdmtab_write_2(mac, table, i, i - start);
BWN_PHY_SETMASK(mac, 0x04a0, 0xbfbf, 0x4040);
BWN_PHY_SETMASK(mac, 0x04a1, 0xbfbf, 0x4040);
BWN_PHY_SETMASK(mac, 0x04a2, 0xbfbf, 0x4000);
bwn_dummy_transmission(mac, 0, 1);
}
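/*
 * Initialize hardware TX power control on the G PHY: measure the
 * current TSSI with a dummy transmission if none has been recorded
 * yet, program the TSSI-to-dBm and attenuation tables, and clear the
 * TSSI sample words in shared memory.
 */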
static void
bwn_phy_hwpctl_init(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_rfatt old_rfatt, rfatt;
struct bwn_bbatt old_bbatt, bbatt;
struct bwn_softc *sc = mac->mac_sc;
uint8_t old_txctl = 0;
KASSERT(phy->type == BWN_PHYTYPE_G,
("%s:%d: fail", __func__, __LINE__));
if ((siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM) &&
(siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306))
return;
BWN_PHY_WRITE(mac, 0x0028, 0x8018);
BWN_WRITE_2(mac, BWN_PHY0, BWN_READ_2(mac, BWN_PHY0) & 0xffdf);
if (!phy->gmode)
return;
bwn_hwpctl_early_init(mac);
if (pg->pg_curtssi == 0) {
if (phy->rf_ver == 0x2050 && phy->analog == 0) {
BWN_RF_SETMASK(mac, 0x0076, 0x00f7, 0x0084);
} else {
memcpy(&old_rfatt, &pg->pg_rfatt, sizeof(old_rfatt));
memcpy(&old_bbatt, &pg->pg_bbatt, sizeof(old_bbatt));
old_txctl = pg->pg_txctl;
bbatt.att = 11;
if (phy->rf_rev == 8) {
rfatt.att = 15;
rfatt.padmix = 1;
} else {
rfatt.att = 9;
rfatt.padmix = 0;
}
bwn_phy_g_set_txpwr_sub(mac, &bbatt, &rfatt, 0);
}
bwn_dummy_transmission(mac, 0, 1);
pg->pg_curtssi = BWN_PHY_READ(mac, BWN_PHY_TSSI);
if (phy->rf_ver == 0x2050 && phy->analog == 0)
BWN_RF_MASK(mac, 0x0076, 0xff7b);
else
bwn_phy_g_set_txpwr_sub(mac, &old_bbatt,
&old_rfatt, old_txctl);
}
bwn_hwpctl_init_gphy(mac);
/* clear TSSI */
bwn_shm_write_2(mac, BWN_SHARED, 0x0058, 0x7f7f);
bwn_shm_write_2(mac, BWN_SHARED, 0x005a, 0x7f7f);
bwn_shm_write_2(mac, BWN_SHARED, 0x0070, 0x7f7f);
bwn_shm_write_2(mac, BWN_SHARED, 0x0072, 0x7f7f);
}
static void
bwn_hwpctl_early_init(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
if (!bwn_has_hwpctl(mac)) {
BWN_PHY_WRITE(mac, 0x047a, 0xc111);
return;
}
BWN_PHY_MASK(mac, 0x0036, 0xfeff);
BWN_PHY_WRITE(mac, 0x002f, 0x0202);
BWN_PHY_SET(mac, 0x047c, 0x0002);
BWN_PHY_SET(mac, 0x047a, 0xf000);
if (phy->rf_ver == 0x2050 && phy->rf_rev == 8) {
BWN_PHY_SETMASK(mac, 0x047a, 0xff0f, 0x0010);
BWN_PHY_SET(mac, 0x005d, 0x8000);
BWN_PHY_SETMASK(mac, 0x004e, 0xffc0, 0x0010);
BWN_PHY_WRITE(mac, 0x002e, 0xc07f);
BWN_PHY_SET(mac, 0x0036, 0x0400);
} else {
BWN_PHY_SET(mac, 0x0036, 0x0200);
BWN_PHY_SET(mac, 0x0036, 0x0400);
BWN_PHY_MASK(mac, 0x005d, 0x7fff);
BWN_PHY_MASK(mac, 0x004f, 0xfffe);
BWN_PHY_SETMASK(mac, 0x004e, 0xffc0, 0x0010);
BWN_PHY_WRITE(mac, 0x002e, 0xc07f);
BWN_PHY_SETMASK(mac, 0x047a, 0xff0f, 0x0010);
}
}
static void
bwn_hwpctl_init_gphy(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_txpwr_loctl *lo = &pg->pg_loctl;
int i;
uint16_t nr_written = 0, tmp, value;
uint8_t rf, bb;
if (!bwn_has_hwpctl(mac)) {
bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_HW_POWERCTL);
return;
}
BWN_PHY_SETMASK(mac, 0x0036, 0xffc0,
(pg->pg_idletssi - pg->pg_curtssi));
BWN_PHY_SETMASK(mac, 0x0478, 0xff00,
(pg->pg_idletssi - pg->pg_curtssi));
for (i = 0; i < 32; i++)
bwn_ofdmtab_write_2(mac, 0x3c20, i, pg->pg_tssi2dbm[i]);
for (i = 32; i < 64; i++)
bwn_ofdmtab_write_2(mac, 0x3c00, i - 32, pg->pg_tssi2dbm[i]);
for (i = 0; i < 64; i += 2) {
value = (uint16_t) pg->pg_tssi2dbm[i];
value |= ((uint16_t) pg->pg_tssi2dbm[i + 1]) << 8;
BWN_PHY_WRITE(mac, 0x380 + (i / 2), value);
}
for (rf = 0; rf < lo->rfatt.len; rf++) {
for (bb = 0; bb < lo->bbatt.len; bb++) {
if (nr_written >= 0x40)
return;
tmp = lo->bbatt.array[bb].att;
tmp <<= 8;
if (phy->rf_rev == 8)
tmp |= 0x50;
else
tmp |= 0x40;
tmp |= lo->rfatt.array[rf].att;
BWN_PHY_WRITE(mac, 0x3c0 + nr_written, tmp);
nr_written++;
}
}
BWN_PHY_MASK(mac, 0x0060, 0xffbf);
BWN_PHY_WRITE(mac, 0x0014, 0x0000);
KASSERT(phy->rev >= 6, ("%s:%d: fail", __func__, __LINE__));
BWN_PHY_SET(mac, 0x0478, 0x0800);
BWN_PHY_MASK(mac, 0x0478, 0xfeff);
BWN_PHY_MASK(mac, 0x0801, 0xffbf);
bwn_phy_g_dc_lookup_init(mac, 1);
bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_HW_POWERCTL);
}
static void
bwn_phy_g_switch_chan(struct bwn_mac *mac, int channel, uint8_t spu)
{
struct bwn_softc *sc = mac->mac_sc;
if (spu != 0)
bwn_spu_workaround(mac, channel);
BWN_WRITE_2(mac, BWN_CHANNEL, bwn_phy_g_chan2freq(channel));
if (channel == 14) {
if (siba_sprom_get_ccode(sc->sc_dev) == SIBA_CCODE_JAPAN)
bwn_hf_write(mac,
bwn_hf_read(mac) & ~BWN_HF_JAPAN_CHAN14_OFF);
else
bwn_hf_write(mac,
bwn_hf_read(mac) | BWN_HF_JAPAN_CHAN14_OFF);
BWN_WRITE_2(mac, BWN_CHANNEL_EXT,
BWN_READ_2(mac, BWN_CHANNEL_EXT) | (1 << 11));
return;
}
BWN_WRITE_2(mac, BWN_CHANNEL_EXT,
BWN_READ_2(mac, BWN_CHANNEL_EXT) & 0xf7bf);
}
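/*
 * Map an 802.11b/g channel number (1-14) to the value written into
 * the BWN_CHANNEL register.
 */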
static uint16_t
bwn_phy_g_chan2freq(uint8_t channel)
{
static const uint8_t bwn_phy_g_rf_channels[] = BWN_PHY_G_RF_CHANNELS;
KASSERT(channel >= 1 && channel <= 14,
("%s:%d: fail", __func__, __LINE__));
return (bwn_phy_g_rf_channels[channel - 1]);
}
static void
bwn_phy_g_set_txpwr_sub(struct bwn_mac *mac, const struct bwn_bbatt *bbatt,
const struct bwn_rfatt *rfatt, uint8_t txctl)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_txpwr_loctl *lo = &pg->pg_loctl;
uint16_t bb, rf;
uint16_t tx_bias, tx_magn;
bb = bbatt->att;
rf = rfatt->att;
tx_bias = lo->tx_bias;
tx_magn = lo->tx_magn;
if (tx_bias == 0xff)
tx_bias = 0;
pg->pg_txctl = txctl;
memmove(&pg->pg_rfatt, rfatt, sizeof(*rfatt));
pg->pg_rfatt.padmix = (txctl & BWN_TXCTL_TXMIX) ? 1 : 0;
memmove(&pg->pg_bbatt, bbatt, sizeof(*bbatt));
bwn_phy_g_set_bbatt(mac, bb);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_RADIO_ATT, rf);
if (phy->rf_ver == 0x2050 && phy->rf_rev == 8)
BWN_RF_WRITE(mac, 0x43, (rf & 0x000f) | (txctl & 0x0070));
else {
BWN_RF_SETMASK(mac, 0x43, 0xfff0, (rf & 0x000f));
BWN_RF_SETMASK(mac, 0x52, ~0x0070, (txctl & 0x0070));
}
if (BWN_HAS_TXMAG(phy))
BWN_RF_WRITE(mac, 0x52, tx_magn | tx_bias);
else
BWN_RF_SETMASK(mac, 0x52, 0xfff0, (tx_bias & 0x000f));
bwn_lo_g_adjust(mac);
}
static void
bwn_phy_g_set_bbatt(struct bwn_mac *mac,
uint16_t bbatt)
{
struct bwn_phy *phy = &mac->mac_phy;
if (phy->analog == 0) {
BWN_WRITE_2(mac, BWN_PHY0,
(BWN_READ_2(mac, BWN_PHY0) & 0xfff0) | bbatt);
return;
}
if (phy->analog > 1) {
BWN_PHY_SETMASK(mac, BWN_PHY_DACCTL, 0xffc3, bbatt << 2);
return;
}
BWN_PHY_SETMASK(mac, BWN_PHY_DACCTL, 0xff87, bbatt << 3);
}
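/*
 * Compute the RF override value for a 2050 radio. The result depends
 * on the register being overridden (BWN_PHY_RFOVER vs.
 * BWN_PHY_RFOVERVAL), the requested LPD bits and, when loopback is
 * available, the loopback gain and external LNA configuration.
 */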
static uint16_t
bwn_rf_2050_rfoverval(struct bwn_mac *mac, uint16_t reg, uint32_t lpd)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_g *pg = &phy->phy_g;
struct bwn_softc *sc = mac->mac_sc;
int max_lb_gain;
uint16_t extlna;
uint16_t i;
if (phy->gmode == 0)
return (0);
if (BWN_HAS_LOOPBACK(phy)) {
max_lb_gain = pg->pg_max_lb_gain;
max_lb_gain += (phy->rf_rev == 8) ? 0x3e : 0x26;
if (max_lb_gain >= 0x46) {
extlna = 0x3000;
max_lb_gain -= 0x46;
} else if (max_lb_gain >= 0x3a) {
extlna = 0x1000;
max_lb_gain -= 0x3a;
} else if (max_lb_gain >= 0x2e) {
extlna = 0x2000;
max_lb_gain -= 0x2e;
} else {
extlna = 0;
max_lb_gain -= 0x10;
}
for (i = 0; i < 16; i++) {
max_lb_gain -= (i * 6);
if (max_lb_gain < 6)
break;
}
if ((phy->rev < 7) ||
!(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA)) {
if (reg == BWN_PHY_RFOVER) {
return (0x1b3);
} else if (reg == BWN_PHY_RFOVERVAL) {
extlna |= (i << 8);
switch (lpd) {
case BWN_LPD(0, 1, 1):
return (0x0f92);
case BWN_LPD(0, 0, 1):
case BWN_LPD(1, 0, 1):
return (0x0092 | extlna);
case BWN_LPD(1, 0, 0):
return (0x0093 | extlna);
}
KASSERT(0 == 1,
("%s:%d: fail", __func__, __LINE__));
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
} else {
if (reg == BWN_PHY_RFOVER)
return (0x9b3);
if (reg == BWN_PHY_RFOVERVAL) {
if (extlna)
extlna |= 0x8000;
extlna |= (i << 8);
switch (lpd) {
case BWN_LPD(0, 1, 1):
return (0x8f92);
case BWN_LPD(0, 0, 1):
return (0x8092 | extlna);
case BWN_LPD(1, 0, 1):
return (0x2092 | extlna);
case BWN_LPD(1, 0, 0):
return (0x2093 | extlna);
}
KASSERT(0 == 1,
("%s:%d: fail", __func__, __LINE__));
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
return (0);
}
if ((phy->rev < 7) ||
!(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_EXTLNA)) {
if (reg == BWN_PHY_RFOVER) {
return (0x1b3);
} else if (reg == BWN_PHY_RFOVERVAL) {
switch (lpd) {
case BWN_LPD(0, 1, 1):
return (0x0fb2);
case BWN_LPD(0, 0, 1):
return (0x00b2);
case BWN_LPD(1, 0, 1):
return (0x30b2);
case BWN_LPD(1, 0, 0):
return (0x30b3);
}
KASSERT(0 == 1,
("%s:%d: fail", __func__, __LINE__));
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
} else {
if (reg == BWN_PHY_RFOVER) {
return (0x9b3);
} else if (reg == BWN_PHY_RFOVERVAL) {
switch (lpd) {
case BWN_LPD(0, 1, 1):
return (0x8fb2);
case BWN_LPD(0, 0, 1):
return (0x80b2);
case BWN_LPD(1, 0, 1):
return (0x20b2);
case BWN_LPD(1, 0, 0):
return (0x20b3);
}
KASSERT(0 == 1,
("%s:%d: fail", __func__, __LINE__));
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
return (0);
}
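/*
 * Workaround for early 2050 radios (rev < 6): briefly hop to another
 * channel before settling on the requested one.
 */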
static void
bwn_spu_workaround(struct bwn_mac *mac, uint8_t channel)
{
if (mac->mac_phy.rf_ver != 0x2050 || mac->mac_phy.rf_rev >= 6)
return;
BWN_WRITE_2(mac, BWN_CHANNEL, (channel <= 10) ?
bwn_phy_g_chan2freq(channel + 4) : bwn_phy_g_chan2freq(1));
DELAY(1000);
BWN_WRITE_2(mac, BWN_CHANNEL, bwn_phy_g_chan2freq(channel));
}
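/*
 * Select and fetch the firmware set (microcode, optional PCM and the
 * per-PHY initvals/bandswitch initvals) that matches this core
 * revision and PHY type.
 */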
static int
bwn_fw_gets(struct bwn_mac *mac, enum bwn_fwtype type)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_fw *fw = &mac->mac_fw;
const uint8_t rev = siba_get_revid(sc->sc_dev);
const char *filename;
uint32_t high;
int error;
/* microcode */
if (rev >= 5 && rev <= 10)
filename = "ucode5";
else if (rev >= 11 && rev <= 12)
filename = "ucode11";
else if (rev == 13)
filename = "ucode13";
else if (rev == 14)
filename = "ucode14";
else if (rev >= 15)
filename = "ucode15";
else {
device_printf(sc->sc_dev, "no ucode for rev %d\n", rev);
bwn_release_firmware(mac);
return (EOPNOTSUPP);
}
error = bwn_fw_get(mac, type, filename, &fw->ucode);
if (error) {
bwn_release_firmware(mac);
return (error);
}
/* PCM */
KASSERT(fw->no_pcmfile == 0, ("%s:%d fail", __func__, __LINE__));
if (rev >= 5 && rev <= 10) {
error = bwn_fw_get(mac, type, "pcm5", &fw->pcm);
if (error == ENOENT)
fw->no_pcmfile = 1;
else if (error) {
bwn_release_firmware(mac);
return (error);
}
} else if (rev < 11) {
device_printf(sc->sc_dev, "no PCM for rev %d\n", rev);
return (EOPNOTSUPP);
}
/* initvals */
high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH);
switch (mac->mac_phy.type) {
case BWN_PHYTYPE_A:
if (rev < 5 || rev > 10)
goto fail1;
if (high & BWN_TGSHIGH_HAVE_2GHZ)
filename = "a0g1initvals5";
else
filename = "a0g0initvals5";
break;
case BWN_PHYTYPE_G:
if (rev >= 5 && rev <= 10)
filename = "b0g0initvals5";
else if (rev >= 13)
filename = "b0g0initvals13";
else
goto fail1;
break;
case BWN_PHYTYPE_LP:
if (rev == 13)
filename = "lp0initvals13";
else if (rev == 14)
filename = "lp0initvals14";
else if (rev >= 15)
filename = "lp0initvals15";
else
goto fail1;
break;
case BWN_PHYTYPE_N:
if (rev >= 11 && rev <= 12)
filename = "n0initvals11";
else
goto fail1;
break;
default:
goto fail1;
}
error = bwn_fw_get(mac, type, filename, &fw->initvals);
if (error) {
bwn_release_firmware(mac);
return (error);
}
/* bandswitch initvals */
switch (mac->mac_phy.type) {
case BWN_PHYTYPE_A:
if (rev >= 5 && rev <= 10) {
if (high & BWN_TGSHIGH_HAVE_2GHZ)
filename = "a0g1bsinitvals5";
else
filename = "a0g0bsinitvals5";
} else if (rev >= 11)
filename = NULL;
else
goto fail1;
break;
case BWN_PHYTYPE_G:
if (rev >= 5 && rev <= 10)
filename = "b0g0bsinitvals5";
else if (rev >= 11)
filename = NULL;
else
goto fail1;
break;
case BWN_PHYTYPE_LP:
if (rev == 13)
filename = "lp0bsinitvals13";
else if (rev == 14)
filename = "lp0bsinitvals14";
else if (rev >= 15)
filename = "lp0bsinitvals15";
else
goto fail1;
break;
case BWN_PHYTYPE_N:
if (rev >= 11 && rev <= 12)
filename = "n0bsinitvals11";
else
goto fail1;
break;
default:
goto fail1;
}
error = bwn_fw_get(mac, type, filename, &fw->initvals_band);
if (error) {
bwn_release_firmware(mac);
return (error);
}
return (0);
fail1:
device_printf(sc->sc_dev, "no INITVALS for rev %d\n", rev);
bwn_release_firmware(mac);
return (EOPNOTSUPP);
}
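/*
 * Fetch a single firmware image by name via firmware(9), building the
 * "bwn[-open]_v4_[lp_]<name>" module name, and sanity-check the image
 * header before caching it in *bfw.
 */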
static int
bwn_fw_get(struct bwn_mac *mac, enum bwn_fwtype type,
const char *name, struct bwn_fwfile *bfw)
{
const struct bwn_fwhdr *hdr;
struct bwn_softc *sc = mac->mac_sc;
const struct firmware *fw;
char namebuf[64];
if (name == NULL) {
bwn_do_release_fw(bfw);
return (0);
}
if (bfw->filename != NULL) {
if (bfw->type == type && (strcmp(bfw->filename, name) == 0))
return (0);
bwn_do_release_fw(bfw);
}
snprintf(namebuf, sizeof(namebuf), "bwn%s_v4_%s%s",
(type == BWN_FWTYPE_OPENSOURCE) ? "-open" : "",
(mac->mac_phy.type == BWN_PHYTYPE_LP) ? "lp_" : "", name);
/* XXX Sleeping on "fwload" with the non-sleepable locks held */
fw = firmware_get(namebuf);
if (fw == NULL) {
device_printf(sc->sc_dev, "the fw file(%s) not found\n",
namebuf);
return (ENOENT);
}
if (fw->datasize < sizeof(struct bwn_fwhdr))
goto fail;
hdr = (const struct bwn_fwhdr *)(fw->data);
switch (hdr->type) {
case BWN_FWTYPE_UCODE:
case BWN_FWTYPE_PCM:
if (be32toh(hdr->size) !=
(fw->datasize - sizeof(struct bwn_fwhdr)))
goto fail;
/* FALLTHROUGH */
case BWN_FWTYPE_IV:
if (hdr->ver != 1)
goto fail;
break;
default:
goto fail;
}
bfw->filename = name;
bfw->fw = fw;
bfw->type = type;
return (0);
fail:
device_printf(sc->sc_dev, "the fw file(%s) format error\n", namebuf);
if (fw != NULL)
firmware_put(fw, FIRMWARE_UNLOAD);
return (EPROTO);
}
static void
bwn_release_firmware(struct bwn_mac *mac)
{
bwn_do_release_fw(&mac->mac_fw.ucode);
bwn_do_release_fw(&mac->mac_fw.pcm);
bwn_do_release_fw(&mac->mac_fw.initvals);
bwn_do_release_fw(&mac->mac_fw.initvals_band);
}
static void
bwn_do_release_fw(struct bwn_fwfile *bfw)
{
if (bfw->fw != NULL)
firmware_put(bfw->fw, FIRMWARE_UNLOAD);
bfw->fw = NULL;
bfw->filename = NULL;
}
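/*
 * Upload the microcode (and PCM, when present) into shared memory,
 * start it, then wait for the "MAC suspended" interrupt and read back
 * the firmware revision, patch level and capability bits.
 */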
static int
bwn_fw_loaducode(struct bwn_mac *mac)
{
#define GETFWOFFSET(fwp, offset) \
((const uint32_t *)((const char *)fwp.fw->data + offset))
#define GETFWSIZE(fwp, offset) \
((fwp.fw->datasize - offset) / sizeof(uint32_t))
struct bwn_softc *sc = mac->mac_sc;
const uint32_t *data;
unsigned int i;
uint32_t ctl;
uint16_t date, fwcaps, time;
int error = 0;
ctl = BWN_READ_4(mac, BWN_MACCTL);
ctl |= BWN_MACCTL_MCODE_JMP0;
KASSERT(!(ctl & BWN_MACCTL_MCODE_RUN), ("%s:%d: fail", __func__,
__LINE__));
BWN_WRITE_4(mac, BWN_MACCTL, ctl);
for (i = 0; i < 64; i++)
bwn_shm_write_2(mac, BWN_SCRATCH, i, 0);
for (i = 0; i < 4096; i += 2)
bwn_shm_write_2(mac, BWN_SHARED, i, 0);
data = GETFWOFFSET(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr));
bwn_shm_ctlword(mac, BWN_UCODE | BWN_SHARED_AUTOINC, 0x0000);
for (i = 0; i < GETFWSIZE(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr));
i++) {
BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i]));
DELAY(10);
}
if (mac->mac_fw.pcm.fw) {
data = GETFWOFFSET(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr));
bwn_shm_ctlword(mac, BWN_HW, 0x01ea);
BWN_WRITE_4(mac, BWN_SHM_DATA, 0x00004000);
bwn_shm_ctlword(mac, BWN_HW, 0x01eb);
for (i = 0; i < GETFWSIZE(mac->mac_fw.pcm,
sizeof(struct bwn_fwhdr)); i++) {
BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i]));
DELAY(10);
}
}
BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_ALL);
BWN_WRITE_4(mac, BWN_MACCTL,
(BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_JMP0) |
BWN_MACCTL_MCODE_RUN);
for (i = 0; i < 21; i++) {
if (BWN_READ_4(mac, BWN_INTR_REASON) == BWN_INTR_MAC_SUSPENDED)
break;
if (i >= 20) {
device_printf(sc->sc_dev, "ucode timeout\n");
error = ENXIO;
goto error;
}
DELAY(50000);
}
BWN_READ_4(mac, BWN_INTR_REASON);
mac->mac_fw.rev = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_REV);
if (mac->mac_fw.rev <= 0x128) {
device_printf(sc->sc_dev, "the firmware is too old\n");
error = EOPNOTSUPP;
goto error;
}
mac->mac_fw.patch = bwn_shm_read_2(mac, BWN_SHARED,
BWN_SHARED_UCODE_PATCH);
date = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_DATE);
mac->mac_fw.opensource = (date == 0xffff);
if (bwn_wme != 0)
mac->mac_flags |= BWN_MAC_FLAG_WME;
mac->mac_flags |= BWN_MAC_FLAG_HWCRYPTO;
time = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_TIME);
if (mac->mac_fw.opensource == 0) {
device_printf(sc->sc_dev,
"firmware version (rev %u patch %u date %#x time %#x)\n",
mac->mac_fw.rev, mac->mac_fw.patch, date, time);
if (mac->mac_fw.no_pcmfile)
device_printf(sc->sc_dev,
"no HW crypto acceleration due to pcm5\n");
} else {
mac->mac_fw.patch = time;
fwcaps = bwn_fwcaps_read(mac);
if (!(fwcaps & BWN_FWCAPS_HWCRYPTO) || mac->mac_fw.no_pcmfile) {
device_printf(sc->sc_dev,
"disabling HW crypto acceleration\n");
mac->mac_flags &= ~BWN_MAC_FLAG_HWCRYPTO;
}
if (!(fwcaps & BWN_FWCAPS_WME)) {
device_printf(sc->sc_dev, "disabling WME support\n");
mac->mac_flags &= ~BWN_MAC_FLAG_WME;
}
}
if (BWN_ISOLDFMT(mac))
device_printf(sc->sc_dev, "using old firmware image\n");
return (0);
error:
BWN_WRITE_4(mac, BWN_MACCTL,
(BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_RUN) |
BWN_MACCTL_MCODE_JMP0);
return (error);
#undef GETFWSIZE
#undef GETFWOFFSET
}
/* Open-source firmware only (mac_fw.opensource must be set). */
static uint16_t
bwn_fwcaps_read(struct bwn_mac *mac)
{
KASSERT(mac->mac_fw.opensource == 1,
("%s:%d: fail", __func__, __LINE__));
return (bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_FWCAPS));
}
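/*
 * Replay an initial-values image: each record carries an offset/size
 * word followed by a 16-bit or 32-bit big-endian value that is written
 * to the corresponding device register.  Bail out on any offset or
 * size that does not fit the declared array size.
 */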
static int
bwn_fwinitvals_write(struct bwn_mac *mac, const struct bwn_fwinitvals *ivals,
size_t count, size_t array_size)
{
#define GET_NEXTIV16(iv) \
((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \
sizeof(uint16_t) + sizeof(uint16_t)))
#define GET_NEXTIV32(iv) \
((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \
sizeof(uint16_t) + sizeof(uint32_t)))
struct bwn_softc *sc = mac->mac_sc;
const struct bwn_fwinitvals *iv;
uint16_t offset;
size_t i;
uint8_t bit32;
KASSERT(sizeof(struct bwn_fwinitvals) == 6,
("%s:%d: fail", __func__, __LINE__));
iv = ivals;
for (i = 0; i < count; i++) {
if (array_size < sizeof(iv->offset_size))
goto fail;
array_size -= sizeof(iv->offset_size);
offset = be16toh(iv->offset_size);
bit32 = (offset & BWN_FWINITVALS_32BIT) ? 1 : 0;
offset &= BWN_FWINITVALS_OFFSET_MASK;
if (offset >= 0x1000)
goto fail;
if (bit32) {
if (array_size < sizeof(iv->data.d32))
goto fail;
array_size -= sizeof(iv->data.d32);
BWN_WRITE_4(mac, offset, be32toh(iv->data.d32));
iv = GET_NEXTIV32(iv);
} else {
if (array_size < sizeof(iv->data.d16))
goto fail;
array_size -= sizeof(iv->data.d16);
BWN_WRITE_2(mac, offset, be16toh(iv->data.d16));
iv = GET_NEXTIV16(iv);
}
}
if (array_size != 0)
goto fail;
return (0);
fail:
device_printf(sc->sc_dev, "initvals: invalid format\n");
return (EPROTO);
#undef GET_NEXTIV16
#undef GET_NEXTIV32
}
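/*
 * Switch to the given channel (0xffff selects the PHY default).  The
 * channel cookie is published through shared memory first and restored
 * if the PHY-specific switch routine fails.
 */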
static int
bwn_switch_channel(struct bwn_mac *mac, int chan)
{
struct bwn_phy *phy = &(mac->mac_phy);
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t channelcookie, savedcookie;
int error;
if (chan == 0xffff)
chan = phy->get_default_chan(mac);
channelcookie = chan;
if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
channelcookie |= 0x100;
savedcookie = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_CHAN);
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, channelcookie);
error = phy->switch_channel(mac, chan);
if (error)
goto fail;
mac->mac_phy.chan = chan;
DELAY(8000);
return (0);
fail:
device_printf(sc->sc_dev, "failed to switch channel\n");
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, savedcookie);
return (error);
}
static uint16_t
bwn_ant2phy(int antenna)
{
switch (antenna) {
case BWN_ANT0:
return (BWN_TX_PHY_ANT0);
case BWN_ANT1:
return (BWN_TX_PHY_ANT1);
case BWN_ANT2:
return (BWN_TX_PHY_ANT2);
case BWN_ANT3:
return (BWN_TX_PHY_ANT3);
case BWN_ANTAUTO:
return (BWN_TX_PHY_ANT01AUTO);
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (0);
}
static void
bwn_wme_load(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
int i;
KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams),
("%s:%d: fail", __func__, __LINE__));
bwn_mac_suspend(mac);
for (i = 0; i < N(sc->sc_wmeParams); i++)
bwn_wme_loadparams(mac, &(sc->sc_wmeParams[i]),
bwn_wme_shm_offsets[i]);
bwn_mac_enable(mac);
}
static void
bwn_wme_loadparams(struct bwn_mac *mac,
const struct wmeParams *p, uint16_t shm_offset)
{
#define SM(_v, _f) (((_v) << _f##_S) & _f)
struct bwn_softc *sc = mac->mac_sc;
uint16_t params[BWN_NR_WMEPARAMS];
int slot, tmp;
unsigned int i;
slot = BWN_READ_2(mac, BWN_RNG) &
SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN);
memset(&params, 0, sizeof(params));
DPRINTF(sc, BWN_DEBUG_WME, "wmep_txopLimit %d wmep_logcwmin %d "
"wmep_logcwmax %d wmep_aifsn %d\n", p->wmep_txopLimit,
p->wmep_logcwmin, p->wmep_logcwmax, p->wmep_aifsn);
params[BWN_WMEPARAM_TXOP] = p->wmep_txopLimit * 32;
params[BWN_WMEPARAM_CWMIN] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN);
params[BWN_WMEPARAM_CWMAX] = SM(p->wmep_logcwmax, WME_PARAM_LOGCWMAX);
params[BWN_WMEPARAM_CWCUR] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN);
params[BWN_WMEPARAM_AIFS] = p->wmep_aifsn;
params[BWN_WMEPARAM_BSLOTS] = slot;
params[BWN_WMEPARAM_REGGAP] = slot + p->wmep_aifsn;
for (i = 0; i < N(params); i++) {
if (i == BWN_WMEPARAM_STATUS) {
tmp = bwn_shm_read_2(mac, BWN_SHARED,
shm_offset + (i * 2));
tmp |= 0x100;
bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2),
tmp);
} else {
bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2),
params[i]);
}
}
}
static void
bwn_mac_write_bssid(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t tmp;
int i;
uint8_t mac_bssid[IEEE80211_ADDR_LEN * 2];
bwn_mac_setfilter(mac, BWN_MACFILTER_BSSID, sc->sc_bssid);
- memcpy(mac_bssid, sc->sc_macaddr, IEEE80211_ADDR_LEN);
+ memcpy(mac_bssid, sc->sc_ic.ic_macaddr, IEEE80211_ADDR_LEN);
memcpy(mac_bssid + IEEE80211_ADDR_LEN, sc->sc_bssid,
IEEE80211_ADDR_LEN);
for (i = 0; i < N(mac_bssid); i += sizeof(uint32_t)) {
tmp = (uint32_t) (mac_bssid[i + 0]);
tmp |= (uint32_t) (mac_bssid[i + 1]) << 8;
tmp |= (uint32_t) (mac_bssid[i + 2]) << 16;
tmp |= (uint32_t) (mac_bssid[i + 3]) << 24;
bwn_ram_write(mac, 0x20 + i, tmp);
}
}
static void
bwn_mac_setfilter(struct bwn_mac *mac, uint16_t offset,
const uint8_t *macaddr)
{
static const uint8_t zero[IEEE80211_ADDR_LEN] = { 0 };
uint16_t data;
if (!mac)
macaddr = zero;
offset |= 0x0020;
BWN_WRITE_2(mac, BWN_MACFILTER_CONTROL, offset);
data = macaddr[0];
data |= macaddr[1] << 8;
BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data);
data = macaddr[2];
data |= macaddr[3] << 8;
BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data);
data = macaddr[4];
data |= macaddr[5] << 8;
BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data);
}
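/*
 * Install a key into the hardware key table.  Per-station keys also
 * get their MAC address slot updated; the address is cleared before
 * the key material is written and set again afterwards.
 */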
static void
bwn_key_dowrite(struct bwn_mac *mac, uint8_t index, uint8_t algorithm,
const uint8_t *key, size_t key_len, const uint8_t *mac_addr)
{
uint8_t buf[BWN_SEC_KEYSIZE] = { 0, };
uint8_t per_sta_keys_start = 8;
if (BWN_SEC_NEWAPI(mac))
per_sta_keys_start = 4;
KASSERT(index < mac->mac_max_nr_keys,
("%s:%d: fail", __func__, __LINE__));
KASSERT(key_len <= BWN_SEC_KEYSIZE,
("%s:%d: fail", __func__, __LINE__));
if (index >= per_sta_keys_start)
bwn_key_macwrite(mac, index, NULL);
if (key)
memcpy(buf, key, key_len);
bwn_key_write(mac, index, algorithm, buf);
if (index >= per_sta_keys_start)
bwn_key_macwrite(mac, index, mac_addr);
mac->mac_key[index].algorithm = algorithm;
}
static void
bwn_key_macwrite(struct bwn_mac *mac, uint8_t index, const uint8_t *addr)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t addrtmp[2] = { 0, 0 };
uint8_t start = 8;
if (BWN_SEC_NEWAPI(mac))
start = 4;
KASSERT(index >= start,
("%s:%d: fail", __func__, __LINE__));
index -= start;
if (addr) {
addrtmp[0] = addr[0];
addrtmp[0] |= ((uint32_t) (addr[1]) << 8);
addrtmp[0] |= ((uint32_t) (addr[2]) << 16);
addrtmp[0] |= ((uint32_t) (addr[3]) << 24);
addrtmp[1] = addr[4];
addrtmp[1] |= ((uint32_t) (addr[5]) << 8);
}
if (siba_get_revid(sc->sc_dev) >= 5) {
bwn_shm_write_4(mac, BWN_RCMTA, (index * 2) + 0, addrtmp[0]);
bwn_shm_write_2(mac, BWN_RCMTA, (index * 2) + 1, addrtmp[1]);
} else {
if (index >= 8) {
bwn_shm_write_4(mac, BWN_SHARED,
BWN_SHARED_PSM + (index * 6) + 0, addrtmp[0]);
bwn_shm_write_2(mac, BWN_SHARED,
BWN_SHARED_PSM + (index * 6) + 4, addrtmp[1]);
}
}
}
static void
bwn_key_write(struct bwn_mac *mac, uint8_t index, uint8_t algorithm,
const uint8_t *key)
{
unsigned int i;
uint32_t offset;
uint16_t kidx, value;
kidx = BWN_SEC_KEY2FW(mac, index);
bwn_shm_write_2(mac, BWN_SHARED,
BWN_SHARED_KEYIDX_BLOCK + (kidx * 2), (kidx << 4) | algorithm);
offset = mac->mac_ktp + (index * BWN_SEC_KEYSIZE);
for (i = 0; i < BWN_SEC_KEYSIZE; i += 2) {
value = key[i];
value |= (uint16_t)(key[i + 1]) << 8;
bwn_shm_write_2(mac, BWN_SHARED, offset + i, value);
}
}
static void
bwn_phy_exit(struct bwn_mac *mac)
{
mac->mac_phy.rf_onoff(mac, 0);
if (mac->mac_phy.exit != NULL)
mac->mac_phy.exit(mac);
}
static void
bwn_dma_free(struct bwn_mac *mac)
{
struct bwn_dma *dma;
if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0)
return;
dma = &mac->mac_method.dma;
bwn_dma_ringfree(&dma->rx);
bwn_dma_ringfree(&dma->wme[WME_AC_BK]);
bwn_dma_ringfree(&dma->wme[WME_AC_BE]);
bwn_dma_ringfree(&dma->wme[WME_AC_VI]);
bwn_dma_ringfree(&dma->wme[WME_AC_VO]);
bwn_dma_ringfree(&dma->mcast);
}
static void
bwn_core_stop(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
BWN_ASSERT_LOCKED(sc);
if (mac->mac_status < BWN_MAC_STATUS_STARTED)
return;
callout_stop(&sc->sc_rfswitch_ch);
callout_stop(&sc->sc_task_ch);
callout_stop(&sc->sc_watchdog_ch);
sc->sc_watchdog_timer = 0;
BWN_WRITE_4(mac, BWN_INTR_MASK, 0);
BWN_READ_4(mac, BWN_INTR_MASK);
bwn_mac_suspend(mac);
mac->mac_status = BWN_MAC_STATUS_INITED;
}
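/*
 * Switch between the 2 GHz and 5 GHz capable cores.  The currently
 * active core is stopped and torn down as far as needed; the new core
 * inherits the gmode setting and is brought back to the same state.
 */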
static int
bwn_switch_band(struct bwn_softc *sc, struct ieee80211_channel *chan)
{
struct bwn_mac *up_dev = NULL;
struct bwn_mac *down_dev;
struct bwn_mac *mac;
int err, status;
uint8_t gmode;
BWN_ASSERT_LOCKED(sc);
TAILQ_FOREACH(mac, &sc->sc_maclist, mac_list) {
if (IEEE80211_IS_CHAN_2GHZ(chan) &&
mac->mac_phy.supports_2ghz) {
up_dev = mac;
gmode = 1;
} else if (IEEE80211_IS_CHAN_5GHZ(chan) &&
mac->mac_phy.supports_5ghz) {
up_dev = mac;
gmode = 0;
} else {
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (EINVAL);
}
if (up_dev != NULL)
break;
}
if (up_dev == NULL) {
device_printf(sc->sc_dev, "Could not find a device\n");
return (ENODEV);
}
if (up_dev == sc->sc_curmac && sc->sc_curmac->mac_phy.gmode == gmode)
return (0);
device_printf(sc->sc_dev, "switching to %s-GHz band\n",
IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5");
down_dev = sc->sc_curmac;
status = down_dev->mac_status;
if (status >= BWN_MAC_STATUS_STARTED)
bwn_core_stop(down_dev);
if (status >= BWN_MAC_STATUS_INITED)
bwn_core_exit(down_dev);
if (down_dev != up_dev)
bwn_phy_reset(down_dev);
up_dev->mac_phy.gmode = gmode;
if (status >= BWN_MAC_STATUS_INITED) {
err = bwn_core_init(up_dev);
if (err) {
device_printf(sc->sc_dev,
"fatal: failed to initialize for %s-GHz\n",
IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5");
goto fail;
}
}
if (status >= BWN_MAC_STATUS_STARTED)
bwn_core_start(up_dev);
KASSERT(up_dev->mac_status == status, ("%s: fail", __func__));
sc->sc_curmac = up_dev;
return (0);
fail:
sc->sc_curmac = NULL;
return (err);
}
static void
bwn_rf_turnon(struct bwn_mac *mac)
{
bwn_mac_suspend(mac);
mac->mac_phy.rf_onoff(mac, 1);
mac->mac_phy.rf_on = 1;
bwn_mac_enable(mac);
}
static void
bwn_rf_turnoff(struct bwn_mac *mac)
{
bwn_mac_suspend(mac);
mac->mac_phy.rf_onoff(mac, 0);
mac->mac_phy.rf_on = 0;
bwn_mac_enable(mac);
}
static void
bwn_phy_reset(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
siba_write_4(sc->sc_dev, SIBA_TGSLOW,
((siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~BWN_TGSLOW_SUPPORT_G) |
BWN_TGSLOW_PHYRESET) | SIBA_TGSLOW_FGC);
DELAY(1000);
siba_write_4(sc->sc_dev, SIBA_TGSLOW,
(siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~SIBA_TGSLOW_FGC) |
BWN_TGSLOW_PHYRESET);
DELAY(1000);
}
static int
bwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct bwn_vap *bvp = BWN_VAP(vap);
struct ieee80211com *ic= vap->iv_ic;
- struct ifnet *ifp = ic->ic_ifp;
enum ieee80211_state ostate = vap->iv_state;
struct bwn_softc *sc = ic->ic_softc;
struct bwn_mac *mac = sc->sc_curmac;
int error;
DPRINTF(sc, BWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate]);
error = bvp->bv_newstate(vap, nstate, arg);
if (error != 0)
return (error);
BWN_LOCK(sc);
bwn_led_newstate(mac, nstate);
/*
* Clear the BSSID when we stop a STA
*/
if (vap->iv_opmode == IEEE80211_M_STA) {
if (ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
/*
* Clear out the BSSID. If we reassociate to
* the same AP, this will reinialize things
* correctly...
*/
if (ic->ic_opmode == IEEE80211_M_STA &&
(sc->sc_flags & BWN_FLAG_INVALID) == 0) {
memset(sc->sc_bssid, 0, IEEE80211_ADDR_LEN);
bwn_set_macaddr(mac);
}
}
}
if (vap->iv_opmode == IEEE80211_M_MONITOR ||
vap->iv_opmode == IEEE80211_M_AHDEMO) {
/* XXX nothing to do? */
} else if (nstate == IEEE80211_S_RUN) {
memcpy(sc->sc_bssid, vap->iv_bss->ni_bssid, IEEE80211_ADDR_LEN);
- memcpy(sc->sc_macaddr, IF_LLADDR(ifp), IEEE80211_ADDR_LEN);
bwn_set_opmode(mac);
bwn_set_pretbtt(mac);
bwn_spu_setdelay(mac, 0);
bwn_set_macaddr(mac);
}
BWN_UNLOCK(sc);
return (error);
}
static void
bwn_set_pretbtt(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t pretbtt;
if (ic->ic_opmode == IEEE80211_M_IBSS)
pretbtt = 2;
else
pretbtt = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 120 : 250;
bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PRETBTT, pretbtt);
BWN_WRITE_2(mac, BWN_TSF_CFP_PRETBTT, pretbtt);
}
static int
bwn_intr(void *arg)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc = mac->mac_sc;
uint32_t reason;
if (mac->mac_status < BWN_MAC_STATUS_STARTED ||
(sc->sc_flags & BWN_FLAG_INVALID))
return (FILTER_STRAY);
reason = BWN_READ_4(mac, BWN_INTR_REASON);
if (reason == 0xffffffff) /* shared IRQ */
return (FILTER_STRAY);
reason &= mac->mac_intr_mask;
if (reason == 0)
return (FILTER_HANDLED);
mac->mac_reason[0] = BWN_READ_4(mac, BWN_DMA0_REASON) & 0x0001dc00;
mac->mac_reason[1] = BWN_READ_4(mac, BWN_DMA1_REASON) & 0x0000dc00;
mac->mac_reason[2] = BWN_READ_4(mac, BWN_DMA2_REASON) & 0x0000dc00;
mac->mac_reason[3] = BWN_READ_4(mac, BWN_DMA3_REASON) & 0x0001dc00;
mac->mac_reason[4] = BWN_READ_4(mac, BWN_DMA4_REASON) & 0x0000dc00;
BWN_WRITE_4(mac, BWN_INTR_REASON, reason);
BWN_WRITE_4(mac, BWN_DMA0_REASON, mac->mac_reason[0]);
BWN_WRITE_4(mac, BWN_DMA1_REASON, mac->mac_reason[1]);
BWN_WRITE_4(mac, BWN_DMA2_REASON, mac->mac_reason[2]);
BWN_WRITE_4(mac, BWN_DMA3_REASON, mac->mac_reason[3]);
BWN_WRITE_4(mac, BWN_DMA4_REASON, mac->mac_reason[4]);
/* Disable interrupts. */
BWN_WRITE_4(mac, BWN_INTR_MASK, 0);
mac->mac_reason_intr = reason;
BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ);
BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE);
taskqueue_enqueue_fast(sc->sc_tq, &mac->mac_intrtask);
return (FILTER_HANDLED);
}
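/*
 * Deferred interrupt handler (runs from the driver taskqueue).  Deals
 * with error conditions, beacon/TBTT/PMQ events, RX for both DMA and
 * PIO, TX completions and LED blinking, then re-enables interrupts and
 * restarts transmission if frames are queued.
 */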
static void
bwn_intrtask(void *arg, int npending)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
uint32_t merged = 0;
int i, tx = 0, rx = 0;
BWN_LOCK(sc);
if (mac->mac_status < BWN_MAC_STATUS_STARTED ||
(sc->sc_flags & BWN_FLAG_INVALID)) {
BWN_UNLOCK(sc);
return;
}
for (i = 0; i < N(mac->mac_reason); i++)
merged |= mac->mac_reason[i];
if (mac->mac_reason_intr & BWN_INTR_MAC_TXERR)
device_printf(sc->sc_dev, "MAC trans error\n");
if (mac->mac_reason_intr & BWN_INTR_PHY_TXERR) {
DPRINTF(sc, BWN_DEBUG_INTR, "%s: PHY trans error\n", __func__);
mac->mac_phy.txerrors--;
if (mac->mac_phy.txerrors == 0) {
mac->mac_phy.txerrors = BWN_TXERROR_MAX;
bwn_restart(mac, "PHY TX errors");
}
}
if (merged & (BWN_DMAINTR_FATALMASK | BWN_DMAINTR_NONFATALMASK)) {
if (merged & BWN_DMAINTR_FATALMASK) {
device_printf(sc->sc_dev,
"Fatal DMA error: %#x %#x %#x %#x %#x %#x\n",
mac->mac_reason[0], mac->mac_reason[1],
mac->mac_reason[2], mac->mac_reason[3],
mac->mac_reason[4], mac->mac_reason[5]);
bwn_restart(mac, "DMA error");
BWN_UNLOCK(sc);
return;
}
if (merged & BWN_DMAINTR_NONFATALMASK) {
device_printf(sc->sc_dev,
"DMA error: %#x %#x %#x %#x %#x %#x\n",
mac->mac_reason[0], mac->mac_reason[1],
mac->mac_reason[2], mac->mac_reason[3],
mac->mac_reason[4], mac->mac_reason[5]);
}
}
if (mac->mac_reason_intr & BWN_INTR_UCODE_DEBUG)
bwn_intr_ucode_debug(mac);
if (mac->mac_reason_intr & BWN_INTR_TBTT_INDI)
bwn_intr_tbtt_indication(mac);
if (mac->mac_reason_intr & BWN_INTR_ATIM_END)
bwn_intr_atim_end(mac);
if (mac->mac_reason_intr & BWN_INTR_BEACON)
bwn_intr_beacon(mac);
if (mac->mac_reason_intr & BWN_INTR_PMQ)
bwn_intr_pmq(mac);
if (mac->mac_reason_intr & BWN_INTR_NOISESAMPLE_OK)
bwn_intr_noise(mac);
if (mac->mac_flags & BWN_MAC_FLAG_DMA) {
if (mac->mac_reason[0] & BWN_DMAINTR_RX_DONE) {
bwn_dma_rx(mac->mac_method.dma.rx);
rx = 1;
}
} else
rx = bwn_pio_rx(&mac->mac_method.pio.rx);
KASSERT(!(mac->mac_reason[1] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
KASSERT(!(mac->mac_reason[2] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
KASSERT(!(mac->mac_reason[3] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
KASSERT(!(mac->mac_reason[4] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
KASSERT(!(mac->mac_reason[5] & BWN_DMAINTR_RX_DONE), ("%s", __func__));
if (mac->mac_reason_intr & BWN_INTR_TX_OK) {
bwn_intr_txeof(mac);
tx = 1;
}
BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask);
if (sc->sc_blink_led != NULL && sc->sc_led_blink) {
int evt = BWN_LED_EVENT_NONE;
if (tx && rx) {
if (sc->sc_rx_rate > sc->sc_tx_rate)
evt = BWN_LED_EVENT_RX;
else
evt = BWN_LED_EVENT_TX;
} else if (tx) {
evt = BWN_LED_EVENT_TX;
} else if (rx) {
evt = BWN_LED_EVENT_RX;
} else if (rx == 0) {
evt = BWN_LED_EVENT_POLL;
}
if (evt != BWN_LED_EVENT_NONE)
bwn_led_event(mac, evt);
}
- if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
- if (!IFQ_IS_EMPTY(&ifp->if_snd))
- bwn_start_locked(ifp);
- }
+ if (mbufq_first(&sc->sc_snd) != NULL)
+ bwn_start(sc);
BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ);
BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE);
BWN_UNLOCK(sc);
}
static void
bwn_restart(struct bwn_mac *mac, const char *msg)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
if (mac->mac_status < BWN_MAC_STATUS_INITED)
return;
device_printf(sc->sc_dev, "HW reset: %s\n", msg);
ieee80211_runtask(ic, &mac->mac_hwreset);
}
static void
bwn_intr_ucode_debug(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint16_t reason;
if (mac->mac_fw.opensource == 0)
return;
reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG);
switch (reason) {
case BWN_DEBUGINTR_PANIC:
bwn_handle_fwpanic(mac);
break;
case BWN_DEBUGINTR_DUMP_SHM:
device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_SHM\n");
break;
case BWN_DEBUGINTR_DUMP_REGS:
device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_REGS\n");
break;
case BWN_DEBUGINTR_MARKER:
device_printf(sc->sc_dev, "BWN_DEBUGINTR_MARKER\n");
break;
default:
device_printf(sc->sc_dev,
"ucode debug unknown reason: %#x\n", reason);
}
bwn_shm_write_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG,
BWN_DEBUGINTR_ACK);
}
static void
bwn_intr_tbtt_indication(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
if (ic->ic_opmode != IEEE80211_M_HOSTAP)
bwn_psctl(mac, 0);
if (ic->ic_opmode == IEEE80211_M_IBSS)
mac->mac_flags |= BWN_MAC_FLAG_DFQVALID;
}
static void
bwn_intr_atim_end(struct bwn_mac *mac)
{
if (mac->mac_flags & BWN_MAC_FLAG_DFQVALID) {
BWN_WRITE_4(mac, BWN_MACCMD,
BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_DFQ_VALID);
mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID;
}
}
static void
bwn_intr_beacon(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t cmd, beacon0, beacon1;
if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
ic->ic_opmode == IEEE80211_M_MBSS)
return;
mac->mac_intr_mask &= ~BWN_INTR_BEACON;
cmd = BWN_READ_4(mac, BWN_MACCMD);
beacon0 = (cmd & BWN_MACCMD_BEACON0_VALID);
beacon1 = (cmd & BWN_MACCMD_BEACON1_VALID);
if (beacon0 && beacon1) {
BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_BEACON);
mac->mac_intr_mask |= BWN_INTR_BEACON;
return;
}
if (sc->sc_flags & BWN_FLAG_NEED_BEACON_TP) {
sc->sc_flags &= ~BWN_FLAG_NEED_BEACON_TP;
bwn_load_beacon0(mac);
bwn_load_beacon1(mac);
cmd = BWN_READ_4(mac, BWN_MACCMD);
cmd |= BWN_MACCMD_BEACON0_VALID;
BWN_WRITE_4(mac, BWN_MACCMD, cmd);
} else {
if (!beacon0) {
bwn_load_beacon0(mac);
cmd = BWN_READ_4(mac, BWN_MACCMD);
cmd |= BWN_MACCMD_BEACON0_VALID;
BWN_WRITE_4(mac, BWN_MACCMD, cmd);
} else if (!beacon1) {
bwn_load_beacon1(mac);
cmd = BWN_READ_4(mac, BWN_MACCMD);
cmd |= BWN_MACCMD_BEACON1_VALID;
BWN_WRITE_4(mac, BWN_MACCMD, cmd);
}
}
}
static void
bwn_intr_pmq(struct bwn_mac *mac)
{
uint32_t tmp;
while (1) {
tmp = BWN_READ_4(mac, BWN_PS_STATUS);
if (!(tmp & 0x00000008))
break;
}
BWN_WRITE_2(mac, BWN_PS_STATUS, 0x0002);
}
static void
bwn_intr_noise(struct bwn_mac *mac)
{
struct bwn_phy_g *pg = &mac->mac_phy.phy_g;
uint16_t tmp;
uint8_t noise[4];
uint8_t i, j;
int32_t average;
if (mac->mac_phy.type != BWN_PHYTYPE_G)
return;
KASSERT(mac->mac_noise.noi_running, ("%s: fail", __func__));
*((uint32_t *)noise) = htole32(bwn_jssi_read(mac));
if (noise[0] == 0x7f || noise[1] == 0x7f || noise[2] == 0x7f ||
noise[3] == 0x7f)
goto new;
KASSERT(mac->mac_noise.noi_nsamples < 8,
("%s:%d: fail", __func__, __LINE__));
i = mac->mac_noise.noi_nsamples;
noise[0] = MIN(MAX(noise[0], 0), N(pg->pg_nrssi_lt) - 1);
noise[1] = MIN(MAX(noise[1], 0), N(pg->pg_nrssi_lt) - 1);
noise[2] = MIN(MAX(noise[2], 0), N(pg->pg_nrssi_lt) - 1);
noise[3] = MIN(MAX(noise[3], 0), N(pg->pg_nrssi_lt) - 1);
mac->mac_noise.noi_samples[i][0] = pg->pg_nrssi_lt[noise[0]];
mac->mac_noise.noi_samples[i][1] = pg->pg_nrssi_lt[noise[1]];
mac->mac_noise.noi_samples[i][2] = pg->pg_nrssi_lt[noise[2]];
mac->mac_noise.noi_samples[i][3] = pg->pg_nrssi_lt[noise[3]];
mac->mac_noise.noi_nsamples++;
if (mac->mac_noise.noi_nsamples == 8) {
average = 0;
for (i = 0; i < 8; i++) {
for (j = 0; j < 4; j++)
average += mac->mac_noise.noi_samples[i][j];
}
average = (((average / 32) * 125) + 64) / 128;
tmp = (bwn_shm_read_2(mac, BWN_SHARED, 0x40c) / 128) & 0x1f;
if (tmp >= 8)
average += 2;
else
average -= 25;
average -= (tmp == 8) ? 72 : 48;
mac->mac_stats.link_noise = average;
mac->mac_noise.noi_running = 0;
return;
}
new:
bwn_noise_gensample(mac);
}
static int
bwn_pio_rx(struct bwn_pio_rxqueue *prq)
{
struct bwn_mac *mac = prq->prq_mac;
struct bwn_softc *sc = mac->mac_sc;
unsigned int i;
BWN_ASSERT_LOCKED(sc);
if (mac->mac_status < BWN_MAC_STATUS_STARTED)
return (0);
for (i = 0; i < 5000; i++) {
if (bwn_pio_rxeof(prq) == 0)
break;
}
if (i >= 5000)
device_printf(sc->sc_dev, "too many RX frames in PIO mode\n");
return ((i > 0) ? 1 : 0);
}
static void
bwn_dma_rx(struct bwn_dma_ring *dr)
{
int slot, curslot;
KASSERT(!dr->dr_tx, ("%s:%d: fail", __func__, __LINE__));
curslot = dr->get_curslot(dr);
KASSERT(curslot >= 0 && curslot < dr->dr_numslots,
("%s:%d: fail", __func__, __LINE__));
slot = dr->dr_curslot;
for (; slot != curslot; slot = bwn_dma_nextslot(dr, slot))
bwn_dma_rxeof(dr, &slot);
bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap,
BUS_DMASYNC_PREWRITE);
dr->set_curslot(dr, slot);
dr->dr_curslot = slot;
}
static void
bwn_intr_txeof(struct bwn_mac *mac)
{
struct bwn_txstatus stat;
uint32_t stat0, stat1;
uint16_t tmp;
BWN_ASSERT_LOCKED(mac->mac_sc);
while (1) {
stat0 = BWN_READ_4(mac, BWN_XMITSTAT_0);
if (!(stat0 & 0x00000001))
break;
stat1 = BWN_READ_4(mac, BWN_XMITSTAT_1);
stat.cookie = (stat0 >> 16);
stat.seq = (stat1 & 0x0000ffff);
stat.phy_stat = ((stat1 & 0x00ff0000) >> 16);
tmp = (stat0 & 0x0000ffff);
stat.framecnt = ((tmp & 0xf000) >> 12);
stat.rtscnt = ((tmp & 0x0f00) >> 8);
stat.sreason = ((tmp & 0x001c) >> 2);
stat.pm = (tmp & 0x0080) ? 1 : 0;
stat.im = (tmp & 0x0040) ? 1 : 0;
stat.ampdu = (tmp & 0x0020) ? 1 : 0;
stat.ack = (tmp & 0x0002) ? 1 : 0;
bwn_handle_txeof(mac, &stat);
}
}
static void
bwn_hwreset(void *arg, int npending)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc = mac->mac_sc;
int error = 0;
int prev_status;
BWN_LOCK(sc);
prev_status = mac->mac_status;
if (prev_status >= BWN_MAC_STATUS_STARTED)
bwn_core_stop(mac);
if (prev_status >= BWN_MAC_STATUS_INITED)
bwn_core_exit(mac);
if (prev_status >= BWN_MAC_STATUS_INITED) {
error = bwn_core_init(mac);
if (error)
goto out;
}
if (prev_status >= BWN_MAC_STATUS_STARTED)
bwn_core_start(mac);
out:
if (error) {
device_printf(sc->sc_dev, "%s: failed (%d)\n", __func__, error);
sc->sc_curmac = NULL;
}
BWN_UNLOCK(sc);
}
static void
bwn_handle_fwpanic(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
uint16_t reason;
reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_FWPANIC_REASON_REG);
device_printf(sc->sc_dev,"fw panic (%u)\n", reason);
if (reason == BWN_FWPANIC_RESTART)
bwn_restart(mac, "ucode panic");
}
static void
bwn_load_beacon0(struct bwn_mac *mac)
{
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
static void
bwn_load_beacon1(struct bwn_mac *mac)
{
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
static uint32_t
bwn_jssi_read(struct bwn_mac *mac)
{
uint32_t val = 0;
val = bwn_shm_read_2(mac, BWN_SHARED, 0x08a);
val <<= 16;
val |= bwn_shm_read_2(mac, BWN_SHARED, 0x088);
return (val);
}
static void
bwn_noise_gensample(struct bwn_mac *mac)
{
uint32_t jssi = 0x7f7f7f7f;
bwn_shm_write_2(mac, BWN_SHARED, 0x088, (jssi & 0x0000ffff));
bwn_shm_write_2(mac, BWN_SHARED, 0x08a, (jssi & 0xffff0000) >> 16);
BWN_WRITE_4(mac, BWN_MACCMD,
BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_BGNOISE);
}
static int
bwn_dma_freeslot(struct bwn_dma_ring *dr)
{
BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc);
return (dr->dr_numslots - dr->dr_usedslot);
}
static int
bwn_dma_nextslot(struct bwn_dma_ring *dr, int slot)
{
BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc);
KASSERT(slot >= -1 && slot <= dr->dr_numslots - 1,
("%s:%d: fail", __func__, __LINE__));
if (slot == dr->dr_numslots - 1)
return (0);
return (slot + 1);
}
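/*
 * Process one received frame from a DMA RX slot: replace the mbuf,
 * validate the length, redzone and MAC status, strip the RX header and
 * hand the frame to bwn_rxeof().
 */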
static void
bwn_dma_rxeof(struct bwn_dma_ring *dr, int *slot)
{
struct bwn_mac *mac = dr->dr_mac;
struct bwn_softc *sc = mac->mac_sc;
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_dmadesc_generic *desc;
struct bwn_dmadesc_meta *meta;
struct bwn_rxhdr4 *rxhdr;
- struct ifnet *ifp = sc->sc_ifp;
struct mbuf *m;
uint32_t macstat;
int32_t tmp;
int cnt = 0;
uint16_t len;
dr->getdesc(dr, *slot, &desc, &meta);
bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_POSTREAD);
m = meta->mt_m;
if (bwn_dma_newbuf(dr, desc, meta, 0)) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_ierrors, 1);
return;
}
rxhdr = mtod(m, struct bwn_rxhdr4 *);
len = le16toh(rxhdr->frame_len);
if (len <= 0) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_ierrors, 1);
return;
}
if (bwn_dma_check_redzone(dr, m)) {
device_printf(sc->sc_dev, "redzone error.\n");
bwn_dma_set_redzone(dr, m);
bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap,
BUS_DMASYNC_PREWRITE);
return;
}
if (len > dr->dr_rx_bufsize) {
tmp = len;
while (1) {
dr->getdesc(dr, *slot, &desc, &meta);
bwn_dma_set_redzone(dr, meta->mt_m);
bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap,
BUS_DMASYNC_PREWRITE);
*slot = bwn_dma_nextslot(dr, *slot);
cnt++;
tmp -= dr->dr_rx_bufsize;
if (tmp <= 0)
break;
}
device_printf(sc->sc_dev, "too small buffer "
"(len %u buffer %u dropped %d)\n",
len, dr->dr_rx_bufsize, cnt);
return;
}
macstat = le32toh(rxhdr->mac_status);
if (macstat & BWN_RX_MAC_FCSERR) {
if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) {
device_printf(sc->sc_dev, "RX drop\n");
return;
}
}
- m->m_pkthdr.rcvif = ifp;
m->m_len = m->m_pkthdr.len = len + dr->dr_frameoffset;
m_adj(m, dr->dr_frameoffset);
bwn_rxeof(dr->dr_mac, m, rxhdr);
}
static void
bwn_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status)
{
struct bwn_dma_ring *dr;
struct bwn_dmadesc_generic *desc;
struct bwn_dmadesc_meta *meta;
struct bwn_pio_txqueue *tq;
struct bwn_pio_txpkt *tp = NULL;
struct bwn_softc *sc = mac->mac_sc;
struct bwn_stats *stats = &mac->mac_stats;
struct ieee80211_node *ni;
struct ieee80211vap *vap;
int retrycnt = 0, slot;
BWN_ASSERT_LOCKED(mac->mac_sc);
if (status->im)
device_printf(sc->sc_dev, "TODO: STATUS IM\n");
if (status->ampdu)
device_printf(sc->sc_dev, "TODO: STATUS AMPDU\n");
if (status->rtscnt) {
if (status->rtscnt == 0xf)
stats->rtsfail++;
else
stats->rts++;
}
if (mac->mac_flags & BWN_MAC_FLAG_DMA) {
if (status->ack) {
dr = bwn_dma_parse_cookie(mac, status,
status->cookie, &slot);
if (dr == NULL) {
device_printf(sc->sc_dev,
"failed to parse cookie\n");
return;
}
while (1) {
dr->getdesc(dr, slot, &desc, &meta);
if (meta->mt_islast) {
ni = meta->mt_ni;
vap = ni->ni_vap;
ieee80211_ratectl_tx_complete(vap, ni,
status->ack ?
IEEE80211_RATECTL_TX_SUCCESS :
IEEE80211_RATECTL_TX_FAILURE,
&retrycnt, 0);
break;
}
slot = bwn_dma_nextslot(dr, slot);
}
}
bwn_dma_handle_txeof(mac, status);
} else {
if (status->ack) {
tq = bwn_pio_parse_cookie(mac, status->cookie, &tp);
if (tq == NULL) {
device_printf(sc->sc_dev,
"failed to parse cookie\n");
return;
}
ni = tp->tp_ni;
vap = ni->ni_vap;
ieee80211_ratectl_tx_complete(vap, ni,
status->ack ?
IEEE80211_RATECTL_TX_SUCCESS :
IEEE80211_RATECTL_TX_FAILURE,
&retrycnt, 0);
}
bwn_pio_handle_txeof(mac, status);
}
bwn_phy_txpower_check(mac, 0);
}
static uint8_t
bwn_pio_rxeof(struct bwn_pio_rxqueue *prq)
{
struct bwn_mac *mac = prq->prq_mac;
struct bwn_softc *sc = mac->mac_sc;
struct bwn_rxhdr4 rxhdr;
- struct ifnet *ifp = sc->sc_ifp;
struct mbuf *m;
uint32_t ctl32, macstat, v32;
unsigned int i, padding;
uint16_t ctl16, len, totlen, v16;
unsigned char *mp;
char *data;
memset(&rxhdr, 0, sizeof(rxhdr));
if (prq->prq_rev >= 8) {
ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL);
if (!(ctl32 & BWN_PIO8_RXCTL_FRAMEREADY))
return (0);
bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL,
BWN_PIO8_RXCTL_FRAMEREADY);
for (i = 0; i < 10; i++) {
ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL);
if (ctl32 & BWN_PIO8_RXCTL_DATAREADY)
goto ready;
DELAY(10);
}
} else {
ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL);
if (!(ctl16 & BWN_PIO_RXCTL_FRAMEREADY))
return (0);
bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL,
BWN_PIO_RXCTL_FRAMEREADY);
for (i = 0; i < 10; i++) {
ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL);
if (ctl16 & BWN_PIO_RXCTL_DATAREADY)
goto ready;
DELAY(10);
}
}
device_printf(sc->sc_dev, "%s: timed out\n", __func__);
return (1);
ready:
if (prq->prq_rev >= 8)
siba_read_multi_4(sc->sc_dev, &rxhdr, sizeof(rxhdr),
prq->prq_base + BWN_PIO8_RXDATA);
else
siba_read_multi_2(sc->sc_dev, &rxhdr, sizeof(rxhdr),
prq->prq_base + BWN_PIO_RXDATA);
len = le16toh(rxhdr.frame_len);
if (len > 0x700) {
device_printf(sc->sc_dev, "%s: len is too big\n", __func__);
goto error;
}
if (len == 0) {
device_printf(sc->sc_dev, "%s: len is 0\n", __func__);
goto error;
}
macstat = le32toh(rxhdr.mac_status);
if (macstat & BWN_RX_MAC_FCSERR) {
if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) {
device_printf(sc->sc_dev, "%s: FCS error", __func__);
goto error;
}
}
padding = (macstat & BWN_RX_MAC_PADDING) ? 2 : 0;
totlen = len + padding;
KASSERT(totlen <= MCLBYTES, ("too big..\n"));
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
device_printf(sc->sc_dev, "%s: out of memory", __func__);
goto error;
}
mp = mtod(m, unsigned char *);
if (prq->prq_rev >= 8) {
siba_read_multi_4(sc->sc_dev, mp, (totlen & ~3),
prq->prq_base + BWN_PIO8_RXDATA);
if (totlen & 3) {
v32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXDATA);
data = &(mp[totlen - 1]);
switch (totlen & 3) {
case 3:
*data = (v32 >> 16);
data--;
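/* FALLTHROUGH */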
case 2:
*data = (v32 >> 8);
data--;
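/* FALLTHROUGH */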
case 1:
*data = v32;
}
}
} else {
siba_read_multi_2(sc->sc_dev, mp, (totlen & ~1),
prq->prq_base + BWN_PIO_RXDATA);
if (totlen & 1) {
v16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXDATA);
mp[totlen - 1] = v16;
}
}
- m->m_pkthdr.rcvif = ifp;
m->m_len = m->m_pkthdr.len = totlen;
bwn_rxeof(prq->prq_mac, m, &rxhdr);
return (1);
error:
if (prq->prq_rev >= 8)
bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL,
BWN_PIO8_RXCTL_DATAREADY);
else
bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_DATAREADY);
return (1);
}
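/*
 * Allocate and map a fresh RX mbuf cluster for a descriptor, swapping
 * in the spare DMA map.  On failure the old buffer is recycled (its
 * header cleared and the descriptor rewritten) so the ring stays
 * valid.
 */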
static int
bwn_dma_newbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc,
struct bwn_dmadesc_meta *meta, int init)
{
struct bwn_mac *mac = dr->dr_mac;
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_rxhdr4 *hdr;
bus_dmamap_t map;
bus_addr_t paddr;
struct mbuf *m;
int error;
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
error = ENOBUFS;
/*
* If the NIC is up and running, we need to:
* - Clear RX buffer's header.
* - Restore RX descriptor settings.
*/
if (init)
return (error);
else
goto back;
}
m->m_len = m->m_pkthdr.len = MCLBYTES;
bwn_dma_set_redzone(dr, m);
/*
* Try to load RX buf into temporary DMA map
*/
error = bus_dmamap_load_mbuf(dma->rxbuf_dtag, dr->dr_spare_dmap, m,
bwn_dma_buf_addr, &paddr, BUS_DMA_NOWAIT);
if (error) {
m_freem(m);
/*
* See the comment above
*/
if (init)
return (error);
else
goto back;
}
if (!init)
bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap);
meta->mt_m = m;
meta->mt_paddr = paddr;
/*
* Swap RX buf's DMA map with the loaded temporary one
*/
map = meta->mt_dmap;
meta->mt_dmap = dr->dr_spare_dmap;
dr->dr_spare_dmap = map;
back:
/*
* Clear RX buf header
*/
hdr = mtod(meta->mt_m, struct bwn_rxhdr4 *);
bzero(hdr, sizeof(*hdr));
bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap,
BUS_DMASYNC_PREWRITE);
/*
* Setup RX buf descriptor
*/
dr->setdesc(dr, desc, meta->mt_paddr, meta->mt_m->m_len -
sizeof(*hdr), 0, 0, 0);
return (error);
}
static void
bwn_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg,
bus_size_t mapsz __unused, int error)
{
if (!error) {
KASSERT(nseg == 1, ("too many segments(%d)\n", nseg));
*((bus_addr_t *)arg) = seg->ds_addr;
}
}
static int
bwn_hwrate2ieeerate(int rate)
{
switch (rate) {
case BWN_CCK_RATE_1MB:
return (2);
case BWN_CCK_RATE_2MB:
return (4);
case BWN_CCK_RATE_5MB:
return (11);
case BWN_CCK_RATE_11MB:
return (22);
case BWN_OFDM_RATE_6MB:
return (12);
case BWN_OFDM_RATE_9MB:
return (18);
case BWN_OFDM_RATE_12MB:
return (24);
case BWN_OFDM_RATE_18MB:
return (36);
case BWN_OFDM_RATE_24MB:
return (48);
case BWN_OFDM_RATE_36MB:
return (72);
case BWN_OFDM_RATE_48MB:
return (96);
case BWN_OFDM_RATE_54MB:
return (108);
default:
printf("Ooops\n");
return (0);
}
}
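/*
 * Common RX path for DMA and PIO: parse the RX header and PLCP, drop
 * frames that fail the basic sanity/decryption checks, fill in the
 * radiotap header and pass the frame up to net80211.
 */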
static void
bwn_rxeof(struct bwn_mac *mac, struct mbuf *m, const void *_rxhdr)
{
const struct bwn_rxhdr4 *rxhdr = _rxhdr;
struct bwn_plcp6 *plcp;
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211_frame_min *wh;
struct ieee80211_node *ni;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t macstat;
int padding, rate, rssi = 0, noise = 0, type;
uint16_t phytype, phystat0, phystat3, chanstat;
unsigned char *mp = mtod(m, unsigned char *);
static int rx_mac_dec_rpt = 0;
BWN_ASSERT_LOCKED(sc);
phystat0 = le16toh(rxhdr->phy_status0);
phystat3 = le16toh(rxhdr->phy_status3);
macstat = le32toh(rxhdr->mac_status);
chanstat = le16toh(rxhdr->channel);
phytype = chanstat & BWN_RX_CHAN_PHYTYPE;
if (macstat & BWN_RX_MAC_FCSERR)
device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_FCS_CRC\n");
if (phystat0 & (BWN_RX_PHYST0_PLCPHCF | BWN_RX_PHYST0_PLCPFV))
device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_PLCP_CRC\n");
if (macstat & BWN_RX_MAC_DECERR)
goto drop;
padding = (macstat & BWN_RX_MAC_PADDING) ? 2 : 0;
if (m->m_pkthdr.len < (sizeof(struct bwn_plcp6) + padding)) {
device_printf(sc->sc_dev, "frame too short (length=%d)\n",
m->m_pkthdr.len);
goto drop;
}
plcp = (struct bwn_plcp6 *)(mp + padding);
m_adj(m, sizeof(struct bwn_plcp6) + padding);
if (m->m_pkthdr.len < IEEE80211_MIN_LEN) {
device_printf(sc->sc_dev, "frame too short (length=%d)\n",
m->m_pkthdr.len);
goto drop;
}
wh = mtod(m, struct ieee80211_frame_min *);
if (macstat & BWN_RX_MAC_DEC && rx_mac_dec_rpt++ < 50)
device_printf(sc->sc_dev,
"RX decryption attempted (old %d keyidx %#x)\n",
BWN_ISOLDFMT(mac),
(macstat & BWN_RX_MAC_KEYIDX) >> BWN_RX_MAC_KEYIDX_SHIFT);
/* XXX calculating RSSI & noise & antenna */
if (phystat0 & BWN_RX_PHYST0_OFDM)
rate = bwn_plcp_get_ofdmrate(mac, plcp,
phytype == BWN_PHYTYPE_A);
else
rate = bwn_plcp_get_cckrate(mac, plcp);
if (rate == -1) {
if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADPLCP))
goto drop;
}
sc->sc_rx_rate = bwn_hwrate2ieeerate(rate);
/* RX radio tap */
if (ieee80211_radiotap_active(ic))
bwn_rx_radiotap(mac, m, rxhdr, plcp, rate, rssi, noise);
m_adj(m, -IEEE80211_CRC_LEN);
rssi = rxhdr->phy.abg.rssi; /* XXX incorrect RSSI calculation? */
noise = mac->mac_stats.link_noise;
- if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
-
BWN_UNLOCK(sc);
ni = ieee80211_find_rxnode(ic, wh);
if (ni != NULL) {
type = ieee80211_input(ni, m, rssi, noise);
ieee80211_free_node(ni);
} else
type = ieee80211_input_all(ic, m, rssi, noise);
BWN_LOCK(sc);
return;
drop:
device_printf(sc->sc_dev, "%s: dropped\n", __func__);
}
static void
bwn_dma_handle_txeof(struct bwn_mac *mac,
const struct bwn_txstatus *status)
{
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_dma_ring *dr;
struct bwn_dmadesc_generic *desc;
struct bwn_dmadesc_meta *meta;
struct bwn_softc *sc = mac->mac_sc;
- struct ieee80211_node *ni;
- struct ifnet *ifp = sc->sc_ifp;
- struct mbuf *m;
int slot;
BWN_ASSERT_LOCKED(sc);
dr = bwn_dma_parse_cookie(mac, status, status->cookie, &slot);
if (dr == NULL) {
device_printf(sc->sc_dev, "failed to parse cookie\n");
return;
}
KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__));
while (1) {
KASSERT(slot >= 0 && slot < dr->dr_numslots,
("%s:%d: fail", __func__, __LINE__));
dr->getdesc(dr, slot, &desc, &meta);
if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER)
bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap);
else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY)
bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap);
if (meta->mt_islast) {
KASSERT(meta->mt_m != NULL,
("%s:%d: fail", __func__, __LINE__));
- ni = meta->mt_ni;
- m = meta->mt_m;
- if (ni != NULL) {
- /*
- * Do any tx complete callback. Note this must
- * be done before releasing the node reference.
- */
- if (m->m_flags & M_TXCB)
- ieee80211_process_callback(ni, m, 0);
- ieee80211_free_node(ni);
- meta->mt_ni = NULL;
- }
- m_freem(m);
+ ieee80211_tx_complete(meta->mt_ni, meta->mt_m, 0);
+ meta->mt_ni = NULL;
meta->mt_m = NULL;
- } else {
+ } else
KASSERT(meta->mt_m == NULL,
("%s:%d: fail", __func__, __LINE__));
- }
dr->dr_usedslot--;
- if (meta->mt_islast) {
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
+ if (meta->mt_islast)
break;
- }
slot = bwn_dma_nextslot(dr, slot);
}
sc->sc_watchdog_timer = 0;
if (dr->dr_stop) {
KASSERT(bwn_dma_freeslot(dr) >= BWN_TX_SLOTS_PER_FRAME,
("%s:%d: fail", __func__, __LINE__));
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
dr->dr_stop = 0;
}
}
static void
bwn_pio_handle_txeof(struct bwn_mac *mac,
const struct bwn_txstatus *status)
{
struct bwn_pio_txqueue *tq;
struct bwn_pio_txpkt *tp = NULL;
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
BWN_ASSERT_LOCKED(sc);
tq = bwn_pio_parse_cookie(mac, status->cookie, &tp);
if (tq == NULL)
return;
tq->tq_used -= roundup(tp->tp_m->m_pkthdr.len + BWN_HDRSIZE(mac), 4);
tq->tq_free++;
if (tp->tp_ni != NULL) {
/*
* Do any tx complete callback. Note this must
* be done before releasing the node reference.
*/
if (tp->tp_m->m_flags & M_TXCB)
ieee80211_process_callback(tp->tp_ni, tp->tp_m, 0);
ieee80211_free_node(tp->tp_ni);
tp->tp_ni = NULL;
}
m_freem(tp->tp_m);
tp->tp_m = NULL;
TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list);
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
-
sc->sc_watchdog_timer = 0;
- if (tq->tq_stop) {
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- tq->tq_stop = 0;
- }
}
static void
bwn_phy_txpower_check(struct bwn_mac *mac, uint32_t flags)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy *phy = &mac->mac_phy;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
unsigned long now;
int result;
BWN_GETTIME(now);
if (!(flags & BWN_TXPWR_IGNORE_TIME) && time_before(now, phy->nexttime))
return;
phy->nexttime = now + 2 * 1000;
if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM &&
siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306)
return;
if (phy->recalc_txpwr != NULL) {
result = phy->recalc_txpwr(mac,
(flags & BWN_TXPWR_IGNORE_TSSI) ? 1 : 0);
if (result == BWN_TXPWR_RES_DONE)
return;
KASSERT(result == BWN_TXPWR_RES_NEED_ADJUST,
("%s: fail", __func__));
KASSERT(phy->set_txpwr != NULL, ("%s: fail", __func__));
ieee80211_runtask(ic, &mac->mac_txpower);
}
}
static uint16_t
bwn_pio_rx_read_2(struct bwn_pio_rxqueue *prq, uint16_t offset)
{
return (BWN_READ_2(prq->prq_mac, prq->prq_base + offset));
}
static uint32_t
bwn_pio_rx_read_4(struct bwn_pio_rxqueue *prq, uint16_t offset)
{
return (BWN_READ_4(prq->prq_mac, prq->prq_base + offset));
}
static void
bwn_pio_rx_write_2(struct bwn_pio_rxqueue *prq, uint16_t offset, uint16_t value)
{
BWN_WRITE_2(prq->prq_mac, prq->prq_base + offset, value);
}
static void
bwn_pio_rx_write_4(struct bwn_pio_rxqueue *prq, uint16_t offset, uint32_t value)
{
BWN_WRITE_4(prq->prq_mac, prq->prq_base + offset, value);
}
static int
bwn_ieeerate2hwrate(struct bwn_softc *sc, int rate)
{
switch (rate) {
/* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
case 12:
return (BWN_OFDM_RATE_6MB);
case 18:
return (BWN_OFDM_RATE_9MB);
case 24:
return (BWN_OFDM_RATE_12MB);
case 36:
return (BWN_OFDM_RATE_18MB);
case 48:
return (BWN_OFDM_RATE_24MB);
case 72:
return (BWN_OFDM_RATE_36MB);
case 96:
return (BWN_OFDM_RATE_48MB);
case 108:
return (BWN_OFDM_RATE_54MB);
/* CCK rates (NB: not IEEE std, device-specific) */
case 2:
return (BWN_CCK_RATE_1MB);
case 4:
return (BWN_CCK_RATE_2MB);
case 11:
return (BWN_CCK_RATE_5MB);
case 22:
return (BWN_CCK_RATE_11MB);
}
device_printf(sc->sc_dev, "unsupported rate %d\n", rate);
return (BWN_CCK_RATE_1MB);
}
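/*
 * Build the per-frame TX header consumed by the firmware: rate and
 * fallback-rate selection, PLCP headers, MAC/PHY control words, an
 * optional embedded RTS/CTS frame for protection, and the TX radiotap
 * record.
 */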
static int
bwn_set_txhdr(struct bwn_mac *mac, struct ieee80211_node *ni,
struct mbuf *m, struct bwn_txhdr *txhdr, uint16_t cookie)
{
const struct bwn_phy *phy = &mac->mac_phy;
struct bwn_softc *sc = mac->mac_sc;
struct ieee80211_frame *wh;
struct ieee80211_frame *protwh;
struct ieee80211_frame_cts *cts;
struct ieee80211_frame_rts *rts;
const struct ieee80211_txparam *tp;
struct ieee80211vap *vap = ni->ni_vap;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct mbuf *mprot;
unsigned int len;
uint32_t macctl = 0;
int protdur, rts_rate, rts_rate_fb, ismcast, isshort, rix, type;
uint16_t phyctl = 0;
uint8_t rate, rate_fb;
wh = mtod(m, struct ieee80211_frame *);
memset(txhdr, 0, sizeof(*txhdr));
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0;
/*
* Find TX rate
*/
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
if (type != IEEE80211_FC0_TYPE_DATA || (m->m_flags & M_EAPOL))
rate = rate_fb = tp->mgmtrate;
else if (ismcast)
rate = rate_fb = tp->mcastrate;
else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
rate = rate_fb = tp->ucastrate;
else {
rix = ieee80211_ratectl_rate(ni, NULL, 0);
rate = ni->ni_txrate;
if (rix > 0)
rate_fb = ni->ni_rates.rs_rates[rix - 1] &
IEEE80211_RATE_VAL;
else
rate_fb = rate;
}
sc->sc_tx_rate = rate;
rate = bwn_ieeerate2hwrate(sc, rate);
rate_fb = bwn_ieeerate2hwrate(sc, rate_fb);
txhdr->phyrate = (BWN_ISOFDMRATE(rate)) ? bwn_plcp_getofdm(rate) :
bwn_plcp_getcck(rate);
bcopy(wh->i_fc, txhdr->macfc, sizeof(txhdr->macfc));
bcopy(wh->i_addr1, txhdr->addr1, IEEE80211_ADDR_LEN);
if ((rate_fb == rate) ||
(*(u_int16_t *)wh->i_dur & htole16(0x8000)) ||
(*(u_int16_t *)wh->i_dur == htole16(0)))
txhdr->dur_fb = *(u_int16_t *)wh->i_dur;
else
txhdr->dur_fb = ieee80211_compute_duration(ic->ic_rt,
m->m_pkthdr.len, rate, isshort);
/* XXX TX encryption */
bwn_plcp_genhdr(BWN_ISOLDFMT(mac) ?
(struct bwn_plcp4 *)(&txhdr->body.old.plcp) :
(struct bwn_plcp4 *)(&txhdr->body.new.plcp),
m->m_pkthdr.len + IEEE80211_CRC_LEN, rate);
bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->plcp_fb),
m->m_pkthdr.len + IEEE80211_CRC_LEN, rate_fb);
txhdr->eftypes |= (BWN_ISOFDMRATE(rate_fb)) ? BWN_TX_EFT_FB_OFDM :
BWN_TX_EFT_FB_CCK;
txhdr->chan = phy->chan;
phyctl |= (BWN_ISOFDMRATE(rate)) ? BWN_TX_PHY_ENC_OFDM :
BWN_TX_PHY_ENC_CCK;
if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB ||
rate == BWN_CCK_RATE_11MB))
phyctl |= BWN_TX_PHY_SHORTPRMBL;
/* XXX TX antenna selection */
switch (bwn_antenna_sanitize(mac, 0)) {
case 0:
phyctl |= BWN_TX_PHY_ANT01AUTO;
break;
case 1:
phyctl |= BWN_TX_PHY_ANT0;
break;
case 2:
phyctl |= BWN_TX_PHY_ANT1;
break;
case 3:
phyctl |= BWN_TX_PHY_ANT2;
break;
case 4:
phyctl |= BWN_TX_PHY_ANT3;
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
if (!ismcast)
macctl |= BWN_TX_MAC_ACK;
macctl |= (BWN_TX_MAC_HWSEQ | BWN_TX_MAC_START_MSDU);
if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
macctl |= BWN_TX_MAC_LONGFRAME;
if (ic->ic_flags & IEEE80211_F_USEPROT) {
/* XXX RTS rate is always 1MB??? */
rts_rate = BWN_CCK_RATE_1MB;
rts_rate_fb = bwn_get_fbrate(rts_rate);
protdur = ieee80211_compute_duration(ic->ic_rt,
m->m_pkthdr.len, rate, isshort) +
+ ieee80211_ack_duration(ic->ic_rt, rate, isshort);
if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
cts = (struct ieee80211_frame_cts *)(BWN_ISOLDFMT(mac) ?
(txhdr->body.old.rts_frame) :
(txhdr->body.new.rts_frame));
mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr,
protdur);
KASSERT(mprot != NULL, ("failed to alloc mbuf\n"));
bcopy(mtod(mprot, uint8_t *), (uint8_t *)cts,
mprot->m_pkthdr.len);
m_freem(mprot);
macctl |= BWN_TX_MAC_SEND_CTSTOSELF;
len = sizeof(struct ieee80211_frame_cts);
} else {
rts = (struct ieee80211_frame_rts *)(BWN_ISOLDFMT(mac) ?
(txhdr->body.old.rts_frame) :
(txhdr->body.new.rts_frame));
protdur += ieee80211_ack_duration(ic->ic_rt, rate,
isshort);
mprot = ieee80211_alloc_rts(ic, wh->i_addr1,
wh->i_addr2, protdur);
KASSERT(mprot != NULL, ("failed to alloc mbuf\n"));
bcopy(mtod(mprot, uint8_t *), (uint8_t *)rts,
mprot->m_pkthdr.len);
m_freem(mprot);
macctl |= BWN_TX_MAC_SEND_RTSCTS;
len = sizeof(struct ieee80211_frame_rts);
}
len += IEEE80211_CRC_LEN;
bwn_plcp_genhdr((struct bwn_plcp4 *)((BWN_ISOLDFMT(mac)) ?
&txhdr->body.old.rts_plcp :
&txhdr->body.new.rts_plcp), len, rts_rate);
bwn_plcp_genhdr((struct bwn_plcp4 *)&txhdr->rts_plcp_fb, len,
rts_rate_fb);
protwh = (struct ieee80211_frame *)(BWN_ISOLDFMT(mac) ?
(&txhdr->body.old.rts_frame) :
(&txhdr->body.new.rts_frame));
txhdr->rts_dur_fb = *(u_int16_t *)protwh->i_dur;
if (BWN_ISOFDMRATE(rts_rate)) {
txhdr->eftypes |= BWN_TX_EFT_RTS_OFDM;
txhdr->phyrate_rts = bwn_plcp_getofdm(rts_rate);
} else {
txhdr->eftypes |= BWN_TX_EFT_RTS_CCK;
txhdr->phyrate_rts = bwn_plcp_getcck(rts_rate);
}
txhdr->eftypes |= (BWN_ISOFDMRATE(rts_rate_fb)) ?
BWN_TX_EFT_RTS_FBOFDM : BWN_TX_EFT_RTS_FBCCK;
}
if (BWN_ISOLDFMT(mac))
txhdr->body.old.cookie = htole16(cookie);
else
txhdr->body.new.cookie = htole16(cookie);
txhdr->macctl = htole32(macctl);
txhdr->phyctl = htole16(phyctl);
/*
* TX radio tap
*/
if (ieee80211_radiotap_active_vap(vap)) {
sc->sc_tx_th.wt_flags = 0;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
if (isshort &&
(rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB ||
rate == BWN_CCK_RATE_11MB))
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
sc->sc_tx_th.wt_rate = rate;
ieee80211_radiotap_tx(vap, m);
}
return (0);
}
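/*
 * Generate a PLCP header for the given frame length and rate: the OFDM
 * form packs rate and length into one 32-bit word, while the CCK form
 * computes the LENGTH field from the frame size and rate, setting the
 * 11 Mb/s length-extension bit when required.
 */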
static void
bwn_plcp_genhdr(struct bwn_plcp4 *plcp, const uint16_t octets,
const uint8_t rate)
{
uint32_t d, plen;
uint8_t *raw = plcp->o.raw;
if (BWN_ISOFDMRATE(rate)) {
d = bwn_plcp_getofdm(rate);
KASSERT(!(octets & 0xf000),
("%s:%d: fail", __func__, __LINE__));
d |= (octets << 5);
plcp->o.data = htole32(d);
} else {
plen = octets * 16 / rate;
if ((octets * 16 % rate) > 0) {
plen++;
if ((rate == BWN_CCK_RATE_11MB)
&& ((octets * 8 % 11) < 4)) {
raw[1] = 0x84;
} else
raw[1] = 0x04;
} else
raw[1] = 0x04;
plcp->o.data |= htole32(plen << 16);
raw[0] = bwn_plcp_getcck(rate);
}
}
static uint8_t
bwn_antenna_sanitize(struct bwn_mac *mac, uint8_t n)
{
struct bwn_softc *sc = mac->mac_sc;
uint8_t mask;
if (n == 0)
return (0);
if (mac->mac_phy.gmode)
mask = siba_sprom_get_ant_bg(sc->sc_dev);
else
mask = siba_sprom_get_ant_a(sc->sc_dev);
if (!(mask & (1 << (n - 1))))
return (0);
return (n);
}
static uint8_t
bwn_get_fbrate(uint8_t bitrate)
{
switch (bitrate) {
case BWN_CCK_RATE_1MB:
return (BWN_CCK_RATE_1MB);
case BWN_CCK_RATE_2MB:
return (BWN_CCK_RATE_1MB);
case BWN_CCK_RATE_5MB:
return (BWN_CCK_RATE_2MB);
case BWN_CCK_RATE_11MB:
return (BWN_CCK_RATE_5MB);
case BWN_OFDM_RATE_6MB:
return (BWN_CCK_RATE_5MB);
case BWN_OFDM_RATE_9MB:
return (BWN_OFDM_RATE_6MB);
case BWN_OFDM_RATE_12MB:
return (BWN_OFDM_RATE_9MB);
case BWN_OFDM_RATE_18MB:
return (BWN_OFDM_RATE_12MB);
case BWN_OFDM_RATE_24MB:
return (BWN_OFDM_RATE_18MB);
case BWN_OFDM_RATE_36MB:
return (BWN_OFDM_RATE_24MB);
case BWN_OFDM_RATE_48MB:
return (BWN_OFDM_RATE_36MB);
case BWN_OFDM_RATE_54MB:
return (BWN_OFDM_RATE_48MB);
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (0);
}
static uint32_t
bwn_pio_write_multi_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq,
uint32_t ctl, const void *_data, int len)
{
struct bwn_softc *sc = mac->mac_sc;
uint32_t value = 0;
const uint8_t *data = _data;
ctl |= BWN_PIO8_TXCTL_0_7 | BWN_PIO8_TXCTL_8_15 |
BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31;
bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl);
siba_write_multi_4(sc->sc_dev, data, (len & ~3),
tq->tq_base + BWN_PIO8_TXDATA);
if (len & 3) {
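/*
 * Pack the 1-3 trailing bytes into one final 32-bit write with
 * only the matching byte-lane enables set; the switch below
 * intentionally falls through.
 */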
ctl &= ~(BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 |
BWN_PIO8_TXCTL_24_31);
data = &(data[len - 1]);
switch (len & 3) {
case 3:
ctl |= BWN_PIO8_TXCTL_16_23;
value |= (uint32_t)(*data) << 16;
data--;
case 2:
ctl |= BWN_PIO8_TXCTL_8_15;
value |= (uint32_t)(*data) << 8;
data--;
case 1:
value |= (uint32_t)(*data);
}
bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl);
bwn_pio_write_4(mac, tq, BWN_PIO8_TXDATA, value);
}
return (ctl);
}
static void
bwn_pio_write_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq,
uint16_t offset, uint32_t value)
{
BWN_WRITE_4(mac, tq->tq_base + offset, value);
}
static uint16_t
bwn_pio_write_multi_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq,
uint16_t ctl, const void *_data, int len)
{
struct bwn_softc *sc = mac->mac_sc;
const uint8_t *data = _data;
ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI;
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl);
siba_write_multi_2(sc->sc_dev, data, (len & ~1),
tq->tq_base + BWN_PIO_TXDATA);
if (len & 1) {
ctl &= ~BWN_PIO_TXCTL_WRITEHI;
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl);
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data[len - 1]);
}
return (ctl);
}
static uint16_t
bwn_pio_write_mbuf_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq,
uint16_t ctl, struct mbuf *m0)
{
int i, j = 0;
uint16_t data = 0;
const uint8_t *buf;
struct mbuf *m = m0;
ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI;
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl);
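/*
 * Stream the mbuf chain out as 16-bit words (low byte first); a
 * trailing odd byte is flushed afterwards with only the low byte
 * lane enabled.
 */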
for (; m != NULL; m = m->m_next) {
buf = mtod(m, const uint8_t *);
for (i = 0; i < m->m_len; i++) {
if (!((j++) % 2))
data |= buf[i];
else {
data |= (buf[i] << 8);
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data);
data = 0;
}
}
}
if (m0->m_pkthdr.len % 2) {
ctl &= ~BWN_PIO_TXCTL_WRITEHI;
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl);
BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data);
}
return (ctl);
}
static void
bwn_set_slot_time(struct bwn_mac *mac, uint16_t time)
{
if (mac->mac_phy.type != BWN_PHYTYPE_G)
return;
BWN_WRITE_2(mac, 0x684, 510 + time);
bwn_shm_write_2(mac, BWN_SHARED, 0x0010, time);
}
static struct bwn_dma_ring *
bwn_dma_select(struct bwn_mac *mac, uint8_t prio)
{
if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0)
return (mac->mac_method.dma.wme[WME_AC_BE]);
switch (prio) {
case 3:
return (mac->mac_method.dma.wme[WME_AC_VO]);
case 2:
return (mac->mac_method.dma.wme[WME_AC_VI]);
case 0:
return (mac->mac_method.dma.wme[WME_AC_BE]);
case 1:
return (mac->mac_method.dma.wme[WME_AC_BK]);
}
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
return (NULL);
}
static int
bwn_dma_getslot(struct bwn_dma_ring *dr)
{
int slot;
BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc);
KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__));
KASSERT(!(dr->dr_stop), ("%s:%d: fail", __func__, __LINE__));
KASSERT(bwn_dma_freeslot(dr) != 0, ("%s:%d: fail", __func__, __LINE__));
slot = bwn_dma_nextslot(dr, dr->dr_curslot);
KASSERT(!(slot & ~0x0fff), ("%s:%d: fail", __func__, __LINE__));
dr->dr_curslot = slot;
dr->dr_usedslot++;
return (slot);
}
static int
bwn_phy_shm_tssi_read(struct bwn_mac *mac, uint16_t shm_offset)
{
const uint8_t ofdm = (shm_offset != BWN_SHARED_TSSI_CCK);
unsigned int a, b, c, d;
unsigned int avg;
uint32_t tmp;
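/*
 * Four 8-bit TSSI samples are packed into a single shared-memory
 * word; 0 or BWN_TSSI_MAX means no valid sample yet.  The OFDM
 * samples appear to be signed 6-bit values, hence the "+ 32,
 * & 0x3f" conversion before the rounded average below.
 */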
tmp = bwn_shm_read_4(mac, BWN_SHARED, shm_offset);
a = tmp & 0xff;
b = (tmp >> 8) & 0xff;
c = (tmp >> 16) & 0xff;
d = (tmp >> 24) & 0xff;
if (a == 0 || a == BWN_TSSI_MAX || b == 0 || b == BWN_TSSI_MAX ||
c == 0 || c == BWN_TSSI_MAX || d == 0 || d == BWN_TSSI_MAX)
return (ENOENT);
bwn_shm_write_4(mac, BWN_SHARED, shm_offset,
BWN_TSSI_MAX | (BWN_TSSI_MAX << 8) |
(BWN_TSSI_MAX << 16) | (BWN_TSSI_MAX << 24));
if (ofdm) {
a = (a + 32) & 0x3f;
b = (b + 32) & 0x3f;
c = (c + 32) & 0x3f;
d = (d + 32) & 0x3f;
}
avg = (a + b + c + d + 2) / 4;
if (ofdm) {
if (bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFLO)
& BWN_HF_4DB_CCK_POWERBOOST)
avg = (avg >= 13) ? (avg - 13) : 0;
}
return (avg);
}
static void
bwn_phy_g_setatt(struct bwn_mac *mac, int *bbattp, int *rfattp)
{
struct bwn_txpwr_loctl *lo = &mac->mac_phy.phy_g.pg_loctl;
int rfatt = *rfattp;
int bbatt = *bbattp;
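/*
 * Trade baseband attenuation against RF attenuation (roughly four
 * baseband steps per RF step) until both values fall inside the
 * limits from the LO control state, then clamp to those limits.
 */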
while (1) {
if (rfatt > lo->rfatt.max && bbatt > lo->bbatt.max - 4)
break;
if (rfatt < lo->rfatt.min && bbatt < lo->bbatt.min + 4)
break;
if (bbatt > lo->bbatt.max && rfatt > lo->rfatt.max - 1)
break;
if (bbatt < lo->bbatt.min && rfatt < lo->rfatt.min + 1)
break;
if (bbatt > lo->bbatt.max) {
bbatt -= 4;
rfatt += 1;
continue;
}
if (bbatt < lo->bbatt.min) {
bbatt += 4;
rfatt -= 1;
continue;
}
if (rfatt > lo->rfatt.max) {
rfatt -= 1;
bbatt += 4;
continue;
}
if (rfatt < lo->rfatt.min) {
rfatt += 1;
bbatt -= 4;
continue;
}
break;
}
*rfattp = MIN(MAX(rfatt, lo->rfatt.min), lo->rfatt.max);
*bbattp = MIN(MAX(bbatt, lo->bbatt.min), lo->bbatt.max);
}
static void
bwn_phy_lock(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
KASSERT(siba_get_revid(sc->sc_dev) >= 3,
("%s: unsupported rev %d", __func__, siba_get_revid(sc->sc_dev)));
if (ic->ic_opmode != IEEE80211_M_HOSTAP)
bwn_psctl(mac, BWN_PS_AWAKE);
}
static void
bwn_phy_unlock(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
KASSERT(siba_get_revid(sc->sc_dev) >= 3,
("%s: unsupported rev %d", __func__, siba_get_revid(sc->sc_dev)));
if (ic->ic_opmode != IEEE80211_M_HOSTAP)
bwn_psctl(mac, 0);
}
static void
bwn_rf_lock(struct bwn_mac *mac)
{
BWN_WRITE_4(mac, BWN_MACCTL,
BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_RADIO_LOCK);
BWN_READ_4(mac, BWN_MACCTL);
DELAY(10);
}
static void
bwn_rf_unlock(struct bwn_mac *mac)
{
BWN_READ_2(mac, BWN_PHYVER);
BWN_WRITE_4(mac, BWN_MACCTL,
BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_RADIO_LOCK);
}
static struct bwn_pio_txqueue *
bwn_pio_parse_cookie(struct bwn_mac *mac, uint16_t cookie,
struct bwn_pio_txpkt **pack)
{
struct bwn_pio *pio = &mac->mac_method.pio;
struct bwn_pio_txqueue *tq = NULL;
unsigned int index;
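/*
 * The top nibble of the cookie selects the TX queue; the low 12
 * bits index the packet slot within that queue.
 */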
switch (cookie & 0xf000) {
case 0x1000:
tq = &pio->wme[WME_AC_BK];
break;
case 0x2000:
tq = &pio->wme[WME_AC_BE];
break;
case 0x3000:
tq = &pio->wme[WME_AC_VI];
break;
case 0x4000:
tq = &pio->wme[WME_AC_VO];
break;
case 0x5000:
tq = &pio->mcast;
break;
}
KASSERT(tq != NULL, ("%s:%d: fail", __func__, __LINE__));
if (tq == NULL)
return (NULL);
index = (cookie & 0x0fff);
KASSERT(index < N(tq->tq_pkts), ("%s:%d: fail", __func__, __LINE__));
if (index >= N(tq->tq_pkts))
return (NULL);
*pack = &tq->tq_pkts[index];
KASSERT(*pack != NULL, ("%s:%d: fail", __func__, __LINE__));
return (tq);
}
static void
bwn_txpwr(void *arg, int npending)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc = mac->mac_sc;
BWN_LOCK(sc);
if (mac->mac_status >= BWN_MAC_STATUS_STARTED &&
mac->mac_phy.set_txpwr != NULL)
mac->mac_phy.set_txpwr(mac);
BWN_UNLOCK(sc);
}
static void
bwn_task_15s(struct bwn_mac *mac)
{
uint16_t reg;
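/*
 * With open-source firmware, run a scratch-memory watchdog: the
 * firmware is expected to clear the register written below, so a
 * non-zero value 15 seconds later is treated as a firmware hang.
 */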
if (mac->mac_fw.opensource) {
reg = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG);
if (reg) {
bwn_restart(mac, "fw watchdog");
return;
}
bwn_shm_write_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG, 1);
}
if (mac->mac_phy.task_15s)
mac->mac_phy.task_15s(mac);
mac->mac_phy.txerrors = BWN_TXERROR_MAX;
}
static void
bwn_task_30s(struct bwn_mac *mac)
{
if (mac->mac_phy.type != BWN_PHYTYPE_G || mac->mac_noise.noi_running)
return;
mac->mac_noise.noi_running = 1;
mac->mac_noise.noi_nsamples = 0;
bwn_noise_gensample(mac);
}
static void
bwn_task_60s(struct bwn_mac *mac)
{
if (mac->mac_phy.task_60s)
mac->mac_phy.task_60s(mac);
bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME);
}
static void
bwn_tasks(void *arg)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc = mac->mac_sc;
BWN_ASSERT_LOCKED(sc);
if (mac->mac_status != BWN_MAC_STATUS_STARTED)
return;
if (mac->mac_task_state % 4 == 0)
bwn_task_60s(mac);
if (mac->mac_task_state % 2 == 0)
bwn_task_30s(mac);
bwn_task_15s(mac);
mac->mac_task_state++;
callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac);
}
static int
bwn_plcp_get_ofdmrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp, uint8_t a)
{
struct bwn_softc *sc = mac->mac_sc;
KASSERT(a == 0, ("APHY not supported\n"));
switch (plcp->o.raw[0] & 0xf) {
case 0xb:
return (BWN_OFDM_RATE_6MB);
case 0xf:
return (BWN_OFDM_RATE_9MB);
case 0xa:
return (BWN_OFDM_RATE_12MB);
case 0xe:
return (BWN_OFDM_RATE_18MB);
case 0x9:
return (BWN_OFDM_RATE_24MB);
case 0xd:
return (BWN_OFDM_RATE_36MB);
case 0x8:
return (BWN_OFDM_RATE_48MB);
case 0xc:
return (BWN_OFDM_RATE_54MB);
}
device_printf(sc->sc_dev, "incorrect OFDM rate %d\n",
plcp->o.raw[0] & 0xf);
return (-1);
}
static int
bwn_plcp_get_cckrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp)
{
struct bwn_softc *sc = mac->mac_sc;
switch (plcp->o.raw[0]) {
case 0x0a:
return (BWN_CCK_RATE_1MB);
case 0x14:
return (BWN_CCK_RATE_2MB);
case 0x37:
return (BWN_CCK_RATE_5MB);
case 0x6e:
return (BWN_CCK_RATE_11MB);
}
device_printf(sc->sc_dev, "incorrect CCK rate %d\n", plcp->o.raw[0]);
return (-1);
}
static void
bwn_rx_radiotap(struct bwn_mac *mac, struct mbuf *m,
const struct bwn_rxhdr4 *rxhdr, struct bwn_plcp6 *plcp, int rate,
int rssi, int noise)
{
struct bwn_softc *sc = mac->mac_sc;
const struct ieee80211_frame_min *wh;
uint64_t tsf;
uint16_t low_mactime_now;
if (htole16(rxhdr->phy_status0) & BWN_RX_PHYST0_SHORTPRMBL)
sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
wh = mtod(m, const struct ieee80211_frame_min *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_WEP;
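/*
 * Rebuild a full 64-bit timestamp: take the current TSF, splice
 * in the 16-bit RX MAC time from the header as the low word, and
 * step back one 16-bit period if that time is ahead of the
 * current low word (i.e. the counter wrapped in between).
 */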
bwn_tsf_read(mac, &tsf);
low_mactime_now = tsf;
tsf = tsf & ~0xffffULL;
tsf += le16toh(rxhdr->mac_time);
if (low_mactime_now < le16toh(rxhdr->mac_time))
tsf -= 0x10000;
sc->sc_rx_th.wr_tsf = tsf;
sc->sc_rx_th.wr_rate = rate;
sc->sc_rx_th.wr_antsignal = rssi;
sc->sc_rx_th.wr_antnoise = noise;
}
static void
bwn_tsf_read(struct bwn_mac *mac, uint64_t *tsf)
{
uint32_t low, high;
KASSERT(siba_get_revid(mac->mac_sc->sc_dev) >= 3,
("%s:%d: fail", __func__, __LINE__));
low = BWN_READ_4(mac, BWN_REV3PLUS_TSF_LOW);
high = BWN_READ_4(mac, BWN_REV3PLUS_TSF_HIGH);
*tsf = high;
*tsf <<= 32;
*tsf |= low;
}
static int
bwn_dma_attach(struct bwn_mac *mac)
{
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_softc *sc = mac->mac_sc;
bus_addr_t lowaddr = 0;
int error;
if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0)
return (0);
KASSERT(siba_get_revid(sc->sc_dev) >= 5, ("%s: fail", __func__));
mac->mac_flags |= BWN_MAC_FLAG_DMA;
dma->dmatype = bwn_dma_gettype(mac);
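/*
 * Restrict DMA addressing to what the engine supports: 30-bit and
 * 32-bit engines get a bounded lowaddr, 64-bit engines may use
 * the full address space.
 */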
if (dma->dmatype == BWN_DMA_30BIT)
lowaddr = BWN_BUS_SPACE_MAXADDR_30BIT;
else if (dma->dmatype == BWN_DMA_32BIT)
lowaddr = BUS_SPACE_MAXADDR_32BIT;
else
lowaddr = BUS_SPACE_MAXADDR;
/*
* Create top level DMA tag
*/
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
BWN_ALIGN, 0, /* alignment, bounds */
lowaddr, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE, /* maxsize */
BUS_SPACE_UNRESTRICTED, /* nsegments */
BUS_SPACE_MAXSIZE, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&dma->parent_dtag);
if (error) {
device_printf(sc->sc_dev, "can't create parent DMA tag\n");
return (error);
}
/*
* Create TX/RX mbuf DMA tag
*/
error = bus_dma_tag_create(dma->parent_dtag,
1,
0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
MCLBYTES,
1,
BUS_SPACE_MAXSIZE_32BIT,
0,
NULL, NULL,
&dma->rxbuf_dtag);
if (error) {
device_printf(sc->sc_dev, "can't create mbuf DMA tag\n");
goto fail0;
}
error = bus_dma_tag_create(dma->parent_dtag,
1,
0,
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
MCLBYTES,
1,
BUS_SPACE_MAXSIZE_32BIT,
0,
NULL, NULL,
&dma->txbuf_dtag);
if (error) {
device_printf(sc->sc_dev, "can't create mbuf DMA tag\n");
goto fail1;
}
dma->wme[WME_AC_BK] = bwn_dma_ringsetup(mac, 0, 1, dma->dmatype);
if (!dma->wme[WME_AC_BK])
goto fail2;
dma->wme[WME_AC_BE] = bwn_dma_ringsetup(mac, 1, 1, dma->dmatype);
if (!dma->wme[WME_AC_BE])
goto fail3;
dma->wme[WME_AC_VI] = bwn_dma_ringsetup(mac, 2, 1, dma->dmatype);
if (!dma->wme[WME_AC_VI])
goto fail4;
dma->wme[WME_AC_VO] = bwn_dma_ringsetup(mac, 3, 1, dma->dmatype);
if (!dma->wme[WME_AC_VO])
goto fail5;
dma->mcast = bwn_dma_ringsetup(mac, 4, 1, dma->dmatype);
if (!dma->mcast)
goto fail6;
dma->rx = bwn_dma_ringsetup(mac, 0, 0, dma->dmatype);
if (!dma->rx)
goto fail7;
return (error);
fail7: bwn_dma_ringfree(&dma->mcast);
fail6: bwn_dma_ringfree(&dma->wme[WME_AC_VO]);
fail5: bwn_dma_ringfree(&dma->wme[WME_AC_VI]);
fail4: bwn_dma_ringfree(&dma->wme[WME_AC_BE]);
fail3: bwn_dma_ringfree(&dma->wme[WME_AC_BK]);
fail2: bus_dma_tag_destroy(dma->txbuf_dtag);
fail1: bus_dma_tag_destroy(dma->rxbuf_dtag);
fail0: bus_dma_tag_destroy(dma->parent_dtag);
return (error);
}
static struct bwn_dma_ring *
bwn_dma_parse_cookie(struct bwn_mac *mac, const struct bwn_txstatus *status,
uint16_t cookie, int *slot)
{
struct bwn_dma *dma = &mac->mac_method.dma;
struct bwn_dma_ring *dr;
struct bwn_softc *sc = mac->mac_sc;
BWN_ASSERT_LOCKED(mac->mac_sc);
switch (cookie & 0xf000) {
case 0x1000:
dr = dma->wme[WME_AC_BK];
break;
case 0x2000:
dr = dma->wme[WME_AC_BE];
break;
case 0x3000:
dr = dma->wme[WME_AC_VI];
break;
case 0x4000:
dr = dma->wme[WME_AC_VO];
break;
case 0x5000:
dr = dma->mcast;
break;
default:
dr = NULL;
KASSERT(0 == 1,
("invalid cookie value %d", cookie & 0xf000));
}
*slot = (cookie & 0x0fff);
if (*slot < 0 || *slot >= dr->dr_numslots) {
/*
 * XXX FIXME: the hardware sometimes reports duplicate TX DONE
 * events carrying the same hardware sequence number.  When that
 * happens, just print a warning and ignore the event.
 */
KASSERT(status->seq == dma->lastseq,
("%s:%d: fail", __func__, __LINE__));
device_printf(sc->sc_dev,
"out of slot ranges (0 < %d < %d)\n", *slot,
dr->dr_numslots);
return (NULL);
}
dma->lastseq = status->seq;
return (dr);
}
static void
bwn_dma_stop(struct bwn_mac *mac)
{
struct bwn_dma *dma;
if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0)
return;
dma = &mac->mac_method.dma;
bwn_dma_ringstop(&dma->rx);
bwn_dma_ringstop(&dma->wme[WME_AC_BK]);
bwn_dma_ringstop(&dma->wme[WME_AC_BE]);
bwn_dma_ringstop(&dma->wme[WME_AC_VI]);
bwn_dma_ringstop(&dma->wme[WME_AC_VO]);
bwn_dma_ringstop(&dma->mcast);
}
static void
bwn_dma_ringstop(struct bwn_dma_ring **dr)
{
if (dr == NULL)
return;
bwn_dma_cleanup(*dr);
}
static void
bwn_pio_stop(struct bwn_mac *mac)
{
struct bwn_pio *pio;
if (mac->mac_flags & BWN_MAC_FLAG_DMA)
return;
pio = &mac->mac_method.pio;
bwn_destroy_queue_tx(&pio->mcast);
bwn_destroy_queue_tx(&pio->wme[WME_AC_VO]);
bwn_destroy_queue_tx(&pio->wme[WME_AC_VI]);
bwn_destroy_queue_tx(&pio->wme[WME_AC_BE]);
bwn_destroy_queue_tx(&pio->wme[WME_AC_BK]);
}
static void
bwn_led_attach(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
const uint8_t *led_act = NULL;
uint16_t val[BWN_LED_MAX];
int i;
sc->sc_led_idle = (2350 * hz) / 1000;
sc->sc_led_blink = 1;
for (i = 0; i < N(bwn_vendor_led_act); ++i) {
if (siba_get_pci_subvendor(sc->sc_dev) ==
bwn_vendor_led_act[i].vid) {
led_act = bwn_vendor_led_act[i].led_act;
break;
}
}
if (led_act == NULL)
led_act = bwn_default_led_act;
val[0] = siba_sprom_get_gpio0(sc->sc_dev);
val[1] = siba_sprom_get_gpio1(sc->sc_dev);
val[2] = siba_sprom_get_gpio2(sc->sc_dev);
val[3] = siba_sprom_get_gpio3(sc->sc_dev);
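/*
 * A SPROM GPIO value of 0xff apparently means "not programmed";
 * in that case fall back to the per-vendor (or default) behaviour
 * table, otherwise take the behaviour and active-low flag straight
 * from the SPROM.
 */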
for (i = 0; i < BWN_LED_MAX; ++i) {
struct bwn_led *led = &sc->sc_leds[i];
if (val[i] == 0xff) {
led->led_act = led_act[i];
} else {
if (val[i] & BWN_LED_ACT_LOW)
led->led_flags |= BWN_LED_F_ACTLOW;
led->led_act = val[i] & BWN_LED_ACT_MASK;
}
led->led_mask = (1 << i);
if (led->led_act == BWN_LED_ACT_BLINK_SLOW ||
led->led_act == BWN_LED_ACT_BLINK_POLL ||
led->led_act == BWN_LED_ACT_BLINK) {
led->led_flags |= BWN_LED_F_BLINK;
if (led->led_act == BWN_LED_ACT_BLINK_POLL)
led->led_flags |= BWN_LED_F_POLLABLE;
else if (led->led_act == BWN_LED_ACT_BLINK_SLOW)
led->led_flags |= BWN_LED_F_SLOW;
if (sc->sc_blink_led == NULL) {
sc->sc_blink_led = led;
if (led->led_flags & BWN_LED_F_SLOW)
BWN_LED_SLOWDOWN(sc->sc_led_idle);
}
}
DPRINTF(sc, BWN_DEBUG_LED,
"%dth led, act %d, lowact %d\n", i,
led->led_act, led->led_flags & BWN_LED_F_ACTLOW);
}
callout_init_mtx(&sc->sc_led_blink_ch, &sc->sc_mtx, 0);
}
static __inline uint16_t
bwn_led_onoff(const struct bwn_led *led, uint16_t val, int on)
{
if (led->led_flags & BWN_LED_F_ACTLOW)
on = !on;
if (on)
val |= led->led_mask;
else
val &= ~led->led_mask;
return val;
}
static void
bwn_led_newstate(struct bwn_mac *mac, enum ieee80211_state nstate)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t val;
int i;
if (nstate == IEEE80211_S_INIT) {
callout_stop(&sc->sc_led_blink_ch);
sc->sc_led_blinking = 0;
}
- if ((ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0)
return;
val = BWN_READ_2(mac, BWN_GPIO_CONTROL);
for (i = 0; i < BWN_LED_MAX; ++i) {
struct bwn_led *led = &sc->sc_leds[i];
int on;
if (led->led_act == BWN_LED_ACT_UNKN ||
led->led_act == BWN_LED_ACT_NULL)
continue;
if ((led->led_flags & BWN_LED_F_BLINK) &&
nstate != IEEE80211_S_INIT)
continue;
switch (led->led_act) {
case BWN_LED_ACT_ON: /* Always on */
on = 1;
break;
case BWN_LED_ACT_OFF: /* Always off */
case BWN_LED_ACT_5GHZ: /* TODO: 11A */
on = 0;
break;
default:
on = 1;
switch (nstate) {
case IEEE80211_S_INIT:
on = 0;
break;
case IEEE80211_S_RUN:
if (led->led_act == BWN_LED_ACT_11G &&
ic->ic_curmode != IEEE80211_MODE_11G)
on = 0;
break;
default:
if (led->led_act == BWN_LED_ACT_ASSOC)
on = 0;
break;
}
break;
}
val = bwn_led_onoff(led, val, on);
}
BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val);
}
static void
bwn_led_event(struct bwn_mac *mac, int event)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_led *led = sc->sc_blink_led;
int rate;
if (event == BWN_LED_EVENT_POLL) {
if ((led->led_flags & BWN_LED_F_POLLABLE) == 0)
return;
if (ticks - sc->sc_led_ticks < sc->sc_led_idle)
return;
}
sc->sc_led_ticks = ticks;
if (sc->sc_led_blinking)
return;
switch (event) {
case BWN_LED_EVENT_RX:
rate = sc->sc_rx_rate;
break;
case BWN_LED_EVENT_TX:
rate = sc->sc_tx_rate;
break;
case BWN_LED_EVENT_POLL:
rate = 0;
break;
default:
panic("unknown LED event %d\n", event);
break;
}
bwn_led_blink_start(mac, bwn_led_duration[rate].on_dur,
bwn_led_duration[rate].off_dur);
}
static void
bwn_led_blink_start(struct bwn_mac *mac, int on_dur, int off_dur)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_led *led = sc->sc_blink_led;
uint16_t val;
val = BWN_READ_2(mac, BWN_GPIO_CONTROL);
val = bwn_led_onoff(led, val, 1);
BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val);
if (led->led_flags & BWN_LED_F_SLOW) {
BWN_LED_SLOWDOWN(on_dur);
BWN_LED_SLOWDOWN(off_dur);
}
sc->sc_led_blinking = 1;
sc->sc_led_blink_offdur = off_dur;
callout_reset(&sc->sc_led_blink_ch, on_dur, bwn_led_blink_next, mac);
}
static void
bwn_led_blink_next(void *arg)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc = mac->mac_sc;
uint16_t val;
val = BWN_READ_2(mac, BWN_GPIO_CONTROL);
val = bwn_led_onoff(sc->sc_blink_led, val, 0);
BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val);
callout_reset(&sc->sc_led_blink_ch, sc->sc_led_blink_offdur,
bwn_led_blink_end, mac);
}
static void
bwn_led_blink_end(void *arg)
{
struct bwn_mac *mac = arg;
struct bwn_softc *sc = mac->mac_sc;
sc->sc_led_blinking = 0;
}
static int
bwn_suspend(device_t dev)
{
struct bwn_softc *sc = device_get_softc(dev);
- bwn_stop(sc, 1);
+ BWN_LOCK(sc);
+ bwn_stop(sc);
+ BWN_UNLOCK(sc);
return (0);
}
static int
bwn_resume(device_t dev)
{
struct bwn_softc *sc = device_get_softc(dev);
- struct ifnet *ifp = sc->sc_ifp;
+ int error = EDOOFUS;
- if (ifp->if_flags & IFF_UP)
- bwn_init(sc);
+ BWN_LOCK(sc);
+ if (sc->sc_ic.ic_nrunning > 0)
+ error = bwn_init(sc);
+ BWN_UNLOCK(sc);
+ if (error == 0)
+ ieee80211_start_all(&sc->sc_ic);
return (0);
}
static void
bwn_rfswitch(void *arg)
{
struct bwn_softc *sc = arg;
struct bwn_mac *mac = sc->sc_curmac;
int cur = 0, prev = 0;
KASSERT(mac->mac_status >= BWN_MAC_STATUS_STARTED,
("%s: invalid MAC status %d", __func__, mac->mac_status));
if (mac->mac_phy.rev >= 3 || mac->mac_phy.type == BWN_PHYTYPE_LP) {
if (!(BWN_READ_4(mac, BWN_RF_HWENABLED_HI)
& BWN_RF_HWENABLED_HI_MASK))
cur = 1;
} else {
if (BWN_READ_2(mac, BWN_RF_HWENABLED_LO)
& BWN_RF_HWENABLED_LO_MASK)
cur = 1;
}
if (mac->mac_flags & BWN_MAC_FLAG_RADIO_ON)
prev = 1;
if (cur != prev) {
if (cur)
mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON;
else
mac->mac_flags &= ~BWN_MAC_FLAG_RADIO_ON;
device_printf(sc->sc_dev,
"status of RF switch is changed to %s\n",
cur ? "ON" : "OFF");
if (cur != mac->mac_phy.rf_on) {
if (cur)
bwn_rf_turnon(mac);
else
bwn_rf_turnoff(mac);
}
}
callout_schedule(&sc->sc_rfswitch_ch, hz);
}
static void
bwn_phy_lp_init_pre(struct bwn_mac *mac)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_lp *plp = &phy->phy_lp;
plp->plp_antenna = BWN_ANT_DEFAULT;
}
static int
bwn_phy_lp_init(struct bwn_mac *mac)
{
static const struct bwn_stxtable tables[] = {
{ 2, 6, 0x3d, 3, 0x01 }, { 1, 12, 0x4c, 1, 0x01 },
{ 1, 8, 0x50, 0, 0x7f }, { 0, 8, 0x44, 0, 0xff },
{ 1, 0, 0x4a, 0, 0xff }, { 0, 4, 0x4d, 0, 0xff },
{ 1, 4, 0x4e, 0, 0xff }, { 0, 12, 0x4f, 0, 0x0f },
{ 1, 0, 0x4f, 4, 0x0f }, { 3, 0, 0x49, 0, 0x0f },
{ 4, 3, 0x46, 4, 0x07 }, { 3, 15, 0x46, 0, 0x01 },
{ 4, 0, 0x46, 1, 0x07 }, { 3, 8, 0x48, 4, 0x07 },
{ 3, 11, 0x48, 0, 0x0f }, { 3, 4, 0x49, 4, 0x0f },
{ 2, 15, 0x45, 0, 0x01 }, { 5, 13, 0x52, 4, 0x07 },
{ 6, 0, 0x52, 7, 0x01 }, { 5, 3, 0x41, 5, 0x07 },
{ 5, 6, 0x41, 0, 0x0f }, { 5, 10, 0x42, 5, 0x07 },
{ 4, 15, 0x42, 0, 0x01 }, { 5, 0, 0x42, 1, 0x07 },
{ 4, 11, 0x43, 4, 0x0f }, { 4, 7, 0x43, 0, 0x0f },
{ 4, 6, 0x45, 1, 0x01 }, { 2, 7, 0x40, 4, 0x0f },
{ 2, 11, 0x40, 0, 0x0f }
};
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
const struct bwn_stxtable *st;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
int i, error;
uint16_t tmp;
bwn_phy_lp_readsprom(mac); /* XXX bad place */
bwn_phy_lp_bbinit(mac);
/* initialize RF */
BWN_PHY_SET(mac, BWN_PHY_4WIRECTL, 0x2);
DELAY(1);
BWN_PHY_MASK(mac, BWN_PHY_4WIRECTL, 0xfffd);
DELAY(1);
if (mac->mac_phy.rf_ver == 0x2062)
bwn_phy_lp_b2062_init(mac);
else {
bwn_phy_lp_b2063_init(mac);
/* synchronize stx table. */
for (i = 0; i < N(tables); i++) {
st = &tables[i];
tmp = BWN_RF_READ(mac, st->st_rfaddr);
tmp >>= st->st_rfshift;
tmp <<= st->st_physhift;
BWN_PHY_SETMASK(mac,
BWN_PHY_OFDM(0xf2 + st->st_phyoffset),
~(st->st_mask << st->st_physhift), tmp);
}
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xf0), 0x5f80);
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xf1), 0);
}
/* calibrate RC */
if (mac->mac_phy.rev >= 2)
bwn_phy_lp_rxcal_r2(mac);
else if (!plp->plp_rccap) {
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
bwn_phy_lp_rccal_r12(mac);
} else
bwn_phy_lp_set_rccap(mac);
error = bwn_phy_lp_switch_channel(mac, 7);
if (error)
device_printf(sc->sc_dev,
"failed to change channel 7 (%d)\n", error);
bwn_phy_lp_txpctl_init(mac);
bwn_phy_lp_calib(mac);
return (0);
}
static uint16_t
bwn_phy_lp_read(struct bwn_mac *mac, uint16_t reg)
{
BWN_WRITE_2(mac, BWN_PHYCTL, reg);
return (BWN_READ_2(mac, BWN_PHYDATA));
}
static void
bwn_phy_lp_write(struct bwn_mac *mac, uint16_t reg, uint16_t value)
{
BWN_WRITE_2(mac, BWN_PHYCTL, reg);
BWN_WRITE_2(mac, BWN_PHYDATA, value);
}
static void
bwn_phy_lp_maskset(struct bwn_mac *mac, uint16_t reg, uint16_t mask,
uint16_t set)
{
BWN_WRITE_2(mac, BWN_PHYCTL, reg);
BWN_WRITE_2(mac, BWN_PHYDATA,
(BWN_READ_2(mac, BWN_PHYDATA) & mask) | set);
}
static uint16_t
bwn_phy_lp_rf_read(struct bwn_mac *mac, uint16_t reg)
{
KASSERT(reg != 1, ("inaccessible register %d", reg));
if (mac->mac_phy.rev < 2 && reg != 0x4001)
reg |= 0x100;
if (mac->mac_phy.rev >= 2)
reg |= 0x200;
BWN_WRITE_2(mac, BWN_RFCTL, reg);
return BWN_READ_2(mac, BWN_RFDATALO);
}
static void
bwn_phy_lp_rf_write(struct bwn_mac *mac, uint16_t reg, uint16_t value)
{
KASSERT(reg != 1, ("inaccessible register %d", reg));
BWN_WRITE_2(mac, BWN_RFCTL, reg);
BWN_WRITE_2(mac, BWN_RFDATALO, value);
}
static void
bwn_phy_lp_rf_onoff(struct bwn_mac *mac, int on)
{
if (on) {
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xe0ff);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2,
(mac->mac_phy.rev >= 2) ? 0xf7f7 : 0xffe7);
return;
}
if (mac->mac_phy.rev >= 2) {
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x83ff);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1f00);
BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0x80ff);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xdfff);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x0808);
return;
}
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xe0ff);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1f00);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfcff);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x0018);
}
static int
bwn_phy_lp_switch_channel(struct bwn_mac *mac, uint32_t chan)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_lp *plp = &phy->phy_lp;
int error;
if (phy->rf_ver == 0x2063) {
error = bwn_phy_lp_b2063_switch_channel(mac, chan);
if (error)
return (error);
} else {
error = bwn_phy_lp_b2062_switch_channel(mac, chan);
if (error)
return (error);
bwn_phy_lp_set_anafilter(mac, chan);
bwn_phy_lp_set_gaintbl(mac, ieee80211_ieee2mhz(chan, 0));
}
plp->plp_chan = chan;
BWN_WRITE_2(mac, BWN_CHANNEL, chan);
return (0);
}
static uint32_t
bwn_phy_lp_get_default_chan(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
return (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? 1 : 36);
}
static void
bwn_phy_lp_set_antenna(struct bwn_mac *mac, int antenna)
{
struct bwn_phy *phy = &mac->mac_phy;
struct bwn_phy_lp *plp = &phy->phy_lp;
if (phy->rev >= 2 || antenna > BWN_ANTAUTO1)
return;
bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_UCODE_ANTDIV_HELPER);
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfffd, antenna & 0x2);
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfffe, antenna & 0x1);
bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_UCODE_ANTDIV_HELPER);
plp->plp_antenna = antenna;
}
static void
bwn_phy_lp_task_60s(struct bwn_mac *mac)
{
bwn_phy_lp_calib(mac);
}
static void
bwn_phy_lp_readsprom(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
plp->plp_txisoband_m = siba_sprom_get_tri2g(sc->sc_dev);
plp->plp_bxarch = siba_sprom_get_bxa2g(sc->sc_dev);
plp->plp_rxpwroffset = siba_sprom_get_rxpo2g(sc->sc_dev);
plp->plp_rssivf = siba_sprom_get_rssismf2g(sc->sc_dev);
plp->plp_rssivc = siba_sprom_get_rssismc2g(sc->sc_dev);
plp->plp_rssigs = siba_sprom_get_rssisav2g(sc->sc_dev);
return;
}
plp->plp_txisoband_l = siba_sprom_get_tri5gl(sc->sc_dev);
plp->plp_txisoband_m = siba_sprom_get_tri5g(sc->sc_dev);
plp->plp_txisoband_h = siba_sprom_get_tri5gh(sc->sc_dev);
plp->plp_bxarch = siba_sprom_get_bxa5g(sc->sc_dev);
plp->plp_rxpwroffset = siba_sprom_get_rxpo5g(sc->sc_dev);
plp->plp_rssivf = siba_sprom_get_rssismf5g(sc->sc_dev);
plp->plp_rssivc = siba_sprom_get_rssismc5g(sc->sc_dev);
plp->plp_rssigs = siba_sprom_get_rssisav5g(sc->sc_dev);
}
static void
bwn_phy_lp_bbinit(struct bwn_mac *mac)
{
bwn_phy_lp_tblinit(mac);
if (mac->mac_phy.rev >= 2)
bwn_phy_lp_bbinit_r2(mac);
else
bwn_phy_lp_bbinit_r01(mac);
}
static void
bwn_phy_lp_txpctl_init(struct bwn_mac *mac)
{
struct bwn_txgain gain_2ghz = { 4, 12, 12, 0 };
struct bwn_txgain gain_5ghz = { 7, 15, 14, 0 };
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
bwn_phy_lp_set_txgain(mac,
IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? &gain_2ghz : &gain_5ghz);
bwn_phy_lp_set_bbmult(mac, 150);
}
static void
bwn_phy_lp_calib(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
const struct bwn_rxcompco *rc = NULL;
struct bwn_txgain ogain;
int i, omode, oafeovr, orf, obbmult;
uint8_t mode, fc = 0;
if (plp->plp_chanfullcal != plp->plp_chan) {
plp->plp_chanfullcal = plp->plp_chan;
fc = 1;
}
bwn_mac_suspend(mac);
/* Bluetooth Coexistence Override */
BWN_WRITE_2(mac, BWN_BTCOEX_CTL, 0x3);
BWN_WRITE_2(mac, BWN_BTCOEX_TXCTL, 0xff);
if (mac->mac_phy.rev >= 2)
bwn_phy_lp_digflt_save(mac);
bwn_phy_lp_get_txpctlmode(mac);
mode = plp->plp_txpctlmode;
bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF);
if (mac->mac_phy.rev == 0 && mode != BWN_PHYLP_TXPCTL_OFF)
bwn_phy_lp_bugfix(mac);
if (mac->mac_phy.rev >= 2 && fc == 1) {
bwn_phy_lp_get_txpctlmode(mac);
omode = plp->plp_txpctlmode;
oafeovr = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR) & 0x40;
if (oafeovr)
ogain = bwn_phy_lp_get_txgain(mac);
orf = BWN_PHY_READ(mac, BWN_PHY_RF_PWR_OVERRIDE) & 0xff;
obbmult = bwn_phy_lp_get_bbmult(mac);
bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF);
if (oafeovr)
bwn_phy_lp_set_txgain(mac, &ogain);
bwn_phy_lp_set_bbmult(mac, obbmult);
bwn_phy_lp_set_txpctlmode(mac, omode);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_PWR_OVERRIDE, 0xff00, orf);
}
bwn_phy_lp_set_txpctlmode(mac, mode);
if (mac->mac_phy.rev >= 2)
bwn_phy_lp_digflt_restore(mac);
/* do the RX IQ calculation; assumes noise-based measurement. */
if (siba_get_chipid(sc->sc_dev) == 0x5354) {
for (i = 0; i < N(bwn_rxcompco_5354); i++) {
if (bwn_rxcompco_5354[i].rc_chan == plp->plp_chan)
rc = &bwn_rxcompco_5354[i];
}
} else if (mac->mac_phy.rev >= 2)
rc = &bwn_rxcompco_r2;
else {
for (i = 0; i < N(bwn_rxcompco_r12); i++) {
if (bwn_rxcompco_r12[i].rc_chan == plp->plp_chan)
rc = &bwn_rxcompco_r12[i];
}
}
if (rc == NULL)
goto fail;
BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, rc->rc_c1);
BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff, rc->rc_c0 << 8);
bwn_phy_lp_set_trsw_over(mac, 1 /* TX */, 0 /* RX */);
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfff7, 0);
} else {
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x20);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffdf, 0);
}
bwn_phy_lp_set_rxgain(mac, 0x2d5d);
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfffe);
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xfffe);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x800);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x800);
bwn_phy_lp_set_deaf(mac, 0);
/* XXX return value not checked? */
(void)bwn_phy_lp_calc_rx_iq_comp(mac, 0xfff0);
bwn_phy_lp_clear_deaf(mac, 0);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfffc);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfff7);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffdf);
/* disable RX GAIN override. */
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfffe);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffef);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffbf);
if (mac->mac_phy.rev >= 2) {
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfeff);
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfbff);
BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xe5), 0xfff7);
}
} else {
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfdff);
}
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfffe);
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xf7ff);
fail:
bwn_mac_enable(mac);
}
static void
bwn_phy_lp_switch_analog(struct bwn_mac *mac, int on)
{
if (on) {
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfff8);
return;
}
BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVRVAL, 0x0007);
BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 0x0007);
}
static int
bwn_phy_lp_b2063_switch_channel(struct bwn_mac *mac, uint8_t chan)
{
const struct bwn_b206x_chan *bc = NULL;
struct bwn_softc *sc = mac->mac_sc;
uint32_t count, freqref, freqvco, freqxtal, val[3], timeout, timeoutref,
tmp[6];
uint16_t old, scale, tmp16;
int i, div;
for (i = 0; i < N(bwn_b2063_chantable); i++) {
if (bwn_b2063_chantable[i].bc_chan == chan) {
bc = &bwn_b2063_chantable[i];
break;
}
}
if (bc == NULL)
return (EINVAL);
BWN_RF_WRITE(mac, BWN_B2063_LOGEN_VCOBUF1, bc->bc_data[0]);
BWN_RF_WRITE(mac, BWN_B2063_LOGEN_MIXER2, bc->bc_data[1]);
BWN_RF_WRITE(mac, BWN_B2063_LOGEN_BUF2, bc->bc_data[2]);
BWN_RF_WRITE(mac, BWN_B2063_LOGEN_RCCR1, bc->bc_data[3]);
BWN_RF_WRITE(mac, BWN_B2063_A_RX_1ST3, bc->bc_data[4]);
BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND1, bc->bc_data[5]);
BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND4, bc->bc_data[6]);
BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND7, bc->bc_data[7]);
BWN_RF_WRITE(mac, BWN_B2063_A_RX_PS6, bc->bc_data[8]);
BWN_RF_WRITE(mac, BWN_B2063_TX_RF_CTL2, bc->bc_data[9]);
BWN_RF_WRITE(mac, BWN_B2063_TX_RF_CTL5, bc->bc_data[10]);
BWN_RF_WRITE(mac, BWN_B2063_PA_CTL11, bc->bc_data[11]);
old = BWN_RF_READ(mac, BWN_B2063_COM15);
BWN_RF_SET(mac, BWN_B2063_COM15, 0x1e);
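/*
 * The PMU reports the crystal frequency in kHz (hence the * 1000);
 * the VCO presumably runs at twice the channel frequency in the
 * 5 GHz band and four times it in the 2 GHz band.
 */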
freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000;
freqvco = bc->bc_freq << ((bc->bc_freq > 4000) ? 1 : 2);
freqref = freqxtal * 3;
div = (freqxtal <= 26000000 ? 1 : 2);
timeout = ((((8 * freqxtal) / (div * 5000000)) + 1) >> 1) - 1;
timeoutref = ((((8 * freqxtal) / (div * (timeout + 1))) +
999999) / 1000000) + 1;
BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB3, 0x2);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB6,
0xfff8, timeout >> 2);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB7,
0xff9f, timeout << 5);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB5, timeoutref);
val[0] = bwn_phy_lp_roundup(freqxtal, 1000000, 16);
val[1] = bwn_phy_lp_roundup(freqxtal, 1000000 * div, 16);
val[2] = bwn_phy_lp_roundup(freqvco, 3, 16);
count = (bwn_phy_lp_roundup(val[2], val[1] + 16, 16) * (timeout + 1) *
(timeoutref + 1)) - 1;
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB7,
0xf0, count >> 8);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB8, count & 0xff);
tmp[0] = ((val[2] * 62500) / freqref) << 4;
tmp[1] = ((val[2] * 62500) % freqref) << 4;
while (tmp[1] >= freqref) {
tmp[0]++;
tmp[1] -= freqref;
}
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG1, 0xffe0, tmp[0] >> 4);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG2, 0xfe0f, tmp[0] << 4);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG2, 0xfff0, tmp[0] >> 16);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_SG3, (tmp[1] >> 8) & 0xff);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_SG4, tmp[1] & 0xff);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF1, 0xb9);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF2, 0x88);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF3, 0x28);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF4, 0x63);
tmp[2] = ((41 * (val[2] - 3000)) / 1200) + 27;
tmp[3] = bwn_phy_lp_roundup(132000 * tmp[0], 8451, 16);
if ((tmp[3] + tmp[2] - 1) / tmp[2] > 60) {
scale = 1;
tmp[4] = ((tmp[3] + tmp[2]) / (tmp[2] << 1)) - 8;
} else {
scale = 0;
tmp[4] = ((tmp[3] + (tmp[2] >> 1)) / tmp[2]) - 8;
}
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP2, 0xffc0, tmp[4]);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP2, 0xffbf, scale << 6);
tmp[5] = bwn_phy_lp_roundup(100 * val[0], val[2], 16) * (tmp[4] * 8) *
(scale + 1);
if (tmp[5] > 150)
tmp[5] = 0;
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP3, 0xffe0, tmp[5]);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP3, 0xffdf, scale << 5);
BWN_RF_SETMASK(mac, BWN_B2063_JTAG_XTAL_12, 0xfffb, 0x4);
if (freqxtal > 26000000)
BWN_RF_SET(mac, BWN_B2063_JTAG_XTAL_12, 0x2);
else
BWN_RF_MASK(mac, BWN_B2063_JTAG_XTAL_12, 0xfd);
if (val[0] == 45)
BWN_RF_SET(mac, BWN_B2063_JTAG_VCO1, 0x2);
else
BWN_RF_MASK(mac, BWN_B2063_JTAG_VCO1, 0xfd);
BWN_RF_SET(mac, BWN_B2063_PLL_SP2, 0x3);
DELAY(1);
BWN_RF_MASK(mac, BWN_B2063_PLL_SP2, 0xfffc);
/* VCO Calibration */
BWN_RF_MASK(mac, BWN_B2063_PLL_SP1, ~0x40);
tmp16 = BWN_RF_READ(mac, BWN_B2063_JTAG_CALNRST) & 0xf8;
BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16);
DELAY(1);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x4);
DELAY(1);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x6);
DELAY(1);
BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x7);
DELAY(300);
BWN_RF_SET(mac, BWN_B2063_PLL_SP1, 0x40);
BWN_RF_WRITE(mac, BWN_B2063_COM15, old);
return (0);
}
static int
bwn_phy_lp_b2062_switch_channel(struct bwn_mac *mac, uint8_t chan)
{
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
const struct bwn_b206x_chan *bc = NULL;
uint32_t freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000;
uint32_t tmp[9];
int i;
for (i = 0; i < N(bwn_b2062_chantable); i++) {
if (bwn_b2062_chantable[i].bc_chan == chan) {
bc = &bwn_b2062_chantable[i];
break;
}
}
if (bc == NULL)
return (EINVAL);
BWN_RF_SET(mac, BWN_B2062_S_RFPLLCTL14, 0x04);
BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE0, bc->bc_data[0]);
BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE2, bc->bc_data[1]);
BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE3, bc->bc_data[2]);
BWN_RF_WRITE(mac, BWN_B2062_N_TX_TUNE, bc->bc_data[3]);
BWN_RF_WRITE(mac, BWN_B2062_S_LGENG_CTL1, bc->bc_data[4]);
BWN_RF_WRITE(mac, BWN_B2062_N_LGENACTL5, bc->bc_data[5]);
BWN_RF_WRITE(mac, BWN_B2062_N_LGENACTL6, bc->bc_data[6]);
BWN_RF_WRITE(mac, BWN_B2062_N_TX_PGA, bc->bc_data[7]);
BWN_RF_WRITE(mac, BWN_B2062_N_TX_PAD, bc->bc_data[8]);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL33, 0xcc);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL34, 0x07);
bwn_phy_lp_b2062_reset_pllbias(mac);
tmp[0] = freqxtal / 1000;
tmp[1] = plp->plp_div * 1000;
tmp[2] = tmp[1] * ieee80211_ieee2mhz(chan, 0);
if (ieee80211_ieee2mhz(chan, 0) < 4000)
tmp[2] *= 2;
tmp[3] = 48 * tmp[0];
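/*
 * The div/mod loop below extracts the integer part and successive
 * base-256 fractional digits of tmp[2]/tmp[3] (presumably the
 * fractional-N PLL divider ratio), writing one register per digit.
 */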
tmp[5] = tmp[2] / tmp[3];
tmp[6] = tmp[2] % tmp[3];
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL26, tmp[5]);
tmp[4] = tmp[6] * 0x100;
tmp[5] = tmp[4] / tmp[3];
tmp[6] = tmp[4] % tmp[3];
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL27, tmp[5]);
tmp[4] = tmp[6] * 0x100;
tmp[5] = tmp[4] / tmp[3];
tmp[6] = tmp[4] % tmp[3];
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL28, tmp[5]);
tmp[4] = tmp[6] * 0x100;
tmp[5] = tmp[4] / tmp[3];
tmp[6] = tmp[4] % tmp[3];
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL29,
tmp[5] + ((2 * tmp[6]) / tmp[3]));
tmp[7] = BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL19);
tmp[8] = ((2 * tmp[2] * (tmp[7] + 1)) + (3 * tmp[0])) / (6 * tmp[0]);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL23, (tmp[8] >> 8) + 16);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL24, tmp[8] & 0xff);
bwn_phy_lp_b2062_vco_calib(mac);
if (BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL3) & 0x10) {
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL33, 0xfc);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL34, 0);
bwn_phy_lp_b2062_reset_pllbias(mac);
bwn_phy_lp_b2062_vco_calib(mac);
if (BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL3) & 0x10) {
BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL14, ~0x04);
return (EIO);
}
}
BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL14, ~0x04);
return (0);
}
static void
bwn_phy_lp_set_anafilter(struct bwn_mac *mac, uint8_t channel)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
uint16_t tmp = (channel == 14);
if (mac->mac_phy.rev < 2) {
BWN_PHY_SETMASK(mac, BWN_PHY_LP_PHY_CTL, 0xfcff, tmp << 9);
if ((mac->mac_phy.rev == 1) && (plp->plp_rccap))
bwn_phy_lp_set_rccap(mac);
return;
}
BWN_RF_WRITE(mac, BWN_B2063_TX_BB_SP3, 0x3f);
}
static void
bwn_phy_lp_set_gaintbl(struct bwn_mac *mac, uint32_t freq)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t iso, tmp[3];
KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__));
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
iso = plp->plp_txisoband_m;
else if (freq <= 5320)
iso = plp->plp_txisoband_l;
else if (freq <= 5700)
iso = plp->plp_txisoband_m;
else
iso = plp->plp_txisoband_h;
tmp[0] = ((iso - 26) / 12) << 12;
tmp[1] = tmp[0] + 0x1000;
tmp[2] = tmp[0] + 0x2000;
bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), 3, tmp);
bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), 3, tmp);
}
static void
bwn_phy_lp_digflt_save(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
int i;
static const uint16_t addr[] = {
BWN_PHY_OFDM(0xc1), BWN_PHY_OFDM(0xc2),
BWN_PHY_OFDM(0xc3), BWN_PHY_OFDM(0xc4),
BWN_PHY_OFDM(0xc5), BWN_PHY_OFDM(0xc6),
BWN_PHY_OFDM(0xc7), BWN_PHY_OFDM(0xc8),
BWN_PHY_OFDM(0xcf),
};
static const uint16_t val[] = {
0xde5e, 0xe832, 0xe331, 0x4d26,
0x0026, 0x1420, 0x0020, 0xfe08,
0x0008,
};
for (i = 0; i < N(addr); i++) {
plp->plp_digfilt[i] = BWN_PHY_READ(mac, addr[i]);
BWN_PHY_WRITE(mac, addr[i], val[i]);
}
}
static void
bwn_phy_lp_get_txpctlmode(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
uint16_t ctl;
ctl = BWN_PHY_READ(mac, BWN_PHY_TX_PWR_CTL_CMD);
switch (ctl & BWN_PHY_TX_PWR_CTL_CMD_MODE) {
case BWN_PHY_TX_PWR_CTL_CMD_MODE_OFF:
plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_OFF;
break;
case BWN_PHY_TX_PWR_CTL_CMD_MODE_SW:
plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_ON_SW;
break;
case BWN_PHY_TX_PWR_CTL_CMD_MODE_HW:
plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_ON_HW;
break;
default:
plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_UNKNOWN;
device_printf(sc->sc_dev, "unknown command mode\n");
break;
}
}
static void
bwn_phy_lp_set_txpctlmode(struct bwn_mac *mac, uint8_t mode)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
uint16_t ctl;
uint8_t old;
bwn_phy_lp_get_txpctlmode(mac);
old = plp->plp_txpctlmode;
if (old == mode)
return;
plp->plp_txpctlmode = mode;
if (old != BWN_PHYLP_TXPCTL_ON_HW && mode == BWN_PHYLP_TXPCTL_ON_HW) {
BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_CMD, 0xff80,
plp->plp_tssiidx);
BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_NNUM,
0x8fff, ((uint16_t)plp->plp_tssinpt << 16));
/* disable TX GAIN override */
if (mac->mac_phy.rev < 2)
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfeff);
else {
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xff7f);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xbfff);
}
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xffbf);
plp->plp_txpwridx = -1;
}
if (mac->mac_phy.rev >= 2) {
if (mode == BWN_PHYLP_TXPCTL_ON_HW)
BWN_PHY_SET(mac, BWN_PHY_OFDM(0xd0), 0x2);
else
BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xd0), 0xfffd);
}
/* writes TX Power Control mode */
switch (plp->plp_txpctlmode) {
case BWN_PHYLP_TXPCTL_OFF:
ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_OFF;
break;
case BWN_PHYLP_TXPCTL_ON_HW:
ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_HW;
break;
case BWN_PHYLP_TXPCTL_ON_SW:
ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_SW;
break;
default:
ctl = 0;
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_CMD,
(uint16_t)~BWN_PHY_TX_PWR_CTL_CMD_MODE, ctl);
}
static void
bwn_phy_lp_bugfix(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
const unsigned int size = 256;
struct bwn_txgain tg;
uint32_t rxcomp, txgain, coeff, rfpwr, *tabs;
uint16_t tssinpt, tssiidx, value[2];
uint8_t mode;
int8_t txpwridx;
tabs = (uint32_t *)malloc(sizeof(uint32_t) * size, M_DEVBUF,
M_NOWAIT | M_ZERO);
if (tabs == NULL) {
device_printf(sc->sc_dev, "failed to allocate buffer.\n");
return;
}
bwn_phy_lp_get_txpctlmode(mac);
mode = plp->plp_txpctlmode;
txpwridx = plp->plp_txpwridx;
tssinpt = plp->plp_tssinpt;
tssiidx = plp->plp_tssiidx;
bwn_tab_read_multi(mac,
(mac->mac_phy.rev < 2) ? BWN_TAB_4(10, 0x140) :
BWN_TAB_4(7, 0x140), size, tabs);
bwn_phy_lp_tblinit(mac);
bwn_phy_lp_bbinit(mac);
bwn_phy_lp_txpctl_init(mac);
bwn_phy_lp_rf_onoff(mac, 1);
bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF);
bwn_tab_write_multi(mac,
(mac->mac_phy.rev < 2) ? BWN_TAB_4(10, 0x140) :
BWN_TAB_4(7, 0x140), size, tabs);
BWN_WRITE_2(mac, BWN_CHANNEL, plp->plp_chan);
plp->plp_tssinpt = tssinpt;
plp->plp_tssiidx = tssiidx;
bwn_phy_lp_set_anafilter(mac, plp->plp_chan);
if (txpwridx != -1) {
/* set TX power by index */
plp->plp_txpwridx = txpwridx;
bwn_phy_lp_get_txpctlmode(mac);
if (plp->plp_txpctlmode != BWN_PHYLP_TXPCTL_OFF)
bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_ON_SW);
if (mac->mac_phy.rev >= 2) {
rxcomp = bwn_tab_read(mac,
BWN_TAB_4(7, txpwridx + 320));
txgain = bwn_tab_read(mac,
BWN_TAB_4(7, txpwridx + 192));
tg.tg_pad = (txgain >> 16) & 0xff;
tg.tg_gm = txgain & 0xff;
tg.tg_pga = (txgain >> 8) & 0xff;
tg.tg_dac = (rxcomp >> 28) & 0xff;
bwn_phy_lp_set_txgain(mac, &tg);
} else {
rxcomp = bwn_tab_read(mac,
BWN_TAB_4(10, txpwridx + 320));
txgain = bwn_tab_read(mac,
BWN_TAB_4(10, txpwridx + 192));
BWN_PHY_SETMASK(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL,
0xf800, (txgain >> 4) & 0x7fff);
bwn_phy_lp_set_txgain_dac(mac, txgain & 0x7);
bwn_phy_lp_set_txgain_pa(mac, (txgain >> 24) & 0x7f);
}
bwn_phy_lp_set_bbmult(mac, (rxcomp >> 20) & 0xff);
/* set TX IQCC */
value[0] = (rxcomp >> 10) & 0x3ff;
value[1] = rxcomp & 0x3ff;
bwn_tab_write_multi(mac, BWN_TAB_2(0, 80), 2, value);
coeff = bwn_tab_read(mac,
(mac->mac_phy.rev >= 2) ? BWN_TAB_4(7, txpwridx + 448) :
BWN_TAB_4(10, txpwridx + 448));
bwn_tab_write(mac, BWN_TAB_2(0, 85), coeff & 0xffff);
if (mac->mac_phy.rev >= 2) {
rfpwr = bwn_tab_read(mac,
BWN_TAB_4(7, txpwridx + 576));
BWN_PHY_SETMASK(mac, BWN_PHY_RF_PWR_OVERRIDE, 0xff00,
rfpwr & 0xffff);
}
bwn_phy_lp_set_txgain_override(mac);
}
if (plp->plp_rccap)
bwn_phy_lp_set_rccap(mac);
bwn_phy_lp_set_antenna(mac, plp->plp_antenna);
bwn_phy_lp_set_txpctlmode(mac, mode);
free(tabs, M_DEVBUF);
}
static void
bwn_phy_lp_digflt_restore(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
int i;
static const uint16_t addr[] = {
BWN_PHY_OFDM(0xc1), BWN_PHY_OFDM(0xc2),
BWN_PHY_OFDM(0xc3), BWN_PHY_OFDM(0xc4),
BWN_PHY_OFDM(0xc5), BWN_PHY_OFDM(0xc6),
BWN_PHY_OFDM(0xc7), BWN_PHY_OFDM(0xc8),
BWN_PHY_OFDM(0xcf),
};
for (i = 0; i < N(addr); i++)
BWN_PHY_WRITE(mac, addr[i], plp->plp_digfilt[i]);
}
static void
bwn_phy_lp_tblinit(struct bwn_mac *mac)
{
uint32_t freq = ieee80211_ieee2mhz(bwn_phy_lp_get_default_chan(mac), 0);
if (mac->mac_phy.rev < 2) {
bwn_phy_lp_tblinit_r01(mac);
bwn_phy_lp_tblinit_txgain(mac);
bwn_phy_lp_set_gaintbl(mac, freq);
return;
}
bwn_phy_lp_tblinit_r2(mac);
bwn_phy_lp_tblinit_txgain(mac);
}
struct bwn_wpair {
uint16_t reg;
uint16_t value;
};
struct bwn_smpair {
uint16_t offset;
uint16_t mask;
uint16_t set;
};
static void
bwn_phy_lp_bbinit_r2(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
static const struct bwn_wpair v1[] = {
{ BWN_PHY_AFE_DAC_CTL, 0x50 },
{ BWN_PHY_AFE_CTL, 0x8800 },
{ BWN_PHY_AFE_CTL_OVR, 0 },
{ BWN_PHY_AFE_CTL_OVRVAL, 0 },
{ BWN_PHY_RF_OVERRIDE_0, 0 },
{ BWN_PHY_RF_OVERRIDE_2, 0 },
{ BWN_PHY_OFDM(0xf9), 0 },
{ BWN_PHY_TR_LOOKUP_1, 0 }
};
static const struct bwn_smpair v2[] = {
{ BWN_PHY_OFDMSYNCTHRESH0, 0xff00, 0xb4 },
{ BWN_PHY_DCOFFSETTRANSIENT, 0xf8ff, 0x200 },
{ BWN_PHY_DCOFFSETTRANSIENT, 0xff00, 0x7f },
{ BWN_PHY_GAINDIRECTMISMATCH, 0xff0f, 0x40 },
{ BWN_PHY_PREAMBLECONFIRMTO, 0xff00, 0x2 }
};
static const struct bwn_smpair v3[] = {
{ BWN_PHY_OFDM(0xfe), 0xffe0, 0x1f },
{ BWN_PHY_OFDM(0xff), 0xffe0, 0xc },
{ BWN_PHY_OFDM(0x100), 0xff00, 0x19 },
{ BWN_PHY_OFDM(0xff), 0x03ff, 0x3c00 },
{ BWN_PHY_OFDM(0xfe), 0xfc1f, 0x3e0 },
{ BWN_PHY_OFDM(0xff), 0xffe0, 0xc },
{ BWN_PHY_OFDM(0x100), 0x00ff, 0x1900 },
{ BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x5800 },
{ BWN_PHY_CLIPCTRTHRESH, 0xffe0, 0x12 },
{ BWN_PHY_GAINMISMATCH, 0x0fff, 0x9000 },
};
int i;
for (i = 0; i < N(v1); i++)
BWN_PHY_WRITE(mac, v1[i].reg, v1[i].value);
BWN_PHY_SET(mac, BWN_PHY_ADC_COMPENSATION_CTL, 0x10);
for (i = 0; i < N(v2); i++)
BWN_PHY_SETMASK(mac, v2[i].offset, v2[i].mask, v2[i].set);
BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x4000);
BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x2000);
BWN_PHY_SET(mac, BWN_PHY_OFDM(0x10a), 0x1);
if (siba_get_pci_revid(sc->sc_dev) >= 0x18) {
bwn_tab_write(mac, BWN_TAB_4(17, 65), 0xec);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x10a), 0xff01, 0x14);
} else {
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x10a), 0xff01, 0x10);
}
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xdf), 0xff00, 0xf4);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xdf), 0x00ff, 0xf100);
BWN_PHY_WRITE(mac, BWN_PHY_CLIPTHRESH, 0x48);
BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0xff00, 0x46);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xe4), 0xff00, 0x10);
BWN_PHY_SETMASK(mac, BWN_PHY_PWR_THRESH1, 0xfff0, 0x9);
BWN_PHY_MASK(mac, BWN_PHY_GAINDIRECTMISMATCH, ~0xf);
BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0x00ff, 0x5500);
BWN_PHY_SETMASK(mac, BWN_PHY_CLIPCTRTHRESH, 0xfc1f, 0xa0);
BWN_PHY_SETMASK(mac, BWN_PHY_GAINDIRECTMISMATCH, 0xe0ff, 0x300);
BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0x00ff, 0x2a00);
if ((siba_get_chipid(sc->sc_dev) == 0x4325) &&
(siba_get_chiprev(sc->sc_dev) == 0)) {
BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x2100);
BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0xa);
} else {
BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x1e00);
BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0xd);
}
for (i = 0; i < N(v3); i++)
BWN_PHY_SETMASK(mac, v3[i].offset, v3[i].mask, v3[i].set);
if ((siba_get_chipid(sc->sc_dev) == 0x4325) &&
(siba_get_chiprev(sc->sc_dev) == 0)) {
bwn_tab_write(mac, BWN_TAB_2(0x08, 0x14), 0);
bwn_tab_write(mac, BWN_TAB_2(0x08, 0x12), 0x40);
}
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x40);
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xf0ff, 0xb00);
BWN_PHY_SETMASK(mac, BWN_PHY_SYNCPEAKCNT, 0xfff8, 0x6);
BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0x00ff, 0x9d00);
BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0xff00, 0xa1);
BWN_PHY_MASK(mac, BWN_PHY_IDLEAFTERPKTRXTO, 0x00ff);
} else
BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x40);
BWN_PHY_SETMASK(mac, BWN_PHY_CRS_ED_THRESH, 0xff00, 0xb3);
BWN_PHY_SETMASK(mac, BWN_PHY_CRS_ED_THRESH, 0x00ff, 0xad00);
BWN_PHY_SETMASK(mac, BWN_PHY_INPUT_PWRDB, 0xff00, plp->plp_rxpwroffset);
BWN_PHY_SET(mac, BWN_PHY_RESET_CTL, 0x44);
BWN_PHY_WRITE(mac, BWN_PHY_RESET_CTL, 0x80);
BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_0, 0xa954);
BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_1,
0x2000 | ((uint16_t)plp->plp_rssigs << 10) |
((uint16_t)plp->plp_rssivc << 4) | plp->plp_rssivf);
if ((siba_get_chipid(sc->sc_dev) == 0x4325) &&
(siba_get_chiprev(sc->sc_dev) == 0)) {
BWN_PHY_SET(mac, BWN_PHY_AFE_ADC_CTL_0, 0x1c);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_CTL, 0x00ff, 0x8800);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_ADC_CTL_1, 0xfc3c, 0x0400);
}
bwn_phy_lp_digflt_save(mac);
}
static void
bwn_phy_lp_bbinit_r01(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
static const struct bwn_smpair v1[] = {
{ BWN_PHY_CLIPCTRTHRESH, 0xffe0, 0x0005 },
{ BWN_PHY_CLIPCTRTHRESH, 0xfc1f, 0x0180 },
{ BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x3c00 },
{ BWN_PHY_GAINDIRECTMISMATCH, 0xfff0, 0x0005 },
{ BWN_PHY_GAIN_MISMATCH_LIMIT, 0xffc0, 0x001a },
{ BWN_PHY_CRS_ED_THRESH, 0xff00, 0x00b3 },
{ BWN_PHY_CRS_ED_THRESH, 0x00ff, 0xad00 }
};
static const struct bwn_smpair v2[] = {
{ BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_1, 0x3f00, 0x0900 },
{ BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0b00 },
{ BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0400 },
{ BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0b00 },
{ BWN_PHY_TR_LOOKUP_5, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_5, 0xc0ff, 0x0900 },
{ BWN_PHY_TR_LOOKUP_6, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_6, 0xc0ff, 0x0b00 },
{ BWN_PHY_TR_LOOKUP_7, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_7, 0xc0ff, 0x0900 },
{ BWN_PHY_TR_LOOKUP_8, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_8, 0xc0ff, 0x0b00 }
};
static const struct bwn_smpair v3[] = {
{ BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x0001 },
{ BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0400 },
{ BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x0001 },
{ BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0500 },
{ BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0002 },
{ BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0800 },
{ BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0002 },
{ BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0a00 }
};
static const struct bwn_smpair v4[] = {
{ BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x0004 },
{ BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0800 },
{ BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x0004 },
{ BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0c00 },
{ BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0002 },
{ BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0100 },
{ BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0002 },
{ BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0300 }
};
static const struct bwn_smpair v5[] = {
{ BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0900 },
{ BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x000a },
{ BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0b00 },
{ BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0006 },
{ BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0500 },
{ BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0006 },
{ BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0700 }
};
int i;
uint16_t tmp, tmp2;
BWN_PHY_MASK(mac, BWN_PHY_AFE_DAC_CTL, 0xf7ff);
BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL, 0);
BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVR, 0);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_0, 0);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, 0);
BWN_PHY_SET(mac, BWN_PHY_AFE_DAC_CTL, 0x0004);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDMSYNCTHRESH0, 0xff00, 0x0078);
BWN_PHY_SETMASK(mac, BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x5800);
BWN_PHY_WRITE(mac, BWN_PHY_ADC_COMPENSATION_CTL, 0x0016);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_ADC_CTL_0, 0xfff8, 0x0004);
BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0x00ff, 0x5400);
BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0x00ff, 0x2400);
BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x2100);
BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0x0006);
BWN_PHY_MASK(mac, BWN_PHY_RX_RADIO_CTL, 0xfffe);
for (i = 0; i < N(v1); i++)
BWN_PHY_SETMASK(mac, v1[i].offset, v1[i].mask, v1[i].set);
BWN_PHY_SETMASK(mac, BWN_PHY_INPUT_PWRDB,
0xff00, plp->plp_rxpwroffset);
if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM) &&
((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ||
(siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_LDO_PAREF))) {
siba_cc_pmu_set_ldovolt(sc->sc_dev, SIBA_LDO_PAREF, 0x28);
siba_cc_pmu_set_ldoparef(sc->sc_dev, 1);
if (mac->mac_phy.rev == 0)
BWN_PHY_SETMASK(mac, BWN_PHY_LP_RF_SIGNAL_LUT,
0xffcf, 0x0010);
bwn_tab_write(mac, BWN_TAB_2(11, 7), 60);
} else {
siba_cc_pmu_set_ldoparef(sc->sc_dev, 0);
BWN_PHY_SETMASK(mac, BWN_PHY_LP_RF_SIGNAL_LUT, 0xffcf, 0x0020);
bwn_tab_write(mac, BWN_TAB_2(11, 7), 100);
}
tmp = plp->plp_rssivf | plp->plp_rssivc << 4 | 0xa000;
BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_0, tmp);
if (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_RSSIINV)
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_RSSI_CTL_1, 0xf000, 0x0aaa);
else
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_RSSI_CTL_1, 0xf000, 0x02aa);
bwn_tab_write(mac, BWN_TAB_2(11, 1), 24);
BWN_PHY_SETMASK(mac, BWN_PHY_RX_RADIO_CTL,
0xfff9, (plp->plp_bxarch << 1));
if (mac->mac_phy.rev == 1 &&
(siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_FEM_BT)) {
for (i = 0; i < N(v2); i++)
BWN_PHY_SETMASK(mac, v2[i].offset, v2[i].mask,
v2[i].set);
} else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan) ||
(siba_get_pci_subdevice(sc->sc_dev) == 0x048a) ||
((mac->mac_phy.rev == 0) &&
(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM))) {
for (i = 0; i < N(v3); i++)
BWN_PHY_SETMASK(mac, v3[i].offset, v3[i].mask,
v3[i].set);
} else if (mac->mac_phy.rev == 1 ||
(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM)) {
for (i = 0; i < N(v4); i++)
BWN_PHY_SETMASK(mac, v4[i].offset, v4[i].mask,
v4[i].set);
} else {
for (i = 0; i < N(v5); i++)
BWN_PHY_SETMASK(mac, v5[i].offset, v5[i].mask,
v5[i].set);
}
if (mac->mac_phy.rev == 1 &&
(siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_LDO_PAREF)) {
BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_5, BWN_PHY_TR_LOOKUP_1);
BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_6, BWN_PHY_TR_LOOKUP_2);
BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_7, BWN_PHY_TR_LOOKUP_3);
BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_8, BWN_PHY_TR_LOOKUP_4);
}
if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_FEM_BT) &&
(siba_get_chipid(sc->sc_dev) == 0x5354) &&
(siba_get_chippkg(sc->sc_dev) == SIBA_CHIPPACK_BCM4712S)) {
BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x0006);
BWN_PHY_WRITE(mac, BWN_PHY_GPIO_SELECT, 0x0005);
BWN_PHY_WRITE(mac, BWN_PHY_GPIO_OUTEN, 0xffff);
bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_PR45960W);
}
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
BWN_PHY_SET(mac, BWN_PHY_LP_PHY_CTL, 0x8000);
BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x0040);
BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0x00ff, 0xa400);
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xf0ff, 0x0b00);
BWN_PHY_SETMASK(mac, BWN_PHY_SYNCPEAKCNT, 0xfff8, 0x0007);
BWN_PHY_SETMASK(mac, BWN_PHY_DSSS_CONFIRM_CNT, 0xfff8, 0x0003);
BWN_PHY_SETMASK(mac, BWN_PHY_DSSS_CONFIRM_CNT, 0xffc7, 0x0020);
BWN_PHY_MASK(mac, BWN_PHY_IDLEAFTERPKTRXTO, 0x00ff);
} else {
BWN_PHY_MASK(mac, BWN_PHY_LP_PHY_CTL, 0x7fff);
BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, 0xffbf);
}
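/*
 * PHY rev 1 only: replicate the clip-counter threshold, gain-mismatch
 * and very-low-gain fields into the 0x4c3, 0x4c4 and 0x4c5 PHY
 * registers.
 */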
if (mac->mac_phy.rev == 1) {
tmp = BWN_PHY_READ(mac, BWN_PHY_CLIPCTRTHRESH);
tmp2 = (tmp & 0x03e0) >> 5;
tmp2 |= tmp2 << 5;
BWN_PHY_WRITE(mac, BWN_PHY_4C3, tmp2);
tmp = BWN_PHY_READ(mac, BWN_PHY_GAINDIRECTMISMATCH);
tmp2 = (tmp & 0x1f00) >> 8;
tmp2 |= tmp2 << 5;
BWN_PHY_WRITE(mac, BWN_PHY_4C4, tmp2);
tmp = BWN_PHY_READ(mac, BWN_PHY_VERYLOWGAINDB);
tmp2 = tmp & 0x00ff;
tmp2 |= tmp << 8;
BWN_PHY_WRITE(mac, BWN_PHY_4C5, tmp2);
}
}
struct bwn_b2062_freq {
uint16_t freq;
uint8_t value[6];
};
static void
bwn_phy_lp_b2062_init(struct bwn_mac *mac)
{
#define CALC_CTL7(freq, div) \
(((800000000 * (div) + (freq)) / (2 * (freq)) - 8) & 0xff)
#define CALC_CTL18(freq, div) \
((((100 * (freq) + 16000000 * (div)) / (32000000 * (div))) - 1) & 0xff)
#define CALC_CTL19(freq, div) \
((((2 * (freq) + 1000000 * (div)) / (2000000 * (div))) - 1) & 0xff)
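/*
 * These macros derive B2062 RF PLL control register values from the
 * crystal frequency (Hz) and the reference divider chosen below.
 * For example, with a 24 MHz crystal and div = 1:
 * CTL7 = (800e6 + 24e6) / (2 * 24e6) - 8 = 9,
 * CTL18 = (100 * 24e6 + 16e6) / 32e6 - 1 = 74,
 * CTL19 = (2 * 24e6 + 1e6) / 2e6 - 1 = 23.
 */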
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
static const struct bwn_b2062_freq freqdata_tab[] = {
{ 12000, { 6, 6, 6, 6, 10, 6 } },
{ 13000, { 4, 4, 4, 4, 11, 7 } },
{ 14400, { 3, 3, 3, 3, 12, 7 } },
{ 16200, { 3, 3, 3, 3, 13, 8 } },
{ 18000, { 2, 2, 2, 2, 14, 8 } },
{ 19200, { 1, 1, 1, 1, 14, 9 } }
};
static const struct bwn_wpair v1[] = {
{ BWN_B2062_N_TXCTL3, 0 },
{ BWN_B2062_N_TXCTL4, 0 },
{ BWN_B2062_N_TXCTL5, 0 },
{ BWN_B2062_N_TXCTL6, 0 },
{ BWN_B2062_N_PDNCTL0, 0x40 },
{ BWN_B2062_N_PDNCTL0, 0 },
{ BWN_B2062_N_CALIB_TS, 0x10 },
{ BWN_B2062_N_CALIB_TS, 0 }
};
const struct bwn_b2062_freq *f = NULL;
uint32_t xtalfreq, ref;
unsigned int i;
bwn_phy_lp_b2062_tblinit(mac);
for (i = 0; i < N(v1); i++)
BWN_RF_WRITE(mac, v1[i].reg, v1[i].value);
if (mac->mac_phy.rev > 0)
BWN_RF_WRITE(mac, BWN_B2062_S_BG_CTL1,
(BWN_RF_READ(mac, BWN_B2062_N_COM2) >> 1) | 0x80);
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
BWN_RF_SET(mac, BWN_B2062_N_TSSI_CTL0, 0x1);
else
BWN_RF_MASK(mac, BWN_B2062_N_TSSI_CTL0, ~0x1);
KASSERT(siba_get_cc_caps(sc->sc_dev) & SIBA_CC_CAPS_PMU,
("%s:%d: fail", __func__, __LINE__));
xtalfreq = siba_get_cc_pmufreq(sc->sc_dev) * 1000;
KASSERT(xtalfreq != 0, ("%s:%d: fail", __func__, __LINE__));
if (xtalfreq <= 30000000) {
plp->plp_div = 1;
BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL1, 0xfffb);
} else {
plp->plp_div = 2;
BWN_RF_SET(mac, BWN_B2062_S_RFPLLCTL1, 0x4);
}
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL7,
CALC_CTL7(xtalfreq, plp->plp_div));
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL18,
CALC_CTL18(xtalfreq, plp->plp_div));
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL19,
CALC_CTL19(xtalfreq, plp->plp_div));
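/*
 * ref is the PLL reference in kHz: xtalfreq / (1000 * plp_div),
 * rounded to the nearest integer.  It selects the first freqdata_tab
 * row whose frequency exceeds it, falling back to the last row.
 */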
ref = (1000 * plp->plp_div + 2 * xtalfreq) / (2000 * plp->plp_div);
ref &= 0xffff;
for (i = 0; i < N(freqdata_tab); i++) {
if (ref < freqdata_tab[i].freq) {
f = &freqdata_tab[i];
break;
}
}
if (f == NULL)
f = &freqdata_tab[N(freqdata_tab) - 1];
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL8,
((uint16_t)(f->value[1]) << 4) | f->value[0]);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL9,
((uint16_t)(f->value[3]) << 4) | f->value[2]);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL10, f->value[4]);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL11, f->value[5]);
#undef CALC_CTL7
#undef CALC_CTL18
#undef CALC_CTL19
}
static void
bwn_phy_lp_b2063_init(struct bwn_mac *mac)
{
bwn_phy_lp_b2063_tblinit(mac);
BWN_RF_WRITE(mac, BWN_B2063_LOGEN_SP5, 0);
BWN_RF_SET(mac, BWN_B2063_COM8, 0x38);
BWN_RF_WRITE(mac, BWN_B2063_REG_SP1, 0x56);
BWN_RF_MASK(mac, BWN_B2063_RX_BB_CTL2, ~0x2);
BWN_RF_WRITE(mac, BWN_B2063_PA_SP7, 0);
BWN_RF_WRITE(mac, BWN_B2063_TX_RF_SP6, 0x20);
BWN_RF_WRITE(mac, BWN_B2063_TX_RF_SP9, 0x40);
if (mac->mac_phy.rev == 2) {
BWN_RF_WRITE(mac, BWN_B2063_PA_SP3, 0xa0);
BWN_RF_WRITE(mac, BWN_B2063_PA_SP4, 0xa0);
BWN_RF_WRITE(mac, BWN_B2063_PA_SP2, 0x18);
} else {
BWN_RF_WRITE(mac, BWN_B2063_PA_SP3, 0x20);
BWN_RF_WRITE(mac, BWN_B2063_PA_SP2, 0x20);
}
}
static void
bwn_phy_lp_rxcal_r2(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
static const struct bwn_wpair v1[] = {
{ BWN_B2063_RX_BB_SP8, 0x0 },
{ BWN_B2063_RC_CALIB_CTL1, 0x7e },
{ BWN_B2063_RC_CALIB_CTL1, 0x7c },
{ BWN_B2063_RC_CALIB_CTL2, 0x15 },
{ BWN_B2063_RC_CALIB_CTL3, 0x70 },
{ BWN_B2063_RC_CALIB_CTL4, 0x52 },
{ BWN_B2063_RC_CALIB_CTL5, 0x1 },
{ BWN_B2063_RC_CALIB_CTL1, 0x7d }
};
static const struct bwn_wpair v2[] = {
{ BWN_B2063_TX_BB_SP3, 0x0 },
{ BWN_B2063_RC_CALIB_CTL1, 0x7e },
{ BWN_B2063_RC_CALIB_CTL1, 0x7c },
{ BWN_B2063_RC_CALIB_CTL2, 0x55 },
{ BWN_B2063_RC_CALIB_CTL3, 0x76 }
};
uint32_t freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000;
int i;
uint8_t tmp;
tmp = BWN_RF_READ(mac, BWN_B2063_RX_BB_SP8) & 0xff;
for (i = 0; i < 2; i++)
BWN_RF_WRITE(mac, v1[i].reg, v1[i].value);
BWN_RF_MASK(mac, BWN_B2063_PLL_SP1, 0xf7);
for (i = 2; i < N(v1); i++)
BWN_RF_WRITE(mac, v1[i].reg, v1[i].value);
for (i = 0; i < 10000; i++) {
if (BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2)
break;
DELAY(1000);
}
if (!(BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2))
BWN_RF_WRITE(mac, BWN_B2063_RX_BB_SP8, tmp);
tmp = BWN_RF_READ(mac, BWN_B2063_TX_BB_SP3) & 0xff;
for (i = 0; i < N(v2); i++)
BWN_RF_WRITE(mac, v2[i].reg, v2[i].value);
if (freqxtal == 24000000) {
BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL4, 0xfc);
BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL5, 0x0);
} else {
BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL4, 0x13);
BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL5, 0x1);
}
BWN_RF_WRITE(mac, BWN_B2063_PA_SP7, 0x7d);
for (i = 0; i < 10000; i++) {
if (BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2)
break;
DELAY(1000);
}
if (!(BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2))
BWN_RF_WRITE(mac, BWN_B2063_TX_BB_SP3, tmp);
BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL1, 0x7e);
}
static void
bwn_phy_lp_rccal_r12(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
struct bwn_phy_lp_iq_est ie;
struct bwn_txgain tx_gains;
static const uint32_t pwrtbl[21] = {
0x10000, 0x10557, 0x10e2d, 0x113e0, 0x10f22, 0x0ff64,
0x0eda2, 0x0e5d4, 0x0efd1, 0x0fbe8, 0x0b7b8, 0x04b35,
0x01a5e, 0x00a0b, 0x00444, 0x001fd, 0x000ff, 0x00088,
0x0004c, 0x0002c, 0x0001a,
};
uint32_t npwr, ipwr, sqpwr, tmp;
int loopback, i, j, sum, error;
uint16_t save[7];
uint8_t txo, bbmult, txpctlmode;
error = bwn_phy_lp_switch_channel(mac, 7);
if (error)
device_printf(sc->sc_dev,
"failed to change channel to 7 (%d)\n", error);
txo = (BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR) & 0x40) ? 1 : 0;
bbmult = bwn_phy_lp_get_bbmult(mac);
if (txo)
tx_gains = bwn_phy_lp_get_txgain(mac);
save[0] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_0);
save[1] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_VAL_0);
save[2] = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR);
save[3] = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVRVAL);
save[4] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_2);
save[5] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_2_VAL);
save[6] = BWN_PHY_READ(mac, BWN_PHY_LP_PHY_CTL);
bwn_phy_lp_get_txpctlmode(mac);
txpctlmode = plp->plp_txpctlmode;
bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF);
/* disable CRS */
bwn_phy_lp_set_deaf(mac, 1);
bwn_phy_lp_set_trsw_over(mac, 0, 1);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffb);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x4);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfff7);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x10);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x10);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffdf);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x20);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffbf);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x40);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x7);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x38);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xff3f);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x100);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfdff);
BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL0, 0);
BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL1, 1);
BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL2, 0x20);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfbff);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xf7ff);
BWN_PHY_WRITE(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0);
BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, 0x45af);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, 0x3ff);
loopback = bwn_phy_lp_loopback(mac);
if (loopback == -1)
goto done;
bwn_phy_lp_set_rxgain_idx(mac, loopback);
BWN_PHY_SETMASK(mac, BWN_PHY_LP_PHY_CTL, 0xffbf, 0x40);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfff8, 0x1);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xffc7, 0x8);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xff3f, 0xc0);
tmp = 0;
memset(&ie, 0, sizeof(ie));
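/*
 * Sweep the RX baseband calibration value (128..159).  For each
 * candidate, generate DDFS tones at several frequencies, measure the
 * received I+Q power, and accumulate the squared error against the
 * expected powers in pwrtbl[]; the candidate with the smallest error
 * seen so far is remembered in plp_rccap.
 */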
for (i = 128; i <= 159; i++) {
BWN_RF_WRITE(mac, BWN_B2062_N_RXBB_CALIB2, i);
sum = 0;
for (j = 5; j <= 25; j++) {
bwn_phy_lp_ddfs_turnon(mac, 1, 1, j, j, 0);
if (!(bwn_phy_lp_rx_iq_est(mac, 1000, 32, &ie)))
goto done;
sqpwr = ie.ie_ipwr + ie.ie_qpwr;
ipwr = ((pwrtbl[j - 5] >> 3) + 1) >> 1;
npwr = bwn_phy_lp_roundup(sqpwr, (j == 5) ? sqpwr : 0,
12);
sum += ((ipwr - npwr) * (ipwr - npwr));
if ((i == 128) || (sum < tmp)) {
plp->plp_rccap = i;
tmp = sum;
}
}
}
bwn_phy_lp_ddfs_turnoff(mac);
done:
/* restore CRS */
bwn_phy_lp_clear_deaf(mac, 1);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xff80);
BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfc00);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_VAL_0, save[1]);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_0, save[0]);
BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVRVAL, save[3]);
BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVR, save[2]);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2_VAL, save[5]);
BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, save[4]);
BWN_PHY_WRITE(mac, BWN_PHY_LP_PHY_CTL, save[6]);
bwn_phy_lp_set_bbmult(mac, bbmult);
if (txo)
bwn_phy_lp_set_txgain(mac, &tx_gains);
bwn_phy_lp_set_txpctlmode(mac, txpctlmode);
if (plp->plp_rccap)
bwn_phy_lp_set_rccap(mac);
}
static void
bwn_phy_lp_set_rccap(struct bwn_mac *mac)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
uint8_t rc_cap = (plp->plp_rccap & 0x1f) >> 1;
if (mac->mac_phy.rev == 1)
rc_cap = MIN(rc_cap + 5, 15);
BWN_RF_WRITE(mac, BWN_B2062_N_RXBB_CALIB2,
MAX(plp->plp_rccap - 4, 0x80));
BWN_RF_WRITE(mac, BWN_B2062_N_TXCTL_A, rc_cap | 0x80);
BWN_RF_WRITE(mac, BWN_B2062_S_RXG_CNT16,
((plp->plp_rccap & 0x1f) >> 2) | 0x80);
}
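/*
 * Returns round(value * 2^pre / div): each loop iteration shifts in
 * one more quotient bit by long division, and the final remainder
 * test rounds to nearest.
 */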
static uint32_t
bwn_phy_lp_roundup(uint32_t value, uint32_t div, uint8_t pre)
{
uint32_t i, q, r;
if (div == 0)
return (0);
for (i = 0, q = value / div, r = value % div; i < pre; i++) {
q <<= 1;
if (r << 1 >= div) {
q++;
r = (r << 1) - div;
}
}
if (r << 1 >= div)
q++;
return (q);
}
static void
bwn_phy_lp_b2062_reset_pllbias(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 0xff);
DELAY(20);
if (siba_get_chipid(sc->sc_dev) == 0x5354) {
BWN_RF_WRITE(mac, BWN_B2062_N_COM1, 4);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 4);
} else {
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 0);
}
DELAY(5);
}
static void
bwn_phy_lp_b2062_vco_calib(struct bwn_mac *mac)
{
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL21, 0x42);
BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL21, 0x62);
DELAY(200);
}
static void
bwn_phy_lp_b2062_tblinit(struct bwn_mac *mac)
{
#define FLAG_A 0x01
#define FLAG_G 0x02
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
static const struct bwn_b206x_rfinit_entry bwn_b2062_init_tab[] = {
{ BWN_B2062_N_COM4, 0x1, 0x0, FLAG_A | FLAG_G, },
{ BWN_B2062_N_PDNCTL1, 0x0, 0xca, FLAG_G, },
{ BWN_B2062_N_PDNCTL3, 0x0, 0x0, FLAG_A | FLAG_G, },
{ BWN_B2062_N_PDNCTL4, 0x15, 0x2a, FLAG_A | FLAG_G, },
{ BWN_B2062_N_LGENC, 0xDB, 0xff, FLAG_A, },
{ BWN_B2062_N_LGENATUNE0, 0xdd, 0x0, FLAG_A | FLAG_G, },
{ BWN_B2062_N_LGENATUNE2, 0xdd, 0x0, FLAG_A | FLAG_G, },
{ BWN_B2062_N_LGENATUNE3, 0x77, 0xB5, FLAG_A | FLAG_G, },
{ BWN_B2062_N_LGENACTL3, 0x0, 0xff, FLAG_A | FLAG_G, },
{ BWN_B2062_N_LGENACTL7, 0x33, 0x33, FLAG_A | FLAG_G, },
{ BWN_B2062_N_RXA_CTL1, 0x0, 0x0, FLAG_G, },
{ BWN_B2062_N_RXBB_CTL0, 0x82, 0x80, FLAG_A | FLAG_G, },
{ BWN_B2062_N_RXBB_GAIN1, 0x4, 0x4, FLAG_A | FLAG_G, },
{ BWN_B2062_N_RXBB_GAIN2, 0x0, 0x0, FLAG_A | FLAG_G, },
{ BWN_B2062_N_TXCTL4, 0x3, 0x3, FLAG_A | FLAG_G, },
{ BWN_B2062_N_TXCTL5, 0x2, 0x2, FLAG_A | FLAG_G, },
{ BWN_B2062_N_TX_TUNE, 0x88, 0x1b, FLAG_A | FLAG_G, },
{ BWN_B2062_S_COM4, 0x1, 0x0, FLAG_A | FLAG_G, },
{ BWN_B2062_S_PDS_CTL0, 0xff, 0xff, FLAG_A | FLAG_G, },
{ BWN_B2062_S_LGENG_CTL0, 0xf8, 0xd8, FLAG_A | FLAG_G, },
{ BWN_B2062_S_LGENG_CTL1, 0x3c, 0x24, FLAG_A | FLAG_G, },
{ BWN_B2062_S_LGENG_CTL8, 0x88, 0x80, FLAG_A | FLAG_G, },
{ BWN_B2062_S_LGENG_CTL10, 0x88, 0x80, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL0, 0x98, 0x98, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL1, 0x10, 0x10, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL5, 0x43, 0x43, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL6, 0x47, 0x47, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL7, 0xc, 0xc, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL8, 0x11, 0x11, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL9, 0x11, 0x11, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL10, 0xe, 0xe, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL11, 0x8, 0x8, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL12, 0x33, 0x33, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL13, 0xa, 0xa, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL14, 0x6, 0x6, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL18, 0x3e, 0x3e, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL19, 0x13, 0x13, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL21, 0x62, 0x62, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL22, 0x7, 0x7, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL23, 0x16, 0x16, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL24, 0x5c, 0x5c, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL25, 0x95, 0x95, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL30, 0xa0, 0xa0, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL31, 0x4, 0x4, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL33, 0xcc, 0xcc, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RFPLLCTL34, 0x7, 0x7, FLAG_A | FLAG_G, },
{ BWN_B2062_S_RXG_CNT8, 0xf, 0xf, FLAG_A, },
};
const struct bwn_b206x_rfinit_entry *br;
unsigned int i;
for (i = 0; i < N(bwn_b2062_init_tab); i++) {
br = &bwn_b2062_init_tab[i];
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
if (br->br_flags & FLAG_G)
BWN_RF_WRITE(mac, br->br_offset, br->br_valueg);
} else {
if (br->br_flags & FLAG_A)
BWN_RF_WRITE(mac, br->br_offset, br->br_valuea);
}
}
#undef FLAG_A
#undef FLAG_G
}
static void
bwn_phy_lp_b2063_tblinit(struct bwn_mac *mac)
{
#define FLAG_A 0x01
#define FLAG_G 0x02
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
static const struct bwn_b206x_rfinit_entry bwn_b2063_init_tab[] = {
{ BWN_B2063_COM1, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM10, 0x1, 0x0, FLAG_A, },
{ BWN_B2063_COM16, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM17, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM18, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM19, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM20, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM21, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM22, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM23, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_COM24, 0x0, 0x0, FLAG_G, },
{ BWN_B2063_LOGEN_SP1, 0xe8, 0xd4, FLAG_A | FLAG_G, },
{ BWN_B2063_LOGEN_SP2, 0xa7, 0x53, FLAG_A | FLAG_G, },
{ BWN_B2063_LOGEN_SP4, 0xf0, 0xf, FLAG_A | FLAG_G, },
{ BWN_B2063_G_RX_SP1, 0x1f, 0x5e, FLAG_G, },
{ BWN_B2063_G_RX_SP2, 0x7f, 0x7e, FLAG_G, },
{ BWN_B2063_G_RX_SP3, 0x30, 0xf0, FLAG_G, },
{ BWN_B2063_G_RX_SP7, 0x7f, 0x7f, FLAG_A | FLAG_G, },
{ BWN_B2063_G_RX_SP10, 0xc, 0xc, FLAG_A | FLAG_G, },
{ BWN_B2063_A_RX_SP1, 0x3c, 0x3f, FLAG_A, },
{ BWN_B2063_A_RX_SP2, 0xfc, 0xfe, FLAG_A, },
{ BWN_B2063_A_RX_SP7, 0x8, 0x8, FLAG_A | FLAG_G, },
{ BWN_B2063_RX_BB_SP4, 0x60, 0x60, FLAG_A | FLAG_G, },
{ BWN_B2063_RX_BB_SP8, 0x30, 0x30, FLAG_A | FLAG_G, },
{ BWN_B2063_TX_RF_SP3, 0xc, 0xb, FLAG_A | FLAG_G, },
{ BWN_B2063_TX_RF_SP4, 0x10, 0xf, FLAG_A | FLAG_G, },
{ BWN_B2063_PA_SP1, 0x3d, 0xfd, FLAG_A | FLAG_G, },
{ BWN_B2063_TX_BB_SP1, 0x2, 0x2, FLAG_A | FLAG_G, },
{ BWN_B2063_BANDGAP_CTL1, 0x56, 0x56, FLAG_A | FLAG_G, },
{ BWN_B2063_JTAG_VCO2, 0xF7, 0xF7, FLAG_A | FLAG_G, },
{ BWN_B2063_G_RX_MIX3, 0x71, 0x71, FLAG_A | FLAG_G, },
{ BWN_B2063_G_RX_MIX4, 0x71, 0x71, FLAG_A | FLAG_G, },
{ BWN_B2063_A_RX_1ST2, 0xf0, 0x30, FLAG_A, },
{ BWN_B2063_A_RX_PS6, 0x77, 0x77, FLAG_A | FLAG_G, },
{ BWN_B2063_A_RX_MIX4, 0x3, 0x3, FLAG_A | FLAG_G, },
{ BWN_B2063_A_RX_MIX5, 0xf, 0xf, FLAG_A | FLAG_G, },
{ BWN_B2063_A_RX_MIX6, 0xf, 0xf, FLAG_A | FLAG_G, },
{ BWN_B2063_RX_TIA_CTL1, 0x77, 0x77, FLAG_A | FLAG_G, },
{ BWN_B2063_RX_TIA_CTL3, 0x77, 0x77, FLAG_A | FLAG_G, },
{ BWN_B2063_RX_BB_CTL2, 0x4, 0x4, FLAG_A | FLAG_G, },
{ BWN_B2063_PA_CTL1, 0x0, 0x4, FLAG_A, },
{ BWN_B2063_VREG_CTL1, 0x3, 0x3, FLAG_A | FLAG_G, },
};
const struct bwn_b206x_rfinit_entry *br;
unsigned int i;
for (i = 0; i < N(bwn_b2063_init_tab); i++) {
br = &bwn_b2063_init_tab[i];
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
if (br->br_flags & FLAG_G)
BWN_RF_WRITE(mac, br->br_offset, br->br_valueg);
} else {
if (br->br_flags & FLAG_A)
BWN_RF_WRITE(mac, br->br_offset, br->br_valuea);
}
}
#undef FLAG_A
#undef FLAG_G
}
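/*
 * These helpers rely on the PHY table data port auto-incrementing the
 * table offset after every access: BWN_PHY_TABLE_ADDR is programmed
 * once and then `count' elements of the width encoded in typenoffset
 * are streamed through BWN_PHY_TABLEDATALO/HI.
 */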
static void
bwn_tab_read_multi(struct bwn_mac *mac, uint32_t typenoffset,
int count, void *_data)
{
unsigned int i;
uint32_t offset, type;
uint8_t *data = _data;
type = BWN_TAB_GETTYPE(typenoffset);
offset = BWN_TAB_GETOFFSET(typenoffset);
KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__));
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
for (i = 0; i < count; i++) {
switch (type) {
case BWN_TAB_8BIT:
*data = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO) & 0xff;
data++;
break;
case BWN_TAB_16BIT:
*((uint16_t *)data) = BWN_PHY_READ(mac,
BWN_PHY_TABLEDATALO);
data += 2;
break;
case BWN_TAB_32BIT:
*((uint32_t *)data) = BWN_PHY_READ(mac,
BWN_PHY_TABLEDATAHI);
*((uint32_t *)data) <<= 16;
*((uint32_t *)data) |= BWN_PHY_READ(mac,
BWN_PHY_TABLEDATALO);
data += 4;
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
}
}
static void
bwn_tab_write_multi(struct bwn_mac *mac, uint32_t typenoffset,
int count, const void *_data)
{
uint32_t offset, type, value;
const uint8_t *data = _data;
unsigned int i;
type = BWN_TAB_GETTYPE(typenoffset);
offset = BWN_TAB_GETOFFSET(typenoffset);
KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__));
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
for (i = 0; i < count; i++) {
switch (type) {
case BWN_TAB_8BIT:
value = *data;
data++;
KASSERT(!(value & ~0xff),
("%s:%d: fail", __func__, __LINE__));
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
break;
case BWN_TAB_16BIT:
value = *((const uint16_t *)data);
data += 2;
KASSERT(!(value & ~0xffff),
("%s:%d: fail", __func__, __LINE__));
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
break;
case BWN_TAB_32BIT:
value = *((const uint32_t *)data);
data += 4;
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATAHI, value >> 16);
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
}
}
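/*
 * TX gain layout depends on the PHY revision: rev 0/1 packs gm
 * (bits 0-2), pga (bits 3-6) and pad (bits 7-10) into the single gain
 * override word, while rev 2+ keeps gm/pga in the override word and
 * stores pad (together with the PA gain) in the OFDM 0xfb/0xfd
 * registers.
 */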
static struct bwn_txgain
bwn_phy_lp_get_txgain(struct bwn_mac *mac)
{
struct bwn_txgain tg;
uint16_t tmp;
tg.tg_dac = (BWN_PHY_READ(mac, BWN_PHY_AFE_DAC_CTL) & 0x380) >> 7;
if (mac->mac_phy.rev < 2) {
tmp = BWN_PHY_READ(mac,
BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL) & 0x7ff;
tg.tg_gm = tmp & 0x0007;
tg.tg_pga = (tmp & 0x0078) >> 3;
tg.tg_pad = (tmp & 0x780) >> 7;
return (tg);
}
tmp = BWN_PHY_READ(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL);
tg.tg_pad = BWN_PHY_READ(mac, BWN_PHY_OFDM(0xfb)) & 0xff;
tg.tg_gm = tmp & 0xff;
tg.tg_pga = (tmp >> 8) & 0xff;
return (tg);
}
static uint8_t
bwn_phy_lp_get_bbmult(struct bwn_mac *mac)
{
return ((bwn_tab_read(mac, BWN_TAB_2(0, 87)) & 0xff00) >> 8);
}
static void
bwn_phy_lp_set_txgain(struct bwn_mac *mac, struct bwn_txgain *tg)
{
uint16_t pa;
if (mac->mac_phy.rev < 2) {
BWN_PHY_SETMASK(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0xf800,
(tg->tg_pad << 7) | (tg->tg_pga << 3) | tg->tg_gm);
bwn_phy_lp_set_txgain_dac(mac, tg->tg_dac);
bwn_phy_lp_set_txgain_override(mac);
return;
}
pa = bwn_phy_lp_get_pa_gain(mac);
BWN_PHY_WRITE(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL,
(tg->tg_pga << 8) | tg->tg_gm);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfb), 0x8000,
tg->tg_pad | (pa << 6));
BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xfc), (tg->tg_pga << 8) | tg->tg_gm);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfd), 0x8000,
tg->tg_pad | (pa << 8));
bwn_phy_lp_set_txgain_dac(mac, tg->tg_dac);
bwn_phy_lp_set_txgain_override(mac);
}
static void
bwn_phy_lp_set_bbmult(struct bwn_mac *mac, uint8_t bbmult)
{
bwn_tab_write(mac, BWN_TAB_2(0, 87), (uint16_t)bbmult << 8);
}
static void
bwn_phy_lp_set_trsw_over(struct bwn_mac *mac, uint8_t tx, uint8_t rx)
{
uint16_t trsw = (tx << 1) | rx;
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffc, trsw);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x3);
}
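/*
 * The rxgain word packs the TR switch, external LNA enable and gain
 * table index; the bit layout differs between PHY rev < 2 and
 * rev >= 2, hence the two decode paths below.
 */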
static void
bwn_phy_lp_set_rxgain(struct bwn_mac *mac, uint32_t gain)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t ext_lna, high_gain, lna, low_gain, trsw, tmp;
if (mac->mac_phy.rev < 2) {
trsw = gain & 0x1;
lna = (gain & 0xfffc) | ((gain & 0xc) >> 2);
ext_lna = (gain & 2) >> 1;
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffe, trsw);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
0xfbff, ext_lna << 10);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
0xf7ff, ext_lna << 11);
BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, lna);
} else {
low_gain = gain & 0xffff;
high_gain = (gain >> 16) & 0xf;
ext_lna = (gain >> 21) & 0x1;
trsw = ~(gain >> 20) & 0x1;
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffe, trsw);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
0xfdff, ext_lna << 9);
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
0xfbff, ext_lna << 10);
BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xfff0, high_gain);
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
tmp = (gain >> 2) & 0x3;
BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL,
0xe7ff, tmp<<11);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xe6), 0xffe7,
tmp << 3);
}
}
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x10);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x40);
if (mac->mac_phy.rev >= 2) {
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x100);
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x400);
BWN_PHY_SET(mac, BWN_PHY_OFDM(0xe5), 0x8);
}
return;
}
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x200);
}
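/*
 * "Deaf" mode disables carrier sense.  Separate user/system flags are
 * tracked so that CRS is only restored once both callers have cleared
 * their request.
 */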
static void
bwn_phy_lp_set_deaf(struct bwn_mac *mac, uint8_t user)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
if (user)
plp->plp_crsusr_off = 1;
else
plp->plp_crssys_off = 1;
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x80);
}
static void
bwn_phy_lp_clear_deaf(struct bwn_mac *mac, uint8_t user)
{
struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp;
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
if (user)
plp->plp_crsusr_off = 0;
else
plp->plp_crssys_off = 0;
if (plp->plp_crsusr_off || plp->plp_crssys_off)
return;
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x60);
else
BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x20);
}
static unsigned int
bwn_sqrt(struct bwn_mac *mac, unsigned int x)
{
/* Table holding (10 * sqrt(x)) for x between 1 and 256. */
static uint8_t sqrt_table[256] = {
10, 14, 17, 20, 22, 24, 26, 28,
30, 31, 33, 34, 36, 37, 38, 40,
41, 42, 43, 44, 45, 46, 47, 48,
50, 50, 51, 52, 53, 54, 55, 56,
57, 58, 59, 60, 60, 61, 62, 63,
64, 64, 65, 66, 67, 67, 68, 69,
70, 70, 71, 72, 72, 73, 74, 74,
75, 76, 76, 77, 78, 78, 79, 80,
80, 81, 81, 82, 83, 83, 84, 84,
85, 86, 86, 87, 87, 88, 88, 89,
90, 90, 91, 91, 92, 92, 93, 93,
94, 94, 95, 95, 96, 96, 97, 97,
98, 98, 99, 100, 100, 100, 101, 101,
102, 102, 103, 103, 104, 104, 105, 105,
106, 106, 107, 107, 108, 108, 109, 109,
110, 110, 110, 111, 111, 112, 112, 113,
113, 114, 114, 114, 115, 115, 116, 116,
117, 117, 117, 118, 118, 119, 119, 120,
120, 120, 121, 121, 122, 122, 122, 123,
123, 124, 124, 124, 125, 125, 126, 126,
126, 127, 127, 128, 128, 128, 129, 129,
130, 130, 130, 131, 131, 131, 132, 132,
133, 133, 133, 134, 134, 134, 135, 135,
136, 136, 136, 137, 137, 137, 138, 138,
138, 139, 139, 140, 140, 140, 141, 141,
141, 142, 142, 142, 143, 143, 143, 144,
144, 144, 145, 145, 145, 146, 146, 146,
147, 147, 147, 148, 148, 148, 149, 149,
150, 150, 150, 150, 151, 151, 151, 152,
152, 152, 153, 153, 153, 154, 154, 154,
155, 155, 155, 156, 156, 156, 157, 157,
157, 158, 158, 158, 159, 159, 159, 160
};
if (x == 0)
return (0);
if (x >= 256) {
unsigned int tmp;
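/*
 * floor(sqrt(x)) for x >= 256: since n^2 is the sum of the first n
 * odd numbers, count how many successive odd numbers can be
 * subtracted from x.
 */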
for (tmp = 0; x >= (2 * tmp) + 1; x -= (2 * tmp++) + 1)
/* do nothing */ ;
return (tmp);
}
return (sqrt_table[x - 1] / 10);
}
static int
bwn_phy_lp_calc_rx_iq_comp(struct bwn_mac *mac, uint16_t sample)
{
#define CALC_COEFF(_v, _x, _y, _z) do { \
int _t; \
_t = _x - 20; \
if (_t >= 0) { \
_v = ((_y << (30 - _x)) + (_z >> (1 + _t))) / (_z >> _t); \
} else { \
_v = ((_y << (30 - _x)) + (_z << (-1 - _t))) / (_z << -_t); \
} \
} while (0)
#define CALC_COEFF2(_v, _x, _y, _z) do { \
int _t; \
_t = _x - 11; \
if (_t >= 0) \
_v = (_y << (31 - _x)) / (_z >> _t); \
else \
_v = (_y << (31 - _x)) / (_z << -_t); \
} while (0)
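/*
 * CALC_COEFF returns (_y / _z) in Q10 fixed point (rounded to
 * nearest) and CALC_COEFF2 in Q20 (truncated); _x is bwn_nbits(_y)
 * and is used to pre-shift the operands so the 32-bit intermediates
 * do not overflow.
 */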
struct bwn_phy_lp_iq_est ie;
uint16_t v0, v1;
int tmp[2], ret;
v1 = BWN_PHY_READ(mac, BWN_PHY_RX_COMP_COEFF_S);
v0 = v1 >> 8;
v1 &= 0x00ff;
BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, 0x00c0);
BWN_PHY_MASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff);
ret = bwn_phy_lp_rx_iq_est(mac, sample, 32, &ie);
if (ret == 0)
goto done;
if (ie.ie_ipwr + ie.ie_qpwr < 2) {
ret = 0;
goto done;
}
CALC_COEFF(tmp[0], bwn_nbits(ie.ie_iqprod), ie.ie_iqprod, ie.ie_ipwr);
CALC_COEFF2(tmp[1], bwn_nbits(ie.ie_qpwr), ie.ie_qpwr, ie.ie_ipwr);
tmp[1] = -bwn_sqrt(mac, tmp[1] - (tmp[0] * tmp[0]));
v0 = tmp[0] >> 3;
v1 = tmp[1] >> 4;
done:
BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, v1);
BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff, v0 << 8);
return (ret);
#undef CALC_COEFF
#undef CALC_COEFF2
}
static void
bwn_phy_lp_tblinit_r01(struct bwn_mac *mac)
{
static const uint16_t noisescale[] = {
0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4,
0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa400, 0xa4a4, 0xa4a4,
0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4,
0xa4a4, 0xa4a4, 0x00a4, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x4c00, 0x2d36, 0x0000, 0x0000, 0x4c00, 0x2d36,
};
static const uint16_t crsgainnft[] = {
0x0366, 0x036a, 0x036f, 0x0364, 0x0367, 0x036d, 0x0374, 0x037f,
0x036f, 0x037b, 0x038a, 0x0378, 0x0367, 0x036d, 0x0375, 0x0381,
0x0374, 0x0381, 0x0392, 0x03a9, 0x03c4, 0x03e1, 0x0001, 0x001f,
0x0040, 0x005e, 0x007f, 0x009e, 0x00bd, 0x00dd, 0x00fd, 0x011d,
0x013d,
};
static const uint16_t filterctl[] = {
0xa0fc, 0x10fc, 0x10db, 0x20b7, 0xff93, 0x10bf, 0x109b, 0x2077,
0xff53, 0x0127,
};
static const uint32_t psctl[] = {
0x00010000, 0x000000a0, 0x00040000, 0x00000048, 0x08080101,
0x00000080, 0x08080101, 0x00000040, 0x08080101, 0x000000c0,
0x08a81501, 0x000000c0, 0x0fe8fd01, 0x000000c0, 0x08300105,
0x000000c0, 0x08080201, 0x000000c0, 0x08280205, 0x000000c0,
0xe80802fe, 0x000000c7, 0x28080206, 0x000000c0, 0x08080202,
0x000000c0, 0x0ba87602, 0x000000c0, 0x1068013d, 0x000000c0,
0x10280105, 0x000000c0, 0x08880102, 0x000000c0, 0x08280106,
0x000000c0, 0xe80801fd, 0x000000c7, 0xa8080115, 0x000000c0,
};
static const uint16_t ofdmcckgain_r0[] = {
0x0001, 0x0001, 0x0001, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001,
0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055,
0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d,
0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d,
0x755d,
};
static const uint16_t ofdmcckgain_r1[] = {
0x5000, 0x6000, 0x7000, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001,
0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055,
0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d,
0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d,
0x755d,
};
static const uint16_t gaindelta[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000,
};
static const uint32_t txpwrctl[] = {
0x00000050, 0x0000004f, 0x0000004e, 0x0000004d, 0x0000004c,
0x0000004b, 0x0000004a, 0x00000049, 0x00000048, 0x00000047,
0x00000046, 0x00000045, 0x00000044, 0x00000043, 0x00000042,
0x00000041, 0x00000040, 0x0000003f, 0x0000003e, 0x0000003d,
0x0000003c, 0x0000003b, 0x0000003a, 0x00000039, 0x00000038,
0x00000037, 0x00000036, 0x00000035, 0x00000034, 0x00000033,
0x00000032, 0x00000031, 0x00000030, 0x0000002f, 0x0000002e,
0x0000002d, 0x0000002c, 0x0000002b, 0x0000002a, 0x00000029,
0x00000028, 0x00000027, 0x00000026, 0x00000025, 0x00000024,
0x00000023, 0x00000022, 0x00000021, 0x00000020, 0x0000001f,
0x0000001e, 0x0000001d, 0x0000001c, 0x0000001b, 0x0000001a,
0x00000019, 0x00000018, 0x00000017, 0x00000016, 0x00000015,
0x00000014, 0x00000013, 0x00000012, 0x00000011, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x000075a0, 0x000075a0, 0x000075a1,
0x000075a1, 0x000075a2, 0x000075a2, 0x000075a3, 0x000075a3,
0x000074b0, 0x000074b0, 0x000074b1, 0x000074b1, 0x000074b2,
0x000074b2, 0x000074b3, 0x000074b3, 0x00006d20, 0x00006d20,
0x00006d21, 0x00006d21, 0x00006d22, 0x00006d22, 0x00006d23,
0x00006d23, 0x00004660, 0x00004660, 0x00004661, 0x00004661,
0x00004662, 0x00004662, 0x00004663, 0x00004663, 0x00003e60,
0x00003e60, 0x00003e61, 0x00003e61, 0x00003e62, 0x00003e62,
0x00003e63, 0x00003e63, 0x00003660, 0x00003660, 0x00003661,
0x00003661, 0x00003662, 0x00003662, 0x00003663, 0x00003663,
0x00002e60, 0x00002e60, 0x00002e61, 0x00002e61, 0x00002e62,
0x00002e62, 0x00002e63, 0x00002e63, 0x00002660, 0x00002660,
0x00002661, 0x00002661, 0x00002662, 0x00002662, 0x00002663,
0x00002663, 0x000025e0, 0x000025e0, 0x000025e1, 0x000025e1,
0x000025e2, 0x000025e2, 0x000025e3, 0x000025e3, 0x00001de0,
0x00001de0, 0x00001de1, 0x00001de1, 0x00001de2, 0x00001de2,
0x00001de3, 0x00001de3, 0x00001d60, 0x00001d60, 0x00001d61,
0x00001d61, 0x00001d62, 0x00001d62, 0x00001d63, 0x00001d63,
0x00001560, 0x00001560, 0x00001561, 0x00001561, 0x00001562,
0x00001562, 0x00001563, 0x00001563, 0x00000d60, 0x00000d60,
0x00000d61, 0x00000d61, 0x00000d62, 0x00000d62, 0x00000d63,
0x00000d63, 0x00000ce0, 0x00000ce0, 0x00000ce1, 0x00000ce1,
0x00000ce2, 0x00000ce2, 0x00000ce3, 0x00000ce3, 0x00000e10,
0x00000e10, 0x00000e11, 0x00000e11, 0x00000e12, 0x00000e12,
0x00000e13, 0x00000e13, 0x00000bf0, 0x00000bf0, 0x00000bf1,
0x00000bf1, 0x00000bf2, 0x00000bf2, 0x00000bf3, 0x00000bf3,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000,
0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000,
0x04000000, 0x04200000, 0x04000000, 0x000000ff, 0x000002fc,
0x0000fa08, 0x00000305, 0x00000206, 0x00000304, 0x0000fb04,
0x0000fcff, 0x000005fb, 0x0000fd01, 0x00000401, 0x00000006,
0x0000ff03, 0x000007fc, 0x0000fc08, 0x00000203, 0x0000fffb,
0x00000600, 0x0000fa01, 0x0000fc03, 0x0000fe06, 0x0000fe00,
0x00000102, 0x000007fd, 0x000004fb, 0x000006ff, 0x000004fd,
0x0000fdfa, 0x000007fb, 0x0000fdfa, 0x0000fa06, 0x00000500,
0x0000f902, 0x000007fa, 0x0000fafa, 0x00000500, 0x000007fa,
0x00000700, 0x00000305, 0x000004ff, 0x00000801, 0x00000503,
0x000005f9, 0x00000404, 0x0000fb08, 0x000005fd, 0x00000501,
0x00000405, 0x0000fb03, 0x000007fc, 0x00000403, 0x00000303,
0x00000402, 0x0000faff, 0x0000fe05, 0x000005fd, 0x0000fe01,
0x000007fa, 0x00000202, 0x00000504, 0x00000102, 0x000008fe,
0x0000fa04, 0x0000fafc, 0x0000fe08, 0x000000f9, 0x000002fa,
0x000003fe, 0x00000304, 0x000004f9, 0x00000100, 0x0000fd06,
0x000008fc, 0x00000701, 0x00000504, 0x0000fdfe, 0x0000fdfc,
0x000003fe, 0x00000704, 0x000002fc, 0x000004f9, 0x0000fdfd,
0x0000fa07, 0x00000205, 0x000003fd, 0x000005fb, 0x000004f9,
0x00000804, 0x0000fc06, 0x0000fcf9, 0x00000100, 0x0000fe05,
0x00000408, 0x0000fb02, 0x00000304, 0x000006fe, 0x000004fa,
0x00000305, 0x000008fc, 0x00000102, 0x000001fd, 0x000004fc,
0x0000fe03, 0x00000701, 0x000001fb, 0x000001f9, 0x00000206,
0x000006fd, 0x00000508, 0x00000700, 0x00000304, 0x000005fe,
0x000005ff, 0x0000fa04, 0x00000303, 0x0000fefb, 0x000007f9,
0x0000fefc, 0x000004fd, 0x000005fc, 0x0000fffd, 0x0000fc08,
0x0000fbf9, 0x0000fd07, 0x000008fb, 0x0000fe02, 0x000006fb,
0x00000702,
};
KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__));
bwn_tab_write_multi(mac, BWN_TAB_1(2, 0), N(bwn_tab_sigsq_tbl),
bwn_tab_sigsq_tbl);
bwn_tab_write_multi(mac, BWN_TAB_2(1, 0), N(noisescale), noisescale);
bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(crsgainnft), crsgainnft);
bwn_tab_write_multi(mac, BWN_TAB_2(8, 0), N(filterctl), filterctl);
bwn_tab_write_multi(mac, BWN_TAB_4(9, 0), N(psctl), psctl);
bwn_tab_write_multi(mac, BWN_TAB_1(6, 0), N(bwn_tab_pllfrac_tbl),
bwn_tab_pllfrac_tbl);
bwn_tab_write_multi(mac, BWN_TAB_2(0, 0), N(bwn_tabl_iqlocal_tbl),
bwn_tabl_iqlocal_tbl);
if (mac->mac_phy.rev == 0) {
bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), N(ofdmcckgain_r0),
ofdmcckgain_r0);
bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), N(ofdmcckgain_r0),
ofdmcckgain_r0);
} else {
bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), N(ofdmcckgain_r1),
ofdmcckgain_r1);
bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), N(ofdmcckgain_r1),
ofdmcckgain_r1);
}
bwn_tab_write_multi(mac, BWN_TAB_2(15, 0), N(gaindelta), gaindelta);
bwn_tab_write_multi(mac, BWN_TAB_4(10, 0), N(txpwrctl), txpwrctl);
}
static void
bwn_phy_lp_tblinit_r2(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
int i;
static const uint16_t noisescale[] = {
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x0000, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4,
0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4
};
static const uint32_t filterctl[] = {
0x000141fc, 0x000021fc, 0x000021b7, 0x0000416f, 0x0001ff27,
0x0000217f, 0x00002137, 0x000040ef, 0x0001fea7, 0x0000024f
};
static const uint32_t psctl[] = {
0x00e38e08, 0x00e08e38, 0x00000000, 0x00000000, 0x00000000,
0x00002080, 0x00006180, 0x00003002, 0x00000040, 0x00002042,
0x00180047, 0x00080043, 0x00000041, 0x000020c1, 0x00046006,
0x00042002, 0x00040000, 0x00002003, 0x00180006, 0x00080002
};
static const uint32_t gainidx[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x10000001, 0x00000000,
0x20000082, 0x00000000, 0x40000104, 0x00000000, 0x60004207,
0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001,
0xe041c683, 0x00000001, 0x50828805, 0x00000000, 0x80e34288,
0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000,
0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794,
0x00000010, 0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011,
0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21,
0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019,
0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329,
0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a,
0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082,
0x00000000, 0x40000104, 0x00000000, 0x60004207, 0x00000001,
0x7000838a, 0x00000001, 0xd021050d, 0x00000001, 0xe041c683,
0x00000001, 0x50828805, 0x00000000, 0x80e34288, 0x00000000,
0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711,
0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010,
0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c,
0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019,
0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019, 0xb36811a6,
0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a,
0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c,
0x0000001a, 0x64ca55ad, 0x0000001a
};
static const uint16_t auxgainidx[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0001, 0x0002, 0x0004, 0x0016, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002,
0x0004, 0x0016
};
static const uint16_t swctl[] = {
0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028,
0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028,
0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009,
0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018,
0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028,
0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028,
0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009,
0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018
};
static const uint8_t hf[] = {
0x4b, 0x36, 0x24, 0x18, 0x49, 0x34, 0x23, 0x17, 0x48,
0x33, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17
};
static const uint32_t gainval[] = {
0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb,
0x00000004, 0x00000008, 0x0000000d, 0x00000001, 0x00000004,
0x00000007, 0x0000000a, 0x0000000d, 0x00000010, 0x00000012,
0x00000015, 0x00000000, 0x00000006, 0x0000000c, 0x00000000,
0x00000000, 0x00000000, 0x00000012, 0x00000000, 0x00000000,
0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x0000001e, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000003,
0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 0x00000012,
0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000009,
0x000000f1, 0x00000000, 0x00000000
};
static const uint16_t gain[] = {
0x0000, 0x0400, 0x0800, 0x0802, 0x0804, 0x0806, 0x0807, 0x0808,
0x080a, 0x080b, 0x080c, 0x080e, 0x080f, 0x0810, 0x0812, 0x0813,
0x0814, 0x0816, 0x0817, 0x081a, 0x081b, 0x081f, 0x0820, 0x0824,
0x0830, 0x0834, 0x0837, 0x083b, 0x083f, 0x0840, 0x0844, 0x0857,
0x085b, 0x085f, 0x08d7, 0x08db, 0x08df, 0x0957, 0x095b, 0x095f,
0x0b57, 0x0b5b, 0x0b5f, 0x0f5f, 0x135f, 0x175f, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000
};
static const uint32_t papdeps[] = {
0x00000000, 0x00013ffc, 0x0001dff3, 0x0001bff0, 0x00023fe9,
0x00021fdf, 0x00028fdf, 0x00033fd2, 0x00039fcb, 0x00043fc7,
0x0004efc2, 0x00055fb5, 0x0005cfb0, 0x00063fa8, 0x00068fa3,
0x00071f98, 0x0007ef92, 0x00084f8b, 0x0008df82, 0x00097f77,
0x0009df69, 0x000a3f62, 0x000adf57, 0x000b6f4c, 0x000bff41,
0x000c9f39, 0x000cff30, 0x000dbf27, 0x000e4f1e, 0x000edf16,
0x000f7f13, 0x00102f11, 0x00110f10, 0x0011df11, 0x0012ef15,
0x00143f1c, 0x00158f27, 0x00172f35, 0x00193f47, 0x001baf5f,
0x001e6f7e, 0x0021cfa4, 0x0025bfd2, 0x002a2008, 0x002fb047,
0x00360090, 0x003d40e0, 0x0045c135, 0x004fb189, 0x005ae1d7,
0x0067221d, 0x0075025a, 0x007ff291, 0x007ff2bf, 0x007ff2e3,
0x007ff2ff, 0x007ff315, 0x007ff329, 0x007ff33f, 0x007ff356,
0x007ff36e, 0x007ff39c, 0x007ff441, 0x007ff506
};
static const uint32_t papdmult[] = {
0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060,
0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080,
0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa,
0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 0x002410e3,
0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f,
0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193,
0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a,
0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd,
0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd,
0x000813f5, 0x0007a432, 0x00073471, 0x0006c4b5, 0x000664fc,
0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5,
0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd,
0x00036963, 0x000339f2, 0x00030a89, 0x0002db28
};
static const uint32_t gainidx_a0[] = {
0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060,
0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080,
0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa,
0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 0x002410e3,
0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f,
0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193,
0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a,
0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd,
0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd,
0x000813f5, 0x0007a432, 0x00073471, 0x0006c4b5, 0x000664fc,
0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5,
0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd,
0x00036963, 0x000339f2, 0x00030a89, 0x0002db28
};
static const uint16_t auxgainidx_a0[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0002, 0x0014, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0002, 0x0014
};
static const uint32_t gainval_a0[] = {
0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb,
0x00000004, 0x00000008, 0x0000000d, 0x00000001, 0x00000004,
0x00000007, 0x0000000a, 0x0000000d, 0x00000010, 0x00000012,
0x00000015, 0x00000000, 0x00000006, 0x0000000c, 0x00000000,
0x00000000, 0x00000000, 0x00000012, 0x00000000, 0x00000000,
0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x0000001e, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000003,
0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 0x00000012,
0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000000f,
0x000000f7, 0x00000000, 0x00000000
};
static const uint16_t gain_a0[] = {
0x0000, 0x0002, 0x0004, 0x0006, 0x0007, 0x0008, 0x000a, 0x000b,
0x000c, 0x000e, 0x000f, 0x0010, 0x0012, 0x0013, 0x0014, 0x0016,
0x0017, 0x001a, 0x001b, 0x001f, 0x0020, 0x0024, 0x0030, 0x0034,
0x0037, 0x003b, 0x003f, 0x0040, 0x0044, 0x0057, 0x005b, 0x005f,
0x00d7, 0x00db, 0x00df, 0x0157, 0x015b, 0x015f, 0x0357, 0x035b,
0x035f, 0x075f, 0x0b5f, 0x0f5f, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000
};
KASSERT(mac->mac_phy.rev >= 2, ("%s:%d: fail", __func__, __LINE__));
for (i = 0; i < 704; i++)
bwn_tab_write(mac, BWN_TAB_4(7, i), 0);
bwn_tab_write_multi(mac, BWN_TAB_1(2, 0), N(bwn_tab_sigsq_tbl),
bwn_tab_sigsq_tbl);
bwn_tab_write_multi(mac, BWN_TAB_2(1, 0), N(noisescale), noisescale);
bwn_tab_write_multi(mac, BWN_TAB_4(11, 0), N(filterctl), filterctl);
bwn_tab_write_multi(mac, BWN_TAB_4(12, 0), N(psctl), psctl);
bwn_tab_write_multi(mac, BWN_TAB_4(13, 0), N(gainidx), gainidx);
bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(auxgainidx), auxgainidx);
bwn_tab_write_multi(mac, BWN_TAB_2(15, 0), N(swctl), swctl);
bwn_tab_write_multi(mac, BWN_TAB_1(16, 0), N(hf), hf);
bwn_tab_write_multi(mac, BWN_TAB_4(17, 0), N(gainval), gainval);
bwn_tab_write_multi(mac, BWN_TAB_2(18, 0), N(gain), gain);
bwn_tab_write_multi(mac, BWN_TAB_1(6, 0), N(bwn_tab_pllfrac_tbl),
bwn_tab_pllfrac_tbl);
bwn_tab_write_multi(mac, BWN_TAB_2(0, 0), N(bwn_tabl_iqlocal_tbl),
bwn_tabl_iqlocal_tbl);
bwn_tab_write_multi(mac, BWN_TAB_4(9, 0), N(papdeps), papdeps);
bwn_tab_write_multi(mac, BWN_TAB_4(10, 0), N(papdmult), papdmult);
if ((siba_get_chipid(sc->sc_dev) == 0x4325) &&
(siba_get_chiprev(sc->sc_dev) == 0)) {
bwn_tab_write_multi(mac, BWN_TAB_4(13, 0), N(gainidx_a0),
gainidx_a0);
bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(auxgainidx_a0),
auxgainidx_a0);
bwn_tab_write_multi(mac, BWN_TAB_4(17, 0), N(gainval_a0),
gainval_a0);
bwn_tab_write_multi(mac, BWN_TAB_2(18, 0), N(gain_a0), gain_a0);
}
}
static void
bwn_phy_lp_tblinit_txgain(struct bwn_mac *mac)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
static struct bwn_txgain_entry txgain_r2[] = {
{ 255, 255, 203, 0, 152 }, { 255, 255, 203, 0, 147 },
{ 255, 255, 203, 0, 143 }, { 255, 255, 203, 0, 139 },
{ 255, 255, 203, 0, 135 }, { 255, 255, 203, 0, 131 },
{ 255, 255, 203, 0, 128 }, { 255, 255, 203, 0, 124 },
{ 255, 255, 203, 0, 121 }, { 255, 255, 203, 0, 117 },
{ 255, 255, 203, 0, 114 }, { 255, 255, 203, 0, 111 },
{ 255, 255, 203, 0, 107 }, { 255, 255, 203, 0, 104 },
{ 255, 255, 203, 0, 101 }, { 255, 255, 203, 0, 99 },
{ 255, 255, 203, 0, 96 }, { 255, 255, 203, 0, 93 },
{ 255, 255, 203, 0, 90 }, { 255, 255, 203, 0, 88 },
{ 255, 255, 203, 0, 85 }, { 255, 255, 203, 0, 83 },
{ 255, 255, 203, 0, 81 }, { 255, 255, 203, 0, 78 },
{ 255, 255, 203, 0, 76 }, { 255, 255, 203, 0, 74 },
{ 255, 255, 203, 0, 72 }, { 255, 255, 203, 0, 70 },
{ 255, 255, 203, 0, 68 }, { 255, 255, 203, 0, 66 },
{ 255, 255, 203, 0, 64 }, { 255, 255, 197, 0, 64 },
{ 255, 255, 192, 0, 64 }, { 255, 255, 186, 0, 64 },
{ 255, 255, 181, 0, 64 }, { 255, 255, 176, 0, 64 },
{ 255, 255, 171, 0, 64 }, { 255, 255, 166, 0, 64 },
{ 255, 255, 161, 0, 64 }, { 255, 255, 157, 0, 64 },
{ 255, 255, 152, 0, 64 }, { 255, 255, 148, 0, 64 },
{ 255, 255, 144, 0, 64 }, { 255, 255, 140, 0, 64 },
{ 255, 255, 136, 0, 64 }, { 255, 255, 132, 0, 64 },
{ 255, 255, 128, 0, 64 }, { 255, 255, 124, 0, 64 },
{ 255, 255, 121, 0, 64 }, { 255, 255, 117, 0, 64 },
{ 255, 255, 114, 0, 64 }, { 255, 255, 111, 0, 64 },
{ 255, 255, 108, 0, 64 }, { 255, 255, 105, 0, 64 },
{ 255, 255, 102, 0, 64 }, { 255, 255, 99, 0, 64 },
{ 255, 255, 96, 0, 64 }, { 255, 255, 93, 0, 64 },
{ 255, 255, 91, 0, 64 }, { 255, 255, 88, 0, 64 },
{ 255, 255, 86, 0, 64 }, { 255, 255, 83, 0, 64 },
{ 255, 255, 81, 0, 64 }, { 255, 255, 79, 0, 64 },
{ 255, 255, 76, 0, 64 }, { 255, 255, 74, 0, 64 },
{ 255, 255, 72, 0, 64 }, { 255, 255, 70, 0, 64 },
{ 255, 255, 68, 0, 64 }, { 255, 255, 66, 0, 64 },
{ 255, 255, 64, 0, 64 }, { 255, 248, 64, 0, 64 },
{ 255, 248, 62, 0, 64 }, { 255, 241, 62, 0, 64 },
{ 255, 241, 60, 0, 64 }, { 255, 234, 60, 0, 64 },
{ 255, 234, 59, 0, 64 }, { 255, 227, 59, 0, 64 },
{ 255, 227, 57, 0, 64 }, { 255, 221, 57, 0, 64 },
{ 255, 221, 55, 0, 64 }, { 255, 215, 55, 0, 64 },
{ 255, 215, 54, 0, 64 }, { 255, 208, 54, 0, 64 },
{ 255, 208, 52, 0, 64 }, { 255, 203, 52, 0, 64 },
{ 255, 203, 51, 0, 64 }, { 255, 197, 51, 0, 64 },
{ 255, 197, 49, 0, 64 }, { 255, 191, 49, 0, 64 },
{ 255, 191, 48, 0, 64 }, { 255, 186, 48, 0, 64 },
{ 255, 186, 47, 0, 64 }, { 255, 181, 47, 0, 64 },
{ 255, 181, 45, 0, 64 }, { 255, 175, 45, 0, 64 },
{ 255, 175, 44, 0, 64 }, { 255, 170, 44, 0, 64 },
{ 255, 170, 43, 0, 64 }, { 255, 166, 43, 0, 64 },
{ 255, 166, 42, 0, 64 }, { 255, 161, 42, 0, 64 },
{ 255, 161, 40, 0, 64 }, { 255, 156, 40, 0, 64 },
{ 255, 156, 39, 0, 64 }, { 255, 152, 39, 0, 64 },
{ 255, 152, 38, 0, 64 }, { 255, 148, 38, 0, 64 },
{ 255, 148, 37, 0, 64 }, { 255, 143, 37, 0, 64 },
{ 255, 143, 36, 0, 64 }, { 255, 139, 36, 0, 64 },
{ 255, 139, 35, 0, 64 }, { 255, 135, 35, 0, 64 },
{ 255, 135, 34, 0, 64 }, { 255, 132, 34, 0, 64 },
{ 255, 132, 33, 0, 64 }, { 255, 128, 33, 0, 64 },
{ 255, 128, 32, 0, 64 }, { 255, 124, 32, 0, 64 },
{ 255, 124, 31, 0, 64 }, { 255, 121, 31, 0, 64 },
{ 255, 121, 30, 0, 64 }, { 255, 117, 30, 0, 64 },
{ 255, 117, 29, 0, 64 }, { 255, 114, 29, 0, 64 },
{ 255, 114, 29, 0, 64 }, { 255, 111, 29, 0, 64 },
};
static struct bwn_txgain_entry txgain_2ghz_r2[] = {
{ 7, 99, 255, 0, 64 }, { 7, 96, 255, 0, 64 },
{ 7, 93, 255, 0, 64 }, { 7, 90, 255, 0, 64 },
{ 7, 88, 255, 0, 64 }, { 7, 85, 255, 0, 64 },
{ 7, 83, 255, 0, 64 }, { 7, 81, 255, 0, 64 },
{ 7, 78, 255, 0, 64 }, { 7, 76, 255, 0, 64 },
{ 7, 74, 255, 0, 64 }, { 7, 72, 255, 0, 64 },
{ 7, 70, 255, 0, 64 }, { 7, 68, 255, 0, 64 },
{ 7, 66, 255, 0, 64 }, { 7, 64, 255, 0, 64 },
{ 7, 64, 255, 0, 64 }, { 7, 62, 255, 0, 64 },
{ 7, 62, 248, 0, 64 }, { 7, 60, 248, 0, 64 },
{ 7, 60, 241, 0, 64 }, { 7, 59, 241, 0, 64 },
{ 7, 59, 234, 0, 64 }, { 7, 57, 234, 0, 64 },
{ 7, 57, 227, 0, 64 }, { 7, 55, 227, 0, 64 },
{ 7, 55, 221, 0, 64 }, { 7, 54, 221, 0, 64 },
{ 7, 54, 215, 0, 64 }, { 7, 52, 215, 0, 64 },
{ 7, 52, 208, 0, 64 }, { 7, 51, 208, 0, 64 },
{ 7, 51, 203, 0, 64 }, { 7, 49, 203, 0, 64 },
{ 7, 49, 197, 0, 64 }, { 7, 48, 197, 0, 64 },
{ 7, 48, 191, 0, 64 }, { 7, 47, 191, 0, 64 },
{ 7, 47, 186, 0, 64 }, { 7, 45, 186, 0, 64 },
{ 7, 45, 181, 0, 64 }, { 7, 44, 181, 0, 64 },
{ 7, 44, 175, 0, 64 }, { 7, 43, 175, 0, 64 },
{ 7, 43, 170, 0, 64 }, { 7, 42, 170, 0, 64 },
{ 7, 42, 166, 0, 64 }, { 7, 40, 166, 0, 64 },
{ 7, 40, 161, 0, 64 }, { 7, 39, 161, 0, 64 },
{ 7, 39, 156, 0, 64 }, { 7, 38, 156, 0, 64 },
{ 7, 38, 152, 0, 64 }, { 7, 37, 152, 0, 64 },
{ 7, 37, 148, 0, 64 }, { 7, 36, 148, 0, 64 },
{ 7, 36, 143, 0, 64 }, { 7, 35, 143, 0, 64 },
{ 7, 35, 139, 0, 64 }, { 7, 34, 139, 0, 64 },
{ 7, 34, 135, 0, 64 }, { 7, 33, 135, 0, 64 },
{ 7, 33, 132, 0, 64 }, { 7, 32, 132, 0, 64 },
{ 7, 32, 128, 0, 64 }, { 7, 31, 128, 0, 64 },
{ 7, 31, 124, 0, 64 }, { 7, 30, 124, 0, 64 },
{ 7, 30, 121, 0, 64 }, { 7, 29, 121, 0, 64 },
{ 7, 29, 117, 0, 64 }, { 7, 29, 117, 0, 64 },
{ 7, 29, 114, 0, 64 }, { 7, 28, 114, 0, 64 },
{ 7, 28, 111, 0, 64 }, { 7, 27, 111, 0, 64 },
{ 7, 27, 108, 0, 64 }, { 7, 26, 108, 0, 64 },
{ 7, 26, 104, 0, 64 }, { 7, 25, 104, 0, 64 },
{ 7, 25, 102, 0, 64 }, { 7, 25, 102, 0, 64 },
{ 7, 25, 99, 0, 64 }, { 7, 24, 99, 0, 64 },
{ 7, 24, 96, 0, 64 }, { 7, 23, 96, 0, 64 },
{ 7, 23, 93, 0, 64 }, { 7, 23, 93, 0, 64 },
{ 7, 23, 90, 0, 64 }, { 7, 22, 90, 0, 64 },
{ 7, 22, 88, 0, 64 }, { 7, 21, 88, 0, 64 },
{ 7, 21, 85, 0, 64 }, { 7, 21, 85, 0, 64 },
{ 7, 21, 83, 0, 64 }, { 7, 20, 83, 0, 64 },
{ 7, 20, 81, 0, 64 }, { 7, 20, 81, 0, 64 },
{ 7, 20, 78, 0, 64 }, { 7, 19, 78, 0, 64 },
{ 7, 19, 76, 0, 64 }, { 7, 19, 76, 0, 64 },
{ 7, 19, 74, 0, 64 }, { 7, 18, 74, 0, 64 },
{ 7, 18, 72, 0, 64 }, { 7, 18, 72, 0, 64 },
{ 7, 18, 70, 0, 64 }, { 7, 17, 70, 0, 64 },
{ 7, 17, 68, 0, 64 }, { 7, 17, 68, 0, 64 },
{ 7, 17, 66, 0, 64 }, { 7, 16, 66, 0, 64 },
{ 7, 16, 64, 0, 64 }, { 7, 16, 64, 0, 64 },
{ 7, 16, 62, 0, 64 }, { 7, 15, 62, 0, 64 },
{ 7, 15, 60, 0, 64 }, { 7, 15, 60, 0, 64 },
{ 7, 15, 59, 0, 64 }, { 7, 14, 59, 0, 64 },
{ 7, 14, 57, 0, 64 }, { 7, 14, 57, 0, 64 },
{ 7, 14, 55, 0, 64 }, { 7, 14, 55, 0, 64 },
{ 7, 14, 54, 0, 64 }, { 7, 13, 54, 0, 64 },
{ 7, 13, 52, 0, 64 }, { 7, 13, 52, 0, 64 },
};
static struct bwn_txgain_entry txgain_5ghz_r2[] = {
{ 255, 255, 255, 0, 152 }, { 255, 255, 255, 0, 147 },
{ 255, 255, 255, 0, 143 }, { 255, 255, 255, 0, 139 },
{ 255, 255, 255, 0, 135 }, { 255, 255, 255, 0, 131 },
{ 255, 255, 255, 0, 128 }, { 255, 255, 255, 0, 124 },
{ 255, 255, 255, 0, 121 }, { 255, 255, 255, 0, 117 },
{ 255, 255, 255, 0, 114 }, { 255, 255, 255, 0, 111 },
{ 255, 255, 255, 0, 107 }, { 255, 255, 255, 0, 104 },
{ 255, 255, 255, 0, 101 }, { 255, 255, 255, 0, 99 },
{ 255, 255, 255, 0, 96 }, { 255, 255, 255, 0, 93 },
{ 255, 255, 255, 0, 90 }, { 255, 255, 255, 0, 88 },
{ 255, 255, 255, 0, 85 }, { 255, 255, 255, 0, 83 },
{ 255, 255, 255, 0, 81 }, { 255, 255, 255, 0, 78 },
{ 255, 255, 255, 0, 76 }, { 255, 255, 255, 0, 74 },
{ 255, 255, 255, 0, 72 }, { 255, 255, 255, 0, 70 },
{ 255, 255, 255, 0, 68 }, { 255, 255, 255, 0, 66 },
{ 255, 255, 255, 0, 64 }, { 255, 255, 248, 0, 64 },
{ 255, 255, 241, 0, 64 }, { 255, 255, 234, 0, 64 },
{ 255, 255, 227, 0, 64 }, { 255, 255, 221, 0, 64 },
{ 255, 255, 215, 0, 64 }, { 255, 255, 208, 0, 64 },
{ 255, 255, 203, 0, 64 }, { 255, 255, 197, 0, 64 },
{ 255, 255, 191, 0, 64 }, { 255, 255, 186, 0, 64 },
{ 255, 255, 181, 0, 64 }, { 255, 255, 175, 0, 64 },
{ 255, 255, 170, 0, 64 }, { 255, 255, 166, 0, 64 },
{ 255, 255, 161, 0, 64 }, { 255, 255, 156, 0, 64 },
{ 255, 255, 152, 0, 64 }, { 255, 255, 148, 0, 64 },
{ 255, 255, 143, 0, 64 }, { 255, 255, 139, 0, 64 },
{ 255, 255, 135, 0, 64 }, { 255, 255, 132, 0, 64 },
{ 255, 255, 128, 0, 64 }, { 255, 255, 124, 0, 64 },
{ 255, 255, 121, 0, 64 }, { 255, 255, 117, 0, 64 },
{ 255, 255, 114, 0, 64 }, { 255, 255, 111, 0, 64 },
{ 255, 255, 108, 0, 64 }, { 255, 255, 104, 0, 64 },
{ 255, 255, 102, 0, 64 }, { 255, 255, 99, 0, 64 },
{ 255, 255, 96, 0, 64 }, { 255, 255, 93, 0, 64 },
{ 255, 255, 90, 0, 64 }, { 255, 255, 88, 0, 64 },
{ 255, 255, 85, 0, 64 }, { 255, 255, 83, 0, 64 },
{ 255, 255, 81, 0, 64 }, { 255, 255, 78, 0, 64 },
{ 255, 255, 76, 0, 64 }, { 255, 255, 74, 0, 64 },
{ 255, 255, 72, 0, 64 }, { 255, 255, 70, 0, 64 },
{ 255, 255, 68, 0, 64 }, { 255, 255, 66, 0, 64 },
{ 255, 255, 64, 0, 64 }, { 255, 255, 64, 0, 64 },
{ 255, 255, 62, 0, 64 }, { 255, 248, 62, 0, 64 },
{ 255, 248, 60, 0, 64 }, { 255, 241, 60, 0, 64 },
{ 255, 241, 59, 0, 64 }, { 255, 234, 59, 0, 64 },
{ 255, 234, 57, 0, 64 }, { 255, 227, 57, 0, 64 },
{ 255, 227, 55, 0, 64 }, { 255, 221, 55, 0, 64 },
{ 255, 221, 54, 0, 64 }, { 255, 215, 54, 0, 64 },
{ 255, 215, 52, 0, 64 }, { 255, 208, 52, 0, 64 },
{ 255, 208, 51, 0, 64 }, { 255, 203, 51, 0, 64 },
{ 255, 203, 49, 0, 64 }, { 255, 197, 49, 0, 64 },
{ 255, 197, 48, 0, 64 }, { 255, 191, 48, 0, 64 },
{ 255, 191, 47, 0, 64 }, { 255, 186, 47, 0, 64 },
{ 255, 186, 45, 0, 64 }, { 255, 181, 45, 0, 64 },
{ 255, 181, 44, 0, 64 }, { 255, 175, 44, 0, 64 },
{ 255, 175, 43, 0, 64 }, { 255, 170, 43, 0, 64 },
{ 255, 170, 42, 0, 64 }, { 255, 166, 42, 0, 64 },
{ 255, 166, 40, 0, 64 }, { 255, 161, 40, 0, 64 },
{ 255, 161, 39, 0, 64 }, { 255, 156, 39, 0, 64 },
{ 255, 156, 38, 0, 64 }, { 255, 152, 38, 0, 64 },
{ 255, 152, 37, 0, 64 }, { 255, 148, 37, 0, 64 },
{ 255, 148, 36, 0, 64 }, { 255, 143, 36, 0, 64 },
{ 255, 143, 35, 0, 64 }, { 255, 139, 35, 0, 64 },
{ 255, 139, 34, 0, 64 }, { 255, 135, 34, 0, 64 },
{ 255, 135, 33, 0, 64 }, { 255, 132, 33, 0, 64 },
{ 255, 132, 32, 0, 64 }, { 255, 128, 32, 0, 64 }
};
static struct bwn_txgain_entry txgain_r0[] = {
{ 7, 15, 14, 0, 152 }, { 7, 15, 14, 0, 147 },
{ 7, 15, 14, 0, 143 }, { 7, 15, 14, 0, 139 },
{ 7, 15, 14, 0, 135 }, { 7, 15, 14, 0, 131 },
{ 7, 15, 14, 0, 128 }, { 7, 15, 14, 0, 124 },
{ 7, 15, 14, 0, 121 }, { 7, 15, 14, 0, 117 },
{ 7, 15, 14, 0, 114 }, { 7, 15, 14, 0, 111 },
{ 7, 15, 14, 0, 107 }, { 7, 15, 14, 0, 104 },
{ 7, 15, 14, 0, 101 }, { 7, 15, 14, 0, 99 },
{ 7, 15, 14, 0, 96 }, { 7, 15, 14, 0, 93 },
{ 7, 15, 14, 0, 90 }, { 7, 15, 14, 0, 88 },
{ 7, 15, 14, 0, 85 }, { 7, 15, 14, 0, 83 },
{ 7, 15, 14, 0, 81 }, { 7, 15, 14, 0, 78 },
{ 7, 15, 14, 0, 76 }, { 7, 15, 14, 0, 74 },
{ 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 },
{ 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 },
{ 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 },
{ 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 },
{ 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 },
{ 7, 15, 13, 0, 70 }, { 7, 15, 13, 0, 68 },
{ 7, 15, 13, 0, 66 }, { 7, 15, 13, 0, 64 },
{ 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 },
{ 7, 15, 13, 0, 59 }, { 7, 15, 13, 0, 57 },
{ 7, 15, 12, 0, 71 }, { 7, 15, 12, 0, 69 },
{ 7, 15, 12, 0, 67 }, { 7, 15, 12, 0, 65 },
{ 7, 15, 12, 0, 63 }, { 7, 15, 12, 0, 62 },
{ 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 58 },
{ 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 70 },
{ 7, 15, 11, 0, 68 }, { 7, 15, 11, 0, 66 },
{ 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 },
{ 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 59 },
{ 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 },
{ 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 },
{ 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 },
{ 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 },
{ 7, 15, 10, 0, 58 }, { 7, 15, 10, 0, 56 },
{ 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 },
{ 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 },
{ 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 60 },
{ 7, 15, 9, 0, 59 }, { 7, 14, 9, 0, 72 },
{ 7, 14, 9, 0, 70 }, { 7, 14, 9, 0, 68 },
{ 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 64 },
{ 7, 14, 9, 0, 62 }, { 7, 14, 9, 0, 60 },
{ 7, 14, 9, 0, 59 }, { 7, 13, 9, 0, 72 },
{ 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 },
{ 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 },
{ 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 },
{ 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 },
{ 7, 13, 8, 0, 72 }, { 7, 13, 8, 0, 70 },
{ 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 },
{ 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 },
{ 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 },
{ 7, 12, 8, 0, 72 }, { 7, 12, 8, 0, 70 },
{ 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 },
{ 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 },
{ 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 },
{ 7, 12, 7, 0, 73 }, { 7, 12, 7, 0, 71 },
{ 7, 12, 7, 0, 69 }, { 7, 12, 7, 0, 67 },
{ 7, 12, 7, 0, 65 }, { 7, 12, 7, 0, 63 },
{ 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 },
{ 7, 11, 7, 0, 72 }, { 7, 11, 7, 0, 70 },
{ 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 },
{ 7, 11, 7, 0, 65 }, { 7, 11, 7, 0, 63 },
{ 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 },
{ 7, 11, 6, 0, 73 }, { 7, 11, 6, 0, 71 }
};
static struct bwn_txgain_entry txgain_2ghz_r0[] = {
{ 4, 15, 9, 0, 64 }, { 4, 15, 9, 0, 62 },
{ 4, 15, 9, 0, 60 }, { 4, 15, 9, 0, 59 },
{ 4, 14, 9, 0, 72 }, { 4, 14, 9, 0, 70 },
{ 4, 14, 9, 0, 68 }, { 4, 14, 9, 0, 66 },
{ 4, 14, 9, 0, 64 }, { 4, 14, 9, 0, 62 },
{ 4, 14, 9, 0, 60 }, { 4, 14, 9, 0, 59 },
{ 4, 13, 9, 0, 72 }, { 4, 13, 9, 0, 70 },
{ 4, 13, 9, 0, 68 }, { 4, 13, 9, 0, 66 },
{ 4, 13, 9, 0, 64 }, { 4, 13, 9, 0, 63 },
{ 4, 13, 9, 0, 61 }, { 4, 13, 9, 0, 59 },
{ 4, 13, 9, 0, 57 }, { 4, 13, 8, 0, 72 },
{ 4, 13, 8, 0, 70 }, { 4, 13, 8, 0, 68 },
{ 4, 13, 8, 0, 66 }, { 4, 13, 8, 0, 64 },
{ 4, 13, 8, 0, 62 }, { 4, 13, 8, 0, 60 },
{ 4, 13, 8, 0, 59 }, { 4, 12, 8, 0, 72 },
{ 4, 12, 8, 0, 70 }, { 4, 12, 8, 0, 68 },
{ 4, 12, 8, 0, 66 }, { 4, 12, 8, 0, 64 },
{ 4, 12, 8, 0, 62 }, { 4, 12, 8, 0, 61 },
{ 4, 12, 8, 0, 59 }, { 4, 12, 7, 0, 73 },
{ 4, 12, 7, 0, 71 }, { 4, 12, 7, 0, 69 },
{ 4, 12, 7, 0, 67 }, { 4, 12, 7, 0, 65 },
{ 4, 12, 7, 0, 63 }, { 4, 12, 7, 0, 61 },
{ 4, 12, 7, 0, 59 }, { 4, 11, 7, 0, 72 },
{ 4, 11, 7, 0, 70 }, { 4, 11, 7, 0, 68 },
{ 4, 11, 7, 0, 66 }, { 4, 11, 7, 0, 65 },
{ 4, 11, 7, 0, 63 }, { 4, 11, 7, 0, 61 },
{ 4, 11, 7, 0, 59 }, { 4, 11, 6, 0, 73 },
{ 4, 11, 6, 0, 71 }, { 4, 11, 6, 0, 69 },
{ 4, 11, 6, 0, 67 }, { 4, 11, 6, 0, 65 },
{ 4, 11, 6, 0, 63 }, { 4, 11, 6, 0, 61 },
{ 4, 11, 6, 0, 60 }, { 4, 10, 6, 0, 72 },
{ 4, 10, 6, 0, 70 }, { 4, 10, 6, 0, 68 },
{ 4, 10, 6, 0, 66 }, { 4, 10, 6, 0, 64 },
{ 4, 10, 6, 0, 62 }, { 4, 10, 6, 0, 60 },
{ 4, 10, 6, 0, 59 }, { 4, 10, 5, 0, 72 },
{ 4, 10, 5, 0, 70 }, { 4, 10, 5, 0, 68 },
{ 4, 10, 5, 0, 66 }, { 4, 10, 5, 0, 64 },
{ 4, 10, 5, 0, 62 }, { 4, 10, 5, 0, 60 },
{ 4, 10, 5, 0, 59 }, { 4, 9, 5, 0, 70 },
{ 4, 9, 5, 0, 68 }, { 4, 9, 5, 0, 66 },
{ 4, 9, 5, 0, 64 }, { 4, 9, 5, 0, 63 },
{ 4, 9, 5, 0, 61 }, { 4, 9, 5, 0, 59 },
{ 4, 9, 4, 0, 71 }, { 4, 9, 4, 0, 69 },
{ 4, 9, 4, 0, 67 }, { 4, 9, 4, 0, 65 },
{ 4, 9, 4, 0, 63 }, { 4, 9, 4, 0, 62 },
{ 4, 9, 4, 0, 60 }, { 4, 9, 4, 0, 58 },
{ 4, 8, 4, 0, 70 }, { 4, 8, 4, 0, 68 },
{ 4, 8, 4, 0, 66 }, { 4, 8, 4, 0, 65 },
{ 4, 8, 4, 0, 63 }, { 4, 8, 4, 0, 61 },
{ 4, 8, 4, 0, 59 }, { 4, 7, 4, 0, 68 },
{ 4, 7, 4, 0, 66 }, { 4, 7, 4, 0, 64 },
{ 4, 7, 4, 0, 62 }, { 4, 7, 4, 0, 61 },
{ 4, 7, 4, 0, 59 }, { 4, 7, 3, 0, 67 },
{ 4, 7, 3, 0, 65 }, { 4, 7, 3, 0, 63 },
{ 4, 7, 3, 0, 62 }, { 4, 7, 3, 0, 60 },
{ 4, 6, 3, 0, 65 }, { 4, 6, 3, 0, 63 },
{ 4, 6, 3, 0, 61 }, { 4, 6, 3, 0, 60 },
{ 4, 6, 3, 0, 58 }, { 4, 5, 3, 0, 68 },
{ 4, 5, 3, 0, 66 }, { 4, 5, 3, 0, 64 },
{ 4, 5, 3, 0, 62 }, { 4, 5, 3, 0, 60 },
{ 4, 5, 3, 0, 59 }, { 4, 5, 3, 0, 57 },
{ 4, 4, 2, 0, 83 }, { 4, 4, 2, 0, 81 },
{ 4, 4, 2, 0, 78 }, { 4, 4, 2, 0, 76 },
{ 4, 4, 2, 0, 74 }, { 4, 4, 2, 0, 72 }
};
static struct bwn_txgain_entry txgain_5ghz_r0[] = {
{ 7, 15, 15, 0, 99 }, { 7, 15, 15, 0, 96 },
{ 7, 15, 15, 0, 93 }, { 7, 15, 15, 0, 90 },
{ 7, 15, 15, 0, 88 }, { 7, 15, 15, 0, 85 },
{ 7, 15, 15, 0, 83 }, { 7, 15, 15, 0, 81 },
{ 7, 15, 15, 0, 78 }, { 7, 15, 15, 0, 76 },
{ 7, 15, 15, 0, 74 }, { 7, 15, 15, 0, 72 },
{ 7, 15, 15, 0, 70 }, { 7, 15, 15, 0, 68 },
{ 7, 15, 15, 0, 66 }, { 7, 15, 15, 0, 64 },
{ 7, 15, 15, 0, 62 }, { 7, 15, 15, 0, 60 },
{ 7, 15, 15, 0, 59 }, { 7, 15, 15, 0, 57 },
{ 7, 15, 15, 0, 55 }, { 7, 15, 14, 0, 72 },
{ 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 },
{ 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 },
{ 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 },
{ 7, 15, 14, 0, 58 }, { 7, 15, 14, 0, 56 },
{ 7, 15, 14, 0, 55 }, { 7, 15, 13, 0, 71 },
{ 7, 15, 13, 0, 69 }, { 7, 15, 13, 0, 67 },
{ 7, 15, 13, 0, 65 }, { 7, 15, 13, 0, 63 },
{ 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 },
{ 7, 15, 13, 0, 58 }, { 7, 15, 13, 0, 56 },
{ 7, 15, 12, 0, 72 }, { 7, 15, 12, 0, 70 },
{ 7, 15, 12, 0, 68 }, { 7, 15, 12, 0, 66 },
{ 7, 15, 12, 0, 64 }, { 7, 15, 12, 0, 62 },
{ 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 59 },
{ 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 73 },
{ 7, 15, 11, 0, 71 }, { 7, 15, 11, 0, 69 },
{ 7, 15, 11, 0, 67 }, { 7, 15, 11, 0, 65 },
{ 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 },
{ 7, 15, 11, 0, 60 }, { 7, 15, 11, 0, 58 },
{ 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 },
{ 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 },
{ 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 },
{ 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 },
{ 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 },
{ 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 },
{ 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 61 },
{ 7, 15, 9, 0, 59 }, { 7, 15, 9, 0, 57 },
{ 7, 15, 9, 0, 56 }, { 7, 14, 9, 0, 68 },
{ 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 65 },
{ 7, 14, 9, 0, 63 }, { 7, 14, 9, 0, 61 },
{ 7, 14, 9, 0, 59 }, { 7, 14, 9, 0, 58 },
{ 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 },
{ 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 },
{ 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 },
{ 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 },
{ 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 },
{ 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 },
{ 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 },
{ 7, 13, 8, 0, 59 }, { 7, 13, 8, 0, 57 },
{ 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 },
{ 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 },
{ 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 },
{ 7, 12, 8, 0, 59 }, { 7, 12, 8, 0, 57 },
{ 7, 12, 7, 0, 70 }, { 7, 12, 7, 0, 68 },
{ 7, 12, 7, 0, 66 }, { 7, 12, 7, 0, 64 },
{ 7, 12, 7, 0, 62 }, { 7, 12, 7, 0, 61 },
{ 7, 12, 7, 0, 59 }, { 7, 12, 7, 0, 57 },
{ 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 },
{ 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 64 },
{ 7, 11, 7, 0, 62 }, { 7, 11, 7, 0, 61 },
{ 7, 11, 7, 0, 59 }, { 7, 11, 7, 0, 57 },
{ 7, 11, 6, 0, 69 }, { 7, 11, 6, 0, 67 },
{ 7, 11, 6, 0, 65 }, { 7, 11, 6, 0, 63 },
{ 7, 11, 6, 0, 62 }, { 7, 11, 6, 0, 60 }
};
static struct bwn_txgain_entry txgain_r1[] = {
{ 7, 15, 14, 0, 152 }, { 7, 15, 14, 0, 147 },
{ 7, 15, 14, 0, 143 }, { 7, 15, 14, 0, 139 },
{ 7, 15, 14, 0, 135 }, { 7, 15, 14, 0, 131 },
{ 7, 15, 14, 0, 128 }, { 7, 15, 14, 0, 124 },
{ 7, 15, 14, 0, 121 }, { 7, 15, 14, 0, 117 },
{ 7, 15, 14, 0, 114 }, { 7, 15, 14, 0, 111 },
{ 7, 15, 14, 0, 107 }, { 7, 15, 14, 0, 104 },
{ 7, 15, 14, 0, 101 }, { 7, 15, 14, 0, 99 },
{ 7, 15, 14, 0, 96 }, { 7, 15, 14, 0, 93 },
{ 7, 15, 14, 0, 90 }, { 7, 15, 14, 0, 88 },
{ 7, 15, 14, 0, 85 }, { 7, 15, 14, 0, 83 },
{ 7, 15, 14, 0, 81 }, { 7, 15, 14, 0, 78 },
{ 7, 15, 14, 0, 76 }, { 7, 15, 14, 0, 74 },
{ 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 },
{ 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 },
{ 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 },
{ 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 },
{ 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 },
{ 7, 15, 13, 0, 70 }, { 7, 15, 14, 0, 68 },
{ 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 },
{ 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 },
{ 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 },
{ 7, 15, 13, 0, 72 }, { 7, 15, 13, 0, 70 },
{ 7, 15, 13, 0, 68 }, { 7, 15, 13, 0, 66 },
{ 7, 15, 13, 0, 64 }, { 7, 15, 13, 0, 62 },
{ 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 59 },
{ 7, 15, 13, 0, 57 }, { 7, 15, 12, 0, 71 },
{ 7, 15, 12, 0, 69 }, { 7, 15, 12, 0, 67 },
{ 7, 15, 12, 0, 65 }, { 7, 15, 12, 0, 63 },
{ 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 },
{ 7, 15, 12, 0, 58 }, { 7, 15, 12, 0, 57 },
{ 7, 15, 11, 0, 70 }, { 7, 15, 11, 0, 68 },
{ 7, 15, 11, 0, 66 }, { 7, 15, 11, 0, 65 },
{ 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 },
{ 7, 15, 11, 0, 59 }, { 7, 15, 11, 0, 58 },
{ 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 },
{ 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 },
{ 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 },
{ 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 },
{ 7, 15, 10, 0, 56 }, { 7, 15, 9, 0, 70 },
{ 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 },
{ 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 },
{ 7, 15, 9, 0, 60 }, { 7, 15, 9, 0, 59 },
{ 7, 14, 9, 0, 72 }, { 7, 14, 9, 0, 70 },
{ 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 },
{ 7, 14, 9, 0, 64 }, { 7, 14, 9, 0, 62 },
{ 7, 14, 9, 0, 60 }, { 7, 14, 9, 0, 59 },
{ 7, 13, 9, 0, 72 }, { 7, 13, 9, 0, 70 },
{ 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 },
{ 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 },
{ 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 },
{ 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 72 },
{ 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 },
{ 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 },
{ 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 },
{ 7, 13, 8, 0, 59 }, { 7, 12, 8, 0, 72 },
{ 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 },
{ 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 },
{ 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 },
{ 7, 12, 8, 0, 59 }, { 7, 12, 7, 0, 73 },
{ 7, 12, 7, 0, 71 }, { 7, 12, 7, 0, 69 },
{ 7, 12, 7, 0, 67 }, { 7, 12, 7, 0, 65 },
{ 7, 12, 7, 0, 63 }, { 7, 12, 7, 0, 61 },
{ 7, 12, 7, 0, 59 }, { 7, 11, 7, 0, 72 },
{ 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 },
{ 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 65 },
{ 7, 11, 7, 0, 63 }, { 7, 11, 7, 0, 61 },
{ 7, 11, 7, 0, 59 }, { 7, 11, 6, 0, 73 },
{ 7, 11, 6, 0, 71 }
};
static struct bwn_txgain_entry txgain_2ghz_r1[] = {
{ 4, 15, 15, 0, 90 }, { 4, 15, 15, 0, 88 },
{ 4, 15, 15, 0, 85 }, { 4, 15, 15, 0, 83 },
{ 4, 15, 15, 0, 81 }, { 4, 15, 15, 0, 78 },
{ 4, 15, 15, 0, 76 }, { 4, 15, 15, 0, 74 },
{ 4, 15, 15, 0, 72 }, { 4, 15, 15, 0, 70 },
{ 4, 15, 15, 0, 68 }, { 4, 15, 15, 0, 66 },
{ 4, 15, 15, 0, 64 }, { 4, 15, 15, 0, 62 },
{ 4, 15, 15, 0, 60 }, { 4, 15, 15, 0, 59 },
{ 4, 15, 14, 0, 72 }, { 4, 15, 14, 0, 70 },
{ 4, 15, 14, 0, 68 }, { 4, 15, 14, 0, 66 },
{ 4, 15, 14, 0, 64 }, { 4, 15, 14, 0, 62 },
{ 4, 15, 14, 0, 60 }, { 4, 15, 14, 0, 59 },
{ 4, 15, 13, 0, 72 }, { 4, 15, 13, 0, 70 },
{ 4, 15, 13, 0, 68 }, { 4, 15, 13, 0, 66 },
{ 4, 15, 13, 0, 64 }, { 4, 15, 13, 0, 62 },
{ 4, 15, 13, 0, 60 }, { 4, 15, 13, 0, 59 },
{ 4, 15, 12, 0, 72 }, { 4, 15, 12, 0, 70 },
{ 4, 15, 12, 0, 68 }, { 4, 15, 12, 0, 66 },
{ 4, 15, 12, 0, 64 }, { 4, 15, 12, 0, 62 },
{ 4, 15, 12, 0, 60 }, { 4, 15, 12, 0, 59 },
{ 4, 15, 11, 0, 72 }, { 4, 15, 11, 0, 70 },
{ 4, 15, 11, 0, 68 }, { 4, 15, 11, 0, 66 },
{ 4, 15, 11, 0, 64 }, { 4, 15, 11, 0, 62 },
{ 4, 15, 11, 0, 60 }, { 4, 15, 11, 0, 59 },
{ 4, 15, 10, 0, 72 }, { 4, 15, 10, 0, 70 },
{ 4, 15, 10, 0, 68 }, { 4, 15, 10, 0, 66 },
{ 4, 15, 10, 0, 64 }, { 4, 15, 10, 0, 62 },
{ 4, 15, 10, 0, 60 }, { 4, 15, 10, 0, 59 },
{ 4, 15, 9, 0, 72 }, { 4, 15, 9, 0, 70 },
{ 4, 15, 9, 0, 68 }, { 4, 15, 9, 0, 66 },
{ 4, 15, 9, 0, 64 }, { 4, 15, 9, 0, 62 },
{ 4, 15, 9, 0, 60 }, { 4, 15, 9, 0, 59 },
{ 4, 14, 9, 0, 72 }, { 4, 14, 9, 0, 70 },
{ 4, 14, 9, 0, 68 }, { 4, 14, 9, 0, 66 },
{ 4, 14, 9, 0, 64 }, { 4, 14, 9, 0, 62 },
{ 4, 14, 9, 0, 60 }, { 4, 14, 9, 0, 59 },
{ 4, 13, 9, 0, 72 }, { 4, 13, 9, 0, 70 },
{ 4, 13, 9, 0, 68 }, { 4, 13, 9, 0, 66 },
{ 4, 13, 9, 0, 64 }, { 4, 13, 9, 0, 63 },
{ 4, 13, 9, 0, 61 }, { 4, 13, 9, 0, 59 },
{ 4, 13, 9, 0, 57 }, { 4, 13, 8, 0, 72 },
{ 4, 13, 8, 0, 70 }, { 4, 13, 8, 0, 68 },
{ 4, 13, 8, 0, 66 }, { 4, 13, 8, 0, 64 },
{ 4, 13, 8, 0, 62 }, { 4, 13, 8, 0, 60 },
{ 4, 13, 8, 0, 59 }, { 4, 12, 8, 0, 72 },
{ 4, 12, 8, 0, 70 }, { 4, 12, 8, 0, 68 },
{ 4, 12, 8, 0, 66 }, { 4, 12, 8, 0, 64 },
{ 4, 12, 8, 0, 62 }, { 4, 12, 8, 0, 61 },
{ 4, 12, 8, 0, 59 }, { 4, 12, 7, 0, 73 },
{ 4, 12, 7, 0, 71 }, { 4, 12, 7, 0, 69 },
{ 4, 12, 7, 0, 67 }, { 4, 12, 7, 0, 65 },
{ 4, 12, 7, 0, 63 }, { 4, 12, 7, 0, 61 },
{ 4, 12, 7, 0, 59 }, { 4, 11, 7, 0, 72 },
{ 4, 11, 7, 0, 70 }, { 4, 11, 7, 0, 68 },
{ 4, 11, 7, 0, 66 }, { 4, 11, 7, 0, 65 },
{ 4, 11, 7, 0, 63 }, { 4, 11, 7, 0, 61 },
{ 4, 11, 7, 0, 59 }, { 4, 11, 6, 0, 73 },
{ 4, 11, 6, 0, 71 }, { 4, 11, 6, 0, 69 },
{ 4, 11, 6, 0, 67 }, { 4, 11, 6, 0, 65 },
{ 4, 11, 6, 0, 63 }, { 4, 11, 6, 0, 61 },
{ 4, 11, 6, 0, 60 }, { 4, 10, 6, 0, 72 },
{ 4, 10, 6, 0, 70 }, { 4, 10, 6, 0, 68 },
{ 4, 10, 6, 0, 66 }, { 4, 10, 6, 0, 64 },
{ 4, 10, 6, 0, 62 }, { 4, 10, 6, 0, 60 }
};
static struct bwn_txgain_entry txgain_5ghz_r1[] = {
{ 7, 15, 15, 0, 99 }, { 7, 15, 15, 0, 96 },
{ 7, 15, 15, 0, 93 }, { 7, 15, 15, 0, 90 },
{ 7, 15, 15, 0, 88 }, { 7, 15, 15, 0, 85 },
{ 7, 15, 15, 0, 83 }, { 7, 15, 15, 0, 81 },
{ 7, 15, 15, 0, 78 }, { 7, 15, 15, 0, 76 },
{ 7, 15, 15, 0, 74 }, { 7, 15, 15, 0, 72 },
{ 7, 15, 15, 0, 70 }, { 7, 15, 15, 0, 68 },
{ 7, 15, 15, 0, 66 }, { 7, 15, 15, 0, 64 },
{ 7, 15, 15, 0, 62 }, { 7, 15, 15, 0, 60 },
{ 7, 15, 15, 0, 59 }, { 7, 15, 15, 0, 57 },
{ 7, 15, 15, 0, 55 }, { 7, 15, 14, 0, 72 },
{ 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 },
{ 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 },
{ 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 },
{ 7, 15, 14, 0, 58 }, { 7, 15, 14, 0, 56 },
{ 7, 15, 14, 0, 55 }, { 7, 15, 13, 0, 71 },
{ 7, 15, 13, 0, 69 }, { 7, 15, 13, 0, 67 },
{ 7, 15, 13, 0, 65 }, { 7, 15, 13, 0, 63 },
{ 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 },
{ 7, 15, 13, 0, 58 }, { 7, 15, 13, 0, 56 },
{ 7, 15, 12, 0, 72 }, { 7, 15, 12, 0, 70 },
{ 7, 15, 12, 0, 68 }, { 7, 15, 12, 0, 66 },
{ 7, 15, 12, 0, 64 }, { 7, 15, 12, 0, 62 },
{ 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 59 },
{ 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 73 },
{ 7, 15, 11, 0, 71 }, { 7, 15, 11, 0, 69 },
{ 7, 15, 11, 0, 67 }, { 7, 15, 11, 0, 65 },
{ 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 },
{ 7, 15, 11, 0, 60 }, { 7, 15, 11, 0, 58 },
{ 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 },
{ 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 },
{ 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 },
{ 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 },
{ 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 },
{ 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 },
{ 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 61 },
{ 7, 15, 9, 0, 59 }, { 7, 15, 9, 0, 57 },
{ 7, 15, 9, 0, 56 }, { 7, 14, 9, 0, 68 },
{ 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 65 },
{ 7, 14, 9, 0, 63 }, { 7, 14, 9, 0, 61 },
{ 7, 14, 9, 0, 59 }, { 7, 14, 9, 0, 58 },
{ 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 },
{ 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 },
{ 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 },
{ 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 },
{ 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 },
{ 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 },
{ 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 },
{ 7, 13, 8, 0, 59 }, { 7, 13, 8, 0, 57 },
{ 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 },
{ 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 },
{ 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 },
{ 7, 12, 8, 0, 59 }, { 7, 12, 8, 0, 57 },
{ 7, 12, 7, 0, 70 }, { 7, 12, 7, 0, 68 },
{ 7, 12, 7, 0, 66 }, { 7, 12, 7, 0, 64 },
{ 7, 12, 7, 0, 62 }, { 7, 12, 7, 0, 61 },
{ 7, 12, 7, 0, 59 }, { 7, 12, 7, 0, 57 },
{ 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 },
{ 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 64 },
{ 7, 11, 7, 0, 62 }, { 7, 11, 7, 0, 61 },
{ 7, 11, 7, 0, 59 }, { 7, 11, 7, 0, 57 },
{ 7, 11, 6, 0, 69 }, { 7, 11, 6, 0, 67 },
{ 7, 11, 6, 0, 65 }, { 7, 11, 6, 0, 63 },
{ 7, 11, 6, 0, 62 }, { 7, 11, 6, 0, 60 }
};
if (mac->mac_phy.rev != 0 && mac->mac_phy.rev != 1) {
if (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA)
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r2);
else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128,
txgain_2ghz_r2);
else
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128,
txgain_5ghz_r2);
return;
}
if (mac->mac_phy.rev == 0) {
if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) ||
(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_HGPA))
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r0);
else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128,
txgain_2ghz_r0);
else
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128,
txgain_5ghz_r0);
return;
}
if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) ||
(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_HGPA))
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r1);
else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r1);
else
bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r1);
}
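/*
 * Write a single entry into a PHY table.  The table type and offset are
 * packed into 'typeoffset'; 8- and 16-bit entries only touch the low data
 * register, while 32-bit entries are split across the high and low data
 * registers.
 */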
static void
bwn_tab_write(struct bwn_mac *mac, uint32_t typeoffset, uint32_t value)
{
uint32_t offset, type;
type = BWN_TAB_GETTYPE(typeoffset);
offset = BWN_TAB_GETOFFSET(typeoffset);
KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__));
switch (type) {
case BWN_TAB_8BIT:
KASSERT(!(value & ~0xff), ("%s:%d: fail", __func__, __LINE__));
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
break;
case BWN_TAB_16BIT:
KASSERT(!(value & ~0xffff),
("%s:%d: fail", __func__, __LINE__));
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
break;
case BWN_TAB_32BIT:
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATAHI, value >> 16);
BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value);
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
}
}
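/*
 * Loopback calibration: force the TR switch and AFE overrides, then sweep
 * the RX gain table until the measured I+Q power of the internally
 * generated tone falls into the expected window.  Returns the matching
 * gain index, or -1 if none was found.
 */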
static int
bwn_phy_lp_loopback(struct bwn_mac *mac)
{
struct bwn_phy_lp_iq_est ie;
int i, index = -1;
uint32_t tmp;
memset(&ie, 0, sizeof(ie));
bwn_phy_lp_set_trsw_over(mac, 1, 1);
BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 1);
BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xfffe);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x800);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x800);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x8);
BWN_RF_WRITE(mac, BWN_B2062_N_TXCTL_A, 0x80);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x80);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x80);
for (i = 0; i < 32; i++) {
bwn_phy_lp_set_rxgain_idx(mac, i);
bwn_phy_lp_ddfs_turnon(mac, 1, 1, 5, 5, 0);
if (!(bwn_phy_lp_rx_iq_est(mac, 1000, 32, &ie)))
continue;
tmp = (ie.ie_ipwr + ie.ie_qpwr) / 1000;
if ((tmp > 4000) && (tmp < 10000)) {
index = i;
break;
}
}
bwn_phy_lp_ddfs_turnoff(mac);
return (index);
}
static void
bwn_phy_lp_set_rxgain_idx(struct bwn_mac *mac, uint16_t idx)
{
bwn_phy_lp_set_rxgain(mac, bwn_tab_read(mac, BWN_TAB_2(12, idx)));
}
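/*
 * Enable the DDFS block (used here as an on-chip tone source for loopback
 * calibration) with the given I/Q enables, phase increments and scale
 * index; any previously running DDFS is stopped first.
 */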
static void
bwn_phy_lp_ddfs_turnon(struct bwn_mac *mac, int i_on, int q_on,
int incr1, int incr2, int scale_idx)
{
bwn_phy_lp_ddfs_turnoff(mac);
BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS_POINTER_INIT, 0xff80);
BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS_POINTER_INIT, 0x80ff);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS_INCR_INIT, 0xff80, incr1);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS_INCR_INIT, 0x80ff, incr2 << 8);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xfff7, i_on << 3);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xffef, q_on << 4);
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xff9f, scale_idx << 5);
BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0xfffb);
BWN_PHY_SET(mac, BWN_PHY_AFE_DDFS, 0x2);
BWN_PHY_SET(mac, BWN_PHY_LP_PHY_CTL, 0x20);
}
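/*
 * Run a hardware I/Q estimate over 'sample' samples.  The routine polls
 * for completion (up to roughly 500ms) and, on success, fills in the I*Q
 * product and the I/Q power accumulators; it returns 0 if the measurement
 * did not complete in time.
 */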
static uint8_t
bwn_phy_lp_rx_iq_est(struct bwn_mac *mac, uint16_t sample, uint8_t time,
struct bwn_phy_lp_iq_est *ie)
{
int i;
BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfff7);
BWN_PHY_WRITE(mac, BWN_PHY_IQ_NUM_SMPLS_ADDR, sample);
BWN_PHY_SETMASK(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xff00, time);
BWN_PHY_MASK(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xfeff);
BWN_PHY_SET(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0x200);
for (i = 0; i < 500; i++) {
if (!(BWN_PHY_READ(mac,
BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200))
break;
DELAY(1000);
}
if ((BWN_PHY_READ(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) {
BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x8);
return 0;
}
ie->ie_iqprod = BWN_PHY_READ(mac, BWN_PHY_IQ_ACC_HI_ADDR);
ie->ie_iqprod <<= 16;
ie->ie_iqprod |= BWN_PHY_READ(mac, BWN_PHY_IQ_ACC_LO_ADDR);
ie->ie_ipwr = BWN_PHY_READ(mac, BWN_PHY_IQ_I_PWR_ACC_HI_ADDR);
ie->ie_ipwr <<= 16;
ie->ie_ipwr |= BWN_PHY_READ(mac, BWN_PHY_IQ_I_PWR_ACC_LO_ADDR);
ie->ie_qpwr = BWN_PHY_READ(mac, BWN_PHY_IQ_Q_PWR_ACC_HI_ADDR);
ie->ie_qpwr <<= 16;
ie->ie_qpwr |= BWN_PHY_READ(mac, BWN_PHY_IQ_Q_PWR_ACC_LO_ADDR);
BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x8);
return 1;
}
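/*
 * Read a single entry from a PHY table; the inverse of bwn_tab_write().
 */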
static uint32_t
bwn_tab_read(struct bwn_mac *mac, uint32_t typeoffset)
{
uint32_t offset, type, value;
type = BWN_TAB_GETTYPE(typeoffset);
offset = BWN_TAB_GETOFFSET(typeoffset);
KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__));
switch (type) {
case BWN_TAB_8BIT:
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO) & 0xff;
break;
case BWN_TAB_16BIT:
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO);
break;
case BWN_TAB_32BIT:
BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset);
value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATAHI);
value <<= 16;
value |= BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO);
break;
default:
KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
value = 0;
}
return (value);
}
static void
bwn_phy_lp_ddfs_turnoff(struct bwn_mac *mac)
{
BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0xfffd);
BWN_PHY_MASK(mac, BWN_PHY_LP_PHY_CTL, 0xffdf);
}
static void
bwn_phy_lp_set_txgain_dac(struct bwn_mac *mac, uint16_t dac)
{
uint16_t ctl;
ctl = BWN_PHY_READ(mac, BWN_PHY_AFE_DAC_CTL) & 0xc7f;
ctl |= dac << 7;
BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DAC_CTL, 0xf000, ctl);
}
static void
bwn_phy_lp_set_txgain_pa(struct bwn_mac *mac, uint16_t gain)
{
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfb), 0xe03f, gain << 6);
BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfd), 0x80ff, gain << 8);
}
static void
bwn_phy_lp_set_txgain_override(struct bwn_mac *mac)
{
if (mac->mac_phy.rev < 2)
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x100);
else {
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x80);
BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x4000);
}
BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 0x40);
}
static uint16_t
bwn_phy_lp_get_pa_gain(struct bwn_mac *mac)
{
return BWN_PHY_READ(mac, BWN_PHY_OFDM(0xfb)) & 0x7f;
}
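/* Count the number of significant bits in |val|. */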
static uint8_t
bwn_nbits(int32_t val)
{
uint32_t tmp;
uint8_t nbits = 0;
for (tmp = abs(val); tmp != 0; tmp >>= 1)
nbits++;
return (nbits);
}
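/*
 * Write TX gain table entries [offset, count) into the hardware gain
 * table, dispatching to the PHY-revision-specific format below.
 */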
static void
bwn_phy_lp_gaintbl_write_multi(struct bwn_mac *mac, int offset, int count,
struct bwn_txgain_entry *table)
{
int i;
for (i = offset; i < count; i++)
bwn_phy_lp_gaintbl_write(mac, i, table[i]);
}
static void
bwn_phy_lp_gaintbl_write(struct bwn_mac *mac, int offset,
struct bwn_txgain_entry data)
{
if (mac->mac_phy.rev >= 2)
bwn_phy_lp_gaintbl_write_r2(mac, offset, data);
else
bwn_phy_lp_gaintbl_write_r01(mac, offset, data);
}
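/*
 * rev >= 2 gain table format: pad/pga/gm are packed into one 32-bit word
 * together with a band-dependent constant in the top byte; bbmult and dac
 * go into a second table.
 */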
static void
bwn_phy_lp_gaintbl_write_r2(struct bwn_mac *mac, int offset,
struct bwn_txgain_entry te)
{
struct bwn_softc *sc = mac->mac_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
KASSERT(mac->mac_phy.rev >= 2, ("%s:%d: fail", __func__, __LINE__));
tmp = (te.te_pad << 16) | (te.te_pga << 8) | te.te_gm;
if (mac->mac_phy.rev >= 3) {
tmp |= ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ?
(0x10 << 24) : (0x70 << 24));
} else {
tmp |= ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ?
(0x14 << 24) : (0x7f << 24));
}
bwn_tab_write(mac, BWN_TAB_4(7, 0xc0 + offset), tmp);
bwn_tab_write(mac, BWN_TAB_4(7, 0x140 + offset),
te.te_bbmult << 20 | te.te_dac << 28);
}
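/*
 * rev 0/1 gain table format: dac/gm/pga/pad share a single word and
 * bbmult is written separately.
 */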
static void
bwn_phy_lp_gaintbl_write_r01(struct bwn_mac *mac, int offset,
struct bwn_txgain_entry te)
{
KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__));
bwn_tab_write(mac, BWN_TAB_4(10, 0xc0 + offset),
(te.te_pad << 11) | (te.te_pga << 7) | (te.te_gm << 4) |
te.te_dac);
bwn_tab_write(mac, BWN_TAB_4(10, 0x140 + offset), te.te_bbmult << 20);
}
static void
bwn_sysctl_node(struct bwn_softc *sc)
{
device_t dev = sc->sc_dev;
struct bwn_mac *mac;
struct bwn_stats *stats;
/* XXX assume that count of MAC is only 1. */
if ((mac = sc->sc_curmac) == NULL)
return;
stats = &mac->mac_stats;
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"linknoise", CTLFLAG_RW, &stats->rts, 0, "Noise level");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"rts", CTLFLAG_RW, &stats->rts, 0, "RTS");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"rtsfail", CTLFLAG_RW, &stats->rtsfail, 0, "RTS failed to send");
#ifdef BWN_DEBUG
SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"debug", CTLFLAG_RW, &sc->sc_debug, 0, "Debug flags");
#endif
}
static device_method_t bwn_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, bwn_probe),
DEVMETHOD(device_attach, bwn_attach),
DEVMETHOD(device_detach, bwn_detach),
DEVMETHOD(device_suspend, bwn_suspend),
DEVMETHOD(device_resume, bwn_resume),
DEVMETHOD_END
};
static driver_t bwn_driver = {
"bwn",
bwn_methods,
sizeof(struct bwn_softc)
};
static devclass_t bwn_devclass;
DRIVER_MODULE(bwn, siba_bwn, bwn_driver, bwn_devclass, 0, 0);
MODULE_DEPEND(bwn, siba_bwn, 1, 1, 1);
MODULE_DEPEND(bwn, wlan, 1, 1, 1); /* 802.11 media layer */
MODULE_DEPEND(bwn, firmware, 1, 1, 1); /* firmware support */
MODULE_DEPEND(bwn, wlan_amrr, 1, 1, 1);
Index: head/sys/dev/bwn/if_bwnvar.h
===================================================================
--- head/sys/dev/bwn/if_bwnvar.h (revision 287196)
+++ head/sys/dev/bwn/if_bwnvar.h (revision 287197)
@@ -1,952 +1,952 @@
/*-
* Copyright (c) 2009-2010 Weongyo Jeong <weongyo@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
* $FreeBSD$
*/
#ifndef _IF_BWNVAR_H
#define _IF_BWNVAR_H
struct siba_dev_softc;
struct bwn_softc;
struct bwn_mac;
#define N(a) (sizeof(a) / sizeof(a[0]))
#define BWN_ALIGN 0x1000
#define BWN_BUS_SPACE_MAXADDR_30BIT 0x3fffffff
#define BWN_RETRY_SHORT 7
#define BWN_RETRY_LONG 4
#define BWN_STAID_MAX 64
#define BWN_TXPWR_IGNORE_TIME (1 << 0)
#define BWN_TXPWR_IGNORE_TSSI (1 << 1)
#define BWN_HAS_TXMAG(phy) \
(((phy)->rev >= 2) && ((phy)->rf_ver == 0x2050) && \
((phy)->rf_rev == 8))
#define BWN_HAS_LOOPBACK(phy) \
(((phy)->rev > 1) || ((phy)->gmode))
#define BWN_TXERROR_MAX 1000
#define BWN_GETTIME(v) do { \
struct timespec ts; \
nanouptime(&ts); \
(v) = ts.tv_nsec / 1000000 + ts.tv_sec * 1000; \
} while (0)
#define BWN_ISOLDFMT(mac) ((mac)->mac_fw.rev <= 351)
#define BWN_TSSI2DBM(num, den) \
((int32_t)((num < 0) ? num / den : (num + den / 2) / den))
#define BWN_HDRSIZE(mac) \
((BWN_ISOLDFMT(mac)) ? (100 + sizeof(struct bwn_plcp6)) : \
(104 + sizeof(struct bwn_plcp6)))
#define BWN_PIO_COOKIE(tq, tp) \
((uint16_t)((((uint16_t)tq->tq_index + 1) << 12) | tp->tp_index))
#define BWN_DMA_COOKIE(dr, slot) \
((uint16_t)(((uint16_t)dr->dr_index + 1) << 12) | (uint16_t)slot)
#define BWN_READ_2(mac, o) (siba_read_2(mac->mac_sc->sc_dev, o))
#define BWN_READ_4(mac, o) (siba_read_4(mac->mac_sc->sc_dev, o))
#define BWN_WRITE_2(mac, o, v) \
(siba_write_2(mac->mac_sc->sc_dev, o, v))
#define BWN_WRITE_4(mac, o, v) \
(siba_write_4(mac->mac_sc->sc_dev, o, v))
#define BWN_PIO_TXQOFFSET(mac) \
((siba_get_revid(mac->mac_sc->sc_dev) >= 11) ? 0x18 : 0)
#define BWN_PIO_RXQOFFSET(mac) \
((siba_get_revid(mac->mac_sc->sc_dev) >= 11) ? 0x38 : 8)
#define BWN_SEC_NEWAPI(mac) (mac->mac_fw.rev >= 351)
#define BWN_SEC_KEY2FW(mac, idx) \
(BWN_SEC_NEWAPI(mac) ? idx : ((idx >= 4) ? idx - 4 : idx))
#define BWN_RF_READ(mac, r) (mac->mac_phy.rf_read(mac, r))
#define BWN_RF_WRITE(mac, r, v) (mac->mac_phy.rf_write(mac, r, v))
#define BWN_RF_MASK(mac, o, m) \
BWN_RF_WRITE(mac, o, BWN_RF_READ(mac, o) & m)
#define BWN_RF_SETMASK(mac, offset, mask, set) \
BWN_RF_WRITE(mac, offset, (BWN_RF_READ(mac, offset) & mask) | set)
#define BWN_RF_SET(mac, offset, set) \
BWN_RF_WRITE(mac, offset, BWN_RF_READ(mac, offset) | set)
#define BWN_PHY_READ(mac, r) (mac->mac_phy.phy_read(mac, r))
#define BWN_PHY_WRITE(mac, r, v) \
(mac->mac_phy.phy_write(mac, r, v))
#define BWN_PHY_SET(mac, offset, set) do { \
if (mac->mac_phy.phy_maskset != NULL) { \
KASSERT(mac->mac_status < BWN_MAC_STATUS_INITED || \
mac->mac_suspended > 0, \
("dont access PHY or RF registers after turning on MAC")); \
mac->mac_phy.phy_maskset(mac, offset, 0xffff, set); \
} else \
BWN_PHY_WRITE(mac, offset, \
BWN_PHY_READ(mac, offset) | (set)); \
} while (0)
#define BWN_PHY_SETMASK(mac, offset, mask, set) do { \
if (mac->mac_phy.phy_maskset != NULL) { \
KASSERT(mac->mac_status < BWN_MAC_STATUS_INITED || \
mac->mac_suspended > 0, \
("dont access PHY or RF registers after turning on MAC")); \
mac->mac_phy.phy_maskset(mac, offset, mask, set); \
} else \
BWN_PHY_WRITE(mac, offset, \
(BWN_PHY_READ(mac, offset) & (mask)) | (set)); \
} while (0)
#define BWN_PHY_MASK(mac, offset, mask) do { \
if (mac->mac_phy.phy_maskset != NULL) { \
KASSERT(mac->mac_status < BWN_MAC_STATUS_INITED || \
mac->mac_suspended > 0, \
("dont access PHY or RF registers after turning on MAC")); \
mac->mac_phy.phy_maskset(mac, offset, mask, 0); \
} else \
BWN_PHY_WRITE(mac, offset, \
BWN_PHY_READ(mac, offset) & mask); \
} while (0)
#define BWN_PHY_COPY(mac, dst, src) do { \
KASSERT(mac->mac_status < BWN_MAC_STATUS_INITED || \
mac->mac_suspended > 0, \
("dont access PHY or RF registers after turning on MAC")); \
BWN_PHY_WRITE(mac, dst, BWN_PHY_READ(mac, src)); \
} while (0)
#define BWN_LO_CALIB_EXPIRE (1000 * (30 - 2))
#define BWN_LO_PWRVEC_EXPIRE (1000 * (30 - 2))
#define BWN_LO_TXCTL_EXPIRE (1000 * (180 - 4))
#define BWN_DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
#define BWN_LPD(L, P, D) (((L) << 2) | ((P) << 1) | ((D) << 0))
#define BWN_BITREV4(tmp) (BWN_BITREV8(tmp) >> 4)
#define BWN_BITREV8(byte) (bwn_bitrev_table[byte])
#define BWN_BBATTCMP(a, b) ((a)->att == (b)->att)
#define BWN_RFATTCMP(a, b) \
(((a)->att == (b)->att) && ((a)->padmix == (b)->padmix))
#define BWN_PIO_WRITE_2(mac, tq, offset, value) \
BWN_WRITE_2(mac, (tq)->tq_base + offset, value)
#define BWN_PIO_READ_4(mac, tq, offset) \
BWN_READ_4(mac, tq->tq_base + offset)
#define BWN_ISCCKRATE(rate) \
(rate == BWN_CCK_RATE_1MB || rate == BWN_CCK_RATE_2MB || \
rate == BWN_CCK_RATE_5MB || rate == BWN_CCK_RATE_11MB)
#define BWN_ISOFDMRATE(rate) (!BWN_ISCCKRATE(rate))
#define BWN_BARRIER(mac, flags) siba_barrier(mac->mac_sc->sc_dev, flags)
#define BWN_DMA_READ(dr, offset) \
(BWN_READ_4(dr->dr_mac, dr->dr_base + offset))
#define BWN_DMA_WRITE(dr, offset, value) \
(BWN_WRITE_4(dr->dr_mac, dr->dr_base + offset, value))
struct bwn_rate {
uint16_t rateid;
uint32_t flags;
};
#define BWN_ANT0 0
#define BWN_ANT1 1
#define BWN_ANTAUTO0 2
#define BWN_ANTAUTO1 3
#define BWN_ANT2 4
#define BWN_ANT3 8
#define BWN_ANTAUTO BWN_ANTAUTO0
#define BWN_ANT_DEFAULT BWN_ANTAUTO
#define BWN_TX_SLOTS_PER_FRAME 2
struct bwn_channel {
unsigned freq;
unsigned ieee;
unsigned maxTxPow;
};
struct bwn_channelinfo {
struct bwn_channel channels[IEEE80211_CHAN_MAX];
unsigned nchannels;
};
struct bwn_bbatt {
uint8_t att;
};
struct bwn_bbatt_list {
const struct bwn_bbatt *array;
uint8_t len;
uint8_t min;
uint8_t max;
};
struct bwn_rfatt {
uint8_t att;
int padmix;
};
struct bwn_rfatt_list {
const struct bwn_rfatt *array;
uint8_t len;
uint8_t min;
uint8_t max;
};
#define BWN_DC_LT_SIZE 32
struct bwn_loctl {
int8_t i;
int8_t q;
};
struct bwn_lo_calib {
struct bwn_bbatt bbatt;
struct bwn_rfatt rfatt;
struct bwn_loctl ctl;
unsigned long calib_time;
TAILQ_ENTRY(bwn_lo_calib) list;
};
struct bwn_rxhdr4 {
uint16_t frame_len;
uint8_t pad1[2];
uint16_t phy_status0;
union {
struct {
uint8_t rssi;
uint8_t sig_qual;
} __packed abg;
struct {
int8_t power0;
int8_t power1;
} __packed n;
} __packed phy;
uint16_t phy_status2;
uint16_t phy_status3;
uint32_t mac_status;
uint16_t mac_time;
uint16_t channel;
} __packed;
struct bwn_txstatus {
uint16_t cookie;
uint16_t seq;
uint8_t phy_stat;
uint8_t framecnt;
uint8_t rtscnt;
uint8_t sreason;
uint8_t pm;
uint8_t im;
uint8_t ampdu;
uint8_t ack;
};
#define BWN_TXCTL_PA3DB 0x40
#define BWN_TXCTL_PA2DB 0x20
#define BWN_TXCTL_TXMIX 0x10
struct bwn_txpwr_loctl {
struct bwn_rfatt_list rfatt;
struct bwn_bbatt_list bbatt;
uint16_t dc_lt[BWN_DC_LT_SIZE];
TAILQ_HEAD(, bwn_lo_calib) calib_list;
unsigned long pwr_vec_read_time;
unsigned long txctl_measured_time;
uint8_t tx_bias;
uint8_t tx_magn;
uint64_t power_vector;
};
#define BWN_OFDMTAB_DIR_UNKNOWN 0
#define BWN_OFDMTAB_DIR_READ 1
#define BWN_OFDMTAB_DIR_WRITE 2
struct bwn_phy_g {
unsigned pg_flags;
#define BWN_PHY_G_FLAG_TSSITABLE_ALLOC (1 << 0)
#define BWN_PHY_G_FLAG_RADIOCTX_VALID (1 << 1)
int pg_aci_enable;
int pg_aci_wlan_automatic;
int pg_aci_hw_rssi;
int pg_rf_on;
uint16_t pg_radioctx_over;
uint16_t pg_radioctx_overval;
uint16_t pg_minlowsig[2];
uint16_t pg_minlowsigpos[2];
int8_t *pg_tssi2dbm;
int pg_idletssi;
int pg_curtssi;
uint8_t pg_avgtssi;
struct bwn_bbatt pg_bbatt;
struct bwn_rfatt pg_rfatt;
uint8_t pg_txctl;
int pg_bbatt_delta;
int pg_rfatt_delta;
struct bwn_txpwr_loctl pg_loctl;
int16_t pg_max_lb_gain;
int16_t pg_trsw_rx_gain;
int16_t pg_lna_lod_gain;
int16_t pg_lna_gain;
int16_t pg_pga_gain;
int pg_immode;
#define BWN_INTERFSTACK_SIZE 26
uint32_t pg_interfstack[BWN_INTERFSTACK_SIZE];
int16_t pg_nrssi[2];
int32_t pg_nrssi_slope;
int8_t pg_nrssi_lt[64];
uint16_t pg_lofcal;
uint16_t pg_initval;
uint16_t pg_ofdmtab_addr;
unsigned pg_ofdmtab_dir;
};
#define BWN_IMMODE_NONE 0
#define BWN_IMMODE_NONWLAN 1
#define BWN_IMMODE_MANUAL 2
#define BWN_IMMODE_AUTO 3
#define BWN_TXPWR_RES_NEED_ADJUST 0
#define BWN_TXPWR_RES_DONE 1
#define BWN_PHYLP_TXPCTL_UNKNOWN 0
#define BWN_PHYLP_TXPCTL_OFF 1
#define BWN_PHYLP_TXPCTL_ON_SW 2
#define BWN_PHYLP_TXPCTL_ON_HW 3
struct bwn_phy_lp {
uint8_t plp_chan;
uint8_t plp_chanfullcal;
int32_t plp_antenna;
uint8_t plp_txpctlmode;
uint8_t plp_txisoband_h;
uint8_t plp_txisoband_m;
uint8_t plp_txisoband_l;
uint8_t plp_rxpwroffset;
int8_t plp_txpwridx;
uint16_t plp_tssiidx;
uint16_t plp_tssinpt;
uint8_t plp_rssivf;
uint8_t plp_rssivc;
uint8_t plp_rssigs;
uint8_t plp_rccap;
uint8_t plp_bxarch;
uint8_t plp_crsusr_off;
uint8_t plp_crssys_off;
uint32_t plp_div;
int32_t plp_tonefreq;
uint16_t plp_digfilt[9];
};
/* for LP */
struct bwn_txgain {
uint16_t tg_gm;
uint16_t tg_pga;
uint16_t tg_pad;
uint16_t tg_dac;
};
struct bwn_rxcompco {
uint8_t rc_chan;
int8_t rc_c1;
int8_t rc_c0;
};
struct bwn_phy_lp_iq_est {
uint32_t ie_iqprod;
uint32_t ie_ipwr;
uint32_t ie_qpwr;
};
struct bwn_txgain_entry {
uint8_t te_gm;
uint8_t te_pga;
uint8_t te_pad;
uint8_t te_dac;
uint8_t te_bbmult;
};
/* only for LP PHY */
struct bwn_stxtable {
uint16_t st_phyoffset;
uint16_t st_physhift;
uint16_t st_rfaddr;
uint16_t st_rfshift;
uint16_t st_mask;
};
struct bwn_b206x_chan {
uint8_t bc_chan;
uint16_t bc_freq;
const uint8_t *bc_data;
};
struct bwn_b206x_rfinit_entry {
uint16_t br_offset;
uint16_t br_valuea;
uint16_t br_valueg;
uint8_t br_flags;
};
struct bwn_phy {
uint8_t type;
uint8_t rev;
uint8_t analog;
int supports_2ghz;
int supports_5ghz;
int gmode;
struct bwn_phy_g phy_g;
struct bwn_phy_lp phy_lp;
uint16_t rf_manuf;
uint16_t rf_ver;
uint8_t rf_rev;
int rf_on;
int txpower;
int hwpctl;
unsigned long nexttime;
unsigned int chan;
int txerrors;
int (*attach)(struct bwn_mac *);
void (*detach)(struct bwn_mac *);
int (*prepare_hw)(struct bwn_mac *);
void (*init_pre)(struct bwn_mac *);
int (*init)(struct bwn_mac *);
void (*exit)(struct bwn_mac *);
uint16_t (*phy_read)(struct bwn_mac *, uint16_t);
void (*phy_write)(struct bwn_mac *, uint16_t,
uint16_t);
void (*phy_maskset)(struct bwn_mac *,
uint16_t, uint16_t, uint16_t);
uint16_t (*rf_read)(struct bwn_mac *, uint16_t);
void (*rf_write)(struct bwn_mac *, uint16_t,
uint16_t);
int (*use_hwpctl)(struct bwn_mac *);
void (*rf_onoff)(struct bwn_mac *, int);
void (*switch_analog)(struct bwn_mac *, int);
int (*switch_channel)(struct bwn_mac *,
unsigned int);
uint32_t (*get_default_chan)(struct bwn_mac *);
void (*set_antenna)(struct bwn_mac *, int);
int (*set_im)(struct bwn_mac *, int);
int (*recalc_txpwr)(struct bwn_mac *, int);
void (*set_txpwr)(struct bwn_mac *);
void (*task_15s)(struct bwn_mac *);
void (*task_60s)(struct bwn_mac *);
};
struct bwn_chan_band {
uint32_t flags;
uint8_t nchan;
#define BWN_MAX_CHAN_PER_BAND 14
uint8_t chan[BWN_MAX_CHAN_PER_BAND];
};
#define BWN_NR_WMEPARAMS 16
enum {
BWN_WMEPARAM_TXOP = 0,
BWN_WMEPARAM_CWMIN,
BWN_WMEPARAM_CWMAX,
BWN_WMEPARAM_CWCUR,
BWN_WMEPARAM_AIFS,
BWN_WMEPARAM_BSLOTS,
BWN_WMEPARAM_REGGAP,
BWN_WMEPARAM_STATUS,
};
#define BWN_WME_PARAMS(queue) \
(BWN_SHARED_EDCFQ + (BWN_NR_WMEPARAMS * sizeof(uint16_t) * (queue)))
#define BWN_WME_BACKGROUND BWN_WME_PARAMS(0)
#define BWN_WME_BESTEFFORT BWN_WME_PARAMS(1)
#define BWN_WME_VIDEO BWN_WME_PARAMS(2)
#define BWN_WME_VOICE BWN_WME_PARAMS(3)
/*
* Radio capture format.
*/
#define BWN_RX_RADIOTAP_PRESENT ( \
(1 << IEEE80211_RADIOTAP_TSFT) | \
(1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_ANTENNA) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | \
0)
struct bwn_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint64_t wr_tsf;
u_int8_t wr_flags;
u_int8_t wr_rate;
u_int16_t wr_chan_freq;
u_int16_t wr_chan_flags;
int8_t wr_antsignal;
int8_t wr_antnoise;
u_int8_t wr_antenna;
};
#define BWN_TX_RADIOTAP_PRESENT ( \
(1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_DBM_TX_POWER) | \
(1 << IEEE80211_RADIOTAP_ANTENNA) | \
0)
struct bwn_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
u_int8_t wt_flags;
u_int8_t wt_rate;
u_int16_t wt_chan_freq;
u_int16_t wt_chan_flags;
u_int8_t wt_txpower;
u_int8_t wt_antenna;
};
struct bwn_stats {
int32_t rtsfail;
int32_t rts;
int32_t link_noise;
};
/* Noise Calculation (Link Quality) */
struct bwn_noise {
uint8_t noi_running;
uint8_t noi_nsamples;
int8_t noi_samples[8][4];
};
#define BWN_DMA_30BIT 30
#define BWN_DMA_32BIT 32
#define BWN_DMA_64BIT 64
struct bwn_dmadesc_meta {
bus_dmamap_t mt_dmap;
bus_addr_t mt_paddr;
struct mbuf *mt_m;
struct ieee80211_node *mt_ni;
uint8_t mt_txtype;
#define BWN_DMADESC_METATYPE_HEADER 0
#define BWN_DMADESC_METATYPE_BODY 1
uint8_t mt_islast;
};
#define BWN_DMAINTR_FATALMASK \
((1 << 10) | (1 << 11) | (1 << 12) | (1 << 14) | (1 << 15))
#define BWN_DMAINTR_NONFATALMASK (1 << 13)
#define BWN_DMAINTR_RX_DONE (1 << 16)
#define BWN_DMA32_DCTL_BYTECNT 0x00001fff
#define BWN_DMA32_DCTL_ADDREXT_MASK 0x00030000
#define BWN_DMA32_DCTL_ADDREXT_SHIFT 16
#define BWN_DMA32_DCTL_DTABLEEND 0x10000000
#define BWN_DMA32_DCTL_IRQ 0x20000000
#define BWN_DMA32_DCTL_FRAMEEND 0x40000000
#define BWN_DMA32_DCTL_FRAMESTART 0x80000000
struct bwn_dmadesc32 {
uint32_t control;
uint32_t address;
} __packed;
#define BWN_DMA64_DCTL0_DTABLEEND 0x10000000
#define BWN_DMA64_DCTL0_IRQ 0x20000000
#define BWN_DMA64_DCTL0_FRAMEEND 0x40000000
#define BWN_DMA64_DCTL0_FRAMESTART 0x80000000
#define BWN_DMA64_DCTL1_BYTECNT 0x00001fff
#define BWN_DMA64_DCTL1_ADDREXT_MASK 0x00030000
#define BWN_DMA64_DCTL1_ADDREXT_SHIFT 16
struct bwn_dmadesc64 {
uint32_t control0;
uint32_t control1;
uint32_t address_low;
uint32_t address_high;
} __packed;
struct bwn_dmadesc_generic {
union {
struct bwn_dmadesc32 dma32;
struct bwn_dmadesc64 dma64;
} __packed dma;
} __packed;
struct bwn_dma_ring;
struct bwn_dma_ring {
struct bwn_mac *dr_mac;
const struct bwn_dma_ops *dr_ops;
struct bwn_dmadesc_meta *dr_meta;
void *dr_txhdr_cache;
bus_dma_tag_t dr_ring_dtag;
bus_dma_tag_t dr_txring_dtag;
bus_dmamap_t dr_spare_dmap; /* only for RX */
bus_dmamap_t dr_ring_dmap;
bus_addr_t dr_txring_paddr;
void *dr_ring_descbase;
bus_addr_t dr_ring_dmabase;
int dr_numslots;
int dr_usedslot;
int dr_curslot;
uint32_t dr_frameoffset;
uint16_t dr_rx_bufsize;
uint16_t dr_base;
int dr_index;
uint8_t dr_tx;
uint8_t dr_stop;
int dr_type;
void (*getdesc)(struct bwn_dma_ring *,
int, struct bwn_dmadesc_generic **,
struct bwn_dmadesc_meta **);
void (*setdesc)(struct bwn_dma_ring *,
struct bwn_dmadesc_generic *,
bus_addr_t, uint16_t, int, int,
int);
void (*start_transfer)(struct bwn_dma_ring *,
int);
void (*suspend)(struct bwn_dma_ring *);
void (*resume)(struct bwn_dma_ring *);
int (*get_curslot)(struct bwn_dma_ring *);
void (*set_curslot)(struct bwn_dma_ring *,
int);
};
struct bwn_dma {
int dmatype;
bus_dma_tag_t parent_dtag;
bus_dma_tag_t rxbuf_dtag;
bus_dma_tag_t txbuf_dtag;
struct bwn_dma_ring *wme[5];
struct bwn_dma_ring *mcast;
struct bwn_dma_ring *rx;
uint64_t lastseq; /* XXX FIXME */
};
struct bwn_pio_rxqueue {
struct bwn_mac *prq_mac;
uint16_t prq_base;
uint8_t prq_rev;
};
struct bwn_pio_txqueue;
struct bwn_pio_txpkt {
struct bwn_pio_txqueue *tp_queue;
struct ieee80211_node *tp_ni;
struct mbuf *tp_m;
uint8_t tp_index;
TAILQ_ENTRY(bwn_pio_txpkt) tp_list;
};
#define BWN_PIO_MAX_TXPACKETS 32
struct bwn_pio_txqueue {
uint16_t tq_base;
uint16_t tq_size;
uint16_t tq_used;
uint16_t tq_free;
- uint8_t tq_stop;
uint8_t tq_index;
struct bwn_pio_txpkt tq_pkts[BWN_PIO_MAX_TXPACKETS];
TAILQ_HEAD(, bwn_pio_txpkt) tq_pktlist;
};
struct bwn_pio {
struct bwn_pio_txqueue wme[5];
struct bwn_pio_txqueue mcast;
struct bwn_pio_rxqueue rx;
};
struct bwn_plcp4 {
union {
uint32_t data;
uint8_t raw[4];
} __packed o;
} __packed;
struct bwn_plcp6 {
union {
uint32_t data;
uint8_t raw[6];
} __packed o;
} __packed;
struct bwn_txhdr {
uint32_t macctl;
uint8_t macfc[2];
uint16_t tx_festime;
uint16_t phyctl;
uint16_t phyctl_1;
uint16_t phyctl_1fb;
uint16_t phyctl_1rts;
uint16_t phyctl_1rtsfb;
uint8_t phyrate;
uint8_t phyrate_rts;
uint8_t eftypes; /* extra frame types */
uint8_t chan;
uint8_t iv[16];
uint8_t addr1[IEEE80211_ADDR_LEN];
uint16_t tx_festime_fb;
struct bwn_plcp6 rts_plcp_fb;
uint16_t rts_dur_fb;
struct bwn_plcp6 plcp_fb;
uint16_t dur_fb;
uint16_t mimo_modelen;
uint16_t mimo_ratelen_fb;
uint32_t timeout;
union {
/* format <= r351 */
struct {
uint8_t pad0[2];
uint16_t cookie;
uint16_t tx_status;
struct bwn_plcp6 rts_plcp;
uint8_t rts_frame[16];
uint8_t pad1[2];
struct bwn_plcp6 plcp;
} __packed old;
/* format > r410 */
struct {
uint16_t mimo_antenna;
uint16_t preload_size;
uint8_t pad0[2];
uint16_t cookie;
uint16_t tx_status;
struct bwn_plcp6 rts_plcp;
uint8_t rts_frame[16];
uint8_t pad1[2];
struct bwn_plcp6 plcp;
} __packed new;
} __packed body;
} __packed;
#define BWN_FWTYPE_UCODE 'u'
#define BWN_FWTYPE_PCM 'p'
#define BWN_FWTYPE_IV 'i'
struct bwn_fwhdr {
uint8_t type;
uint8_t ver;
uint8_t pad[2];
uint32_t size;
} __packed;
#define BWN_FWINITVALS_OFFSET_MASK 0x7fff
#define BWN_FWINITVALS_32BIT 0x8000
struct bwn_fwinitvals {
uint16_t offset_size;
union {
uint16_t d16;
uint32_t d32;
} __packed data;
} __packed;
enum bwn_fwtype {
BWN_FWTYPE_DEFAULT,
BWN_FWTYPE_OPENSOURCE,
BWN_NR_FWTYPES,
};
struct bwn_fwfile {
const char *filename;
const struct firmware *fw;
enum bwn_fwtype type;
};
struct bwn_key {
void *keyconf;
uint8_t algorithm;
};
struct bwn_fw {
struct bwn_fwfile ucode;
struct bwn_fwfile pcm;
struct bwn_fwfile initvals;
struct bwn_fwfile initvals_band;
uint16_t rev;
uint16_t patch;
uint8_t opensource;
uint8_t no_pcmfile;
};
struct bwn_lo_g_sm {
int curstate;
int nmeasure;
int multipler;
uint16_t feedth;
struct bwn_loctl loctl;
};
struct bwn_lo_g_value {
uint8_t old_channel;
uint16_t phy_lomask;
uint16_t phy_extg;
uint16_t phy_dacctl_hwpctl;
uint16_t phy_dacctl;
uint16_t phy_hpwr_tssictl;
uint16_t phy_analogover;
uint16_t phy_analogoverval;
uint16_t phy_rfover;
uint16_t phy_rfoverval;
uint16_t phy_classctl;
uint16_t phy_crs0;
uint16_t phy_pgactl;
uint16_t phy_syncctl;
uint16_t phy_cck0;
uint16_t phy_cck1;
uint16_t phy_cck2;
uint16_t phy_cck3;
uint16_t phy_cck4;
uint16_t reg0;
uint16_t reg1;
uint16_t rf0;
uint16_t rf1;
uint16_t rf2;
};
#define BWN_LED_MAX 4
#define BWN_LED_EVENT_NONE -1
#define BWN_LED_EVENT_POLL 0
#define BWN_LED_EVENT_TX 1
#define BWN_LED_EVENT_RX 2
#define BWN_LED_SLOWDOWN(dur) (dur) = (((dur) * 3) / 2)
struct bwn_led {
uint8_t led_flags; /* BWN_LED_F_ */
uint8_t led_act; /* BWN_LED_ACT_ */
uint8_t led_mask;
};
#define BWN_LED_F_ACTLOW 0x1
#define BWN_LED_F_BLINK 0x2
#define BWN_LED_F_POLLABLE 0x4
#define BWN_LED_F_SLOW 0x8
struct bwn_mac {
struct bwn_softc *mac_sc;
unsigned mac_status;
#define BWN_MAC_STATUS_UNINIT 0
#define BWN_MAC_STATUS_INITED 1
#define BWN_MAC_STATUS_STARTED 2
unsigned mac_flags;
/* use "Bad Frames Preemption" */
#define BWN_MAC_FLAG_BADFRAME_PREEMP (1 << 0)
#define BWN_MAC_FLAG_DFQVALID (1 << 1)
#define BWN_MAC_FLAG_RADIO_ON (1 << 2)
#define BWN_MAC_FLAG_DMA (1 << 3)
#define BWN_MAC_FLAG_WME (1 << 4)
#define BWN_MAC_FLAG_HWCRYPTO (1 << 5)
struct resource_spec *mac_intr_spec;
#define BWN_MSI_MESSAGES 1
struct resource *mac_res_irq[BWN_MSI_MESSAGES];
void *mac_intrhand[BWN_MSI_MESSAGES];
int mac_msi;
struct bwn_noise mac_noise;
struct bwn_phy mac_phy;
struct bwn_stats mac_stats;
uint32_t mac_reason_intr;
uint32_t mac_reason[6];
uint32_t mac_intr_mask;
int mac_suspended;
struct bwn_fw mac_fw;
union {
struct bwn_dma dma;
struct bwn_pio pio;
} mac_method;
uint16_t mac_ktp; /* Key table pointer */
uint8_t mac_max_nr_keys;
struct bwn_key mac_key[58];
unsigned int mac_task_state;
struct task mac_intrtask;
struct task mac_hwreset;
struct task mac_txpower;
TAILQ_ENTRY(bwn_mac) mac_list;
};
/*
* Driver-specific vap state.
*/
struct bwn_vap {
struct ieee80211vap bv_vap; /* base class */
int (*bv_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define BWN_VAP(vap) ((struct bwn_vap *)(vap))
#define BWN_VAP_CONST(vap) ((const struct bwn_vap *)(vap))
struct bwn_softc {
device_t sc_dev;
struct mtx sc_mtx;
- struct ifnet *sc_ifp;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
unsigned sc_flags;
#define BWN_FLAG_ATTACHED (1 << 0)
#define BWN_FLAG_INVALID (1 << 1)
#define BWN_FLAG_NEED_BEACON_TP (1 << 2)
+#define BWN_FLAG_RUNNING (1 << 3)
unsigned sc_debug;
struct bwn_mac *sc_curmac;
TAILQ_HEAD(, bwn_mac) sc_maclist;
- uint8_t sc_macaddr[IEEE80211_ADDR_LEN];
uint8_t sc_bssid[IEEE80211_ADDR_LEN];
unsigned int sc_filters;
uint8_t sc_beacons[2];
uint8_t sc_rf_enabled;
struct wmeParams sc_wmeParams[4];
struct callout sc_rfswitch_ch; /* for laptop */
struct callout sc_task_ch;
struct callout sc_watchdog_ch;
int sc_watchdog_timer;
struct taskqueue *sc_tq; /* private task queue */
int (*sc_newstate)(struct ieee80211com *,
enum ieee80211_state, int);
void (*sc_node_cleanup)(
struct ieee80211_node *);
int sc_rx_rate;
int sc_tx_rate;
int sc_led_blinking;
int sc_led_ticks;
struct bwn_led *sc_blink_led;
struct callout sc_led_blink_ch;
int sc_led_blink_offdur;
struct bwn_led sc_leds[BWN_LED_MAX];
int sc_led_idle;
int sc_led_blink;
struct bwn_tx_radiotap_header sc_tx_th;
struct bwn_rx_radiotap_header sc_rx_th;
};
#define BWN_LOCK_INIT(sc) \
mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->sc_dev), \
MTX_NETWORK_LOCK, MTX_DEF)
#define BWN_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx)
#define BWN_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define BWN_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
#define BWN_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
#endif /* !_IF_BWNVAR_H */
Index: head/sys/dev/if_ndis/if_ndis.c
===================================================================
--- head/sys/dev/if_ndis/if_ndis.c (revision 287196)
+++ head/sys/dev/if_ndis/if_ndis.c (revision 287197)
@@ -1,3397 +1,3415 @@
/*-
* Copyright (c) 2003
* Bill Paul <wpaul@windriver.com>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* WPA support originally contributed by Arvind Srinivasan <arvind@celar.us>
* then hacked upon mercilessly by me.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/endian.h>
#include <sys/priv.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>
+#include <sys/limits.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/bpf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_ioctl.h>
#include <net80211/ieee80211_regdomain.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>
#include <compat/ndis/usbd_var.h>
#include <dev/if_ndis/if_ndisvar.h>
#define NDIS_DEBUG
#ifdef NDIS_DEBUG
#define DPRINTF(x) do { if (ndis_debug > 0) printf x; } while (0)
int ndis_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, ndis, CTLFLAG_RW, &ndis_debug, 0,
"if_ndis debug level");
#else
#define DPRINTF(x)
#endif
SYSCTL_DECL(_hw_ndisusb);
int ndisusb_halt = 1;
SYSCTL_INT(_hw_ndisusb, OID_AUTO, halt, CTLFLAG_RW, &ndisusb_halt, 0,
"Halt NDIS USB driver when it's attached");
/* 0 - 30 dBm to mW conversion table */
static const uint16_t dBm2mW[] = {
1, 1, 1, 1, 2, 2, 2, 2, 3, 3,
3, 4, 4, 4, 5, 6, 6, 7, 8, 9,
10, 11, 13, 14, 16, 18, 20, 22, 25, 28,
32, 35, 40, 45, 50, 56, 63, 71, 79, 89,
100, 112, 126, 141, 158, 178, 200, 224, 251, 282,
316, 355, 398, 447, 501, 562, 631, 708, 794, 891,
1000
};
MODULE_DEPEND(ndis, ether, 1, 1, 1);
MODULE_DEPEND(ndis, wlan, 1, 1, 1);
MODULE_DEPEND(ndis, ndisapi, 1, 1, 1);
MODULE_VERSION(ndis, 1);
int ndis_attach (device_t);
int ndis_detach (device_t);
int ndis_suspend (device_t);
int ndis_resume (device_t);
void ndis_shutdown (device_t);
int ndisdrv_modevent (module_t, int, void *);
static void ndis_txeof (ndis_handle, ndis_packet *, ndis_status);
static void ndis_rxeof (ndis_handle, ndis_packet **, uint32_t);
static void ndis_rxeof_eth (ndis_handle, ndis_handle, char *, void *,
uint32_t, void *, uint32_t, uint32_t);
static void ndis_rxeof_done (ndis_handle);
static void ndis_rxeof_xfr (kdpc *, ndis_handle, void *, void *);
static void ndis_rxeof_xfr_done (ndis_handle, ndis_packet *,
uint32_t, uint32_t);
static void ndis_linksts (ndis_handle, ndis_status, void *, uint32_t);
static void ndis_linksts_done (ndis_handle);
/* We need to wrap these functions for amd64. */
static funcptr ndis_txeof_wrap;
static funcptr ndis_rxeof_wrap;
static funcptr ndis_rxeof_eth_wrap;
static funcptr ndis_rxeof_done_wrap;
static funcptr ndis_rxeof_xfr_wrap;
static funcptr ndis_rxeof_xfr_done_wrap;
static funcptr ndis_linksts_wrap;
static funcptr ndis_linksts_done_wrap;
static funcptr ndis_ticktask_wrap;
static funcptr ndis_starttask_wrap;
static funcptr ndis_resettask_wrap;
static funcptr ndis_inputtask_wrap;
static struct ieee80211vap *ndis_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void ndis_vap_delete (struct ieee80211vap *);
static void ndis_tick (void *);
static void ndis_ticktask (device_object *, void *);
static int ndis_raw_xmit (struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void ndis_update_mcast (struct ieee80211com *);
static void ndis_update_promisc (struct ieee80211com *);
static void ndis_start (struct ifnet *);
static void ndis_starttask (device_object *, void *);
static void ndis_resettask (device_object *, void *);
static void ndis_inputtask (device_object *, void *);
static int ndis_ioctl (struct ifnet *, u_long, caddr_t);
-static int ndis_ioctl_80211 (struct ifnet *, u_long, caddr_t);
static int ndis_newstate (struct ieee80211vap *, enum ieee80211_state,
int);
static int ndis_nettype_chan (uint32_t);
static int ndis_nettype_mode (uint32_t);
static void ndis_scan (void *);
static void ndis_scan_results (struct ndis_softc *);
static void ndis_scan_start (struct ieee80211com *);
static void ndis_scan_end (struct ieee80211com *);
static void ndis_set_channel (struct ieee80211com *);
static void ndis_scan_curchan (struct ieee80211_scan_state *, unsigned long);
static void ndis_scan_mindwell (struct ieee80211_scan_state *);
static void ndis_init (void *);
static void ndis_stop (struct ndis_softc *);
static int ndis_ifmedia_upd (struct ifnet *);
static void ndis_ifmedia_sts (struct ifnet *, struct ifmediareq *);
static int ndis_get_bssid_list (struct ndis_softc *,
ndis_80211_bssid_list_ex **);
static int ndis_get_assoc (struct ndis_softc *, ndis_wlan_bssid_ex **);
static int ndis_probe_offload (struct ndis_softc *);
static int ndis_set_offload (struct ndis_softc *);
static void ndis_getstate_80211 (struct ndis_softc *);
static void ndis_setstate_80211 (struct ndis_softc *);
static void ndis_auth_and_assoc (struct ndis_softc *, struct ieee80211vap *);
static void ndis_media_status (struct ifnet *, struct ifmediareq *);
static int ndis_set_cipher (struct ndis_softc *, int);
static int ndis_set_wpa (struct ndis_softc *, void *, int);
static int ndis_add_key (struct ieee80211vap *,
const struct ieee80211_key *, const u_int8_t []);
static int ndis_del_key (struct ieee80211vap *,
const struct ieee80211_key *);
-
static void ndis_setmulti (struct ndis_softc *);
static void ndis_map_sclist (void *, bus_dma_segment_t *,
int, bus_size_t, int);
+static int ndis_ifattach(struct ndis_softc *);
+static int ndis_80211attach(struct ndis_softc *);
+static int ndis_80211ioctl(struct ieee80211com *, u_long , void *);
+static int ndis_80211transmit(struct ieee80211com *, struct mbuf *);
+static void ndis_80211parent(struct ieee80211com *);
+
static int ndisdrv_loaded = 0;
/*
* This routine should call windrv_load() once for each driver
* image. This will do the relocation and dynamic linking for the
* image, and create a Windows driver object which will be
* saved in our driver database.
*/
int
ndisdrv_modevent(mod, cmd, arg)
module_t mod;
int cmd;
void *arg;
{
int error = 0;
switch (cmd) {
case MOD_LOAD:
ndisdrv_loaded++;
if (ndisdrv_loaded > 1)
break;
windrv_wrap((funcptr)ndis_rxeof, &ndis_rxeof_wrap,
3, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_rxeof_eth, &ndis_rxeof_eth_wrap,
8, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_rxeof_done, &ndis_rxeof_done_wrap,
1, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_rxeof_xfr, &ndis_rxeof_xfr_wrap,
4, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_rxeof_xfr_done,
&ndis_rxeof_xfr_done_wrap, 4, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_txeof, &ndis_txeof_wrap,
3, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_linksts, &ndis_linksts_wrap,
4, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_linksts_done,
&ndis_linksts_done_wrap, 1, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_ticktask, &ndis_ticktask_wrap,
2, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_starttask, &ndis_starttask_wrap,
2, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_resettask, &ndis_resettask_wrap,
2, WINDRV_WRAP_STDCALL);
windrv_wrap((funcptr)ndis_inputtask, &ndis_inputtask_wrap,
2, WINDRV_WRAP_STDCALL);
break;
case MOD_UNLOAD:
ndisdrv_loaded--;
if (ndisdrv_loaded > 0)
break;
/* fallthrough */
case MOD_SHUTDOWN:
windrv_unwrap(ndis_rxeof_wrap);
windrv_unwrap(ndis_rxeof_eth_wrap);
windrv_unwrap(ndis_rxeof_done_wrap);
windrv_unwrap(ndis_rxeof_xfr_wrap);
windrv_unwrap(ndis_rxeof_xfr_done_wrap);
windrv_unwrap(ndis_txeof_wrap);
windrv_unwrap(ndis_linksts_wrap);
windrv_unwrap(ndis_linksts_done_wrap);
windrv_unwrap(ndis_ticktask_wrap);
windrv_unwrap(ndis_starttask_wrap);
windrv_unwrap(ndis_resettask_wrap);
windrv_unwrap(ndis_inputtask_wrap);
break;
default:
error = EINVAL;
break;
}
return (error);
}
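/*
 * The wrap/unwrap calls above all follow one pattern: for each callback
 * that Windows miniport code may invoke, windrv_wrap() builds a thunk
 * that converts from the Windows calling convention to the native one,
 * taking the C function, a place to store the thunk, the argument count
 * and the convention type. A sketch of the idiom for a hypothetical
 * two-argument handler (names here are placeholders, not real fields):
 *
 *	static funcptr my_handler_wrap;
 *	windrv_wrap((funcptr)my_handler, &my_handler_wrap,
 *	    2, WINDRV_WRAP_STDCALL);
 *	sc->ndis_block->nmb_some_func = my_handler_wrap;
 *	...
 *	windrv_unwrap(my_handler_wrap);
 *
 * The wrapped pointer, not the C function itself, is what gets handed
 * to the NDIS miniport block.
 */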
/*
* Program the 64-bit multicast hash filter.
*/
static void
ndis_setmulti(sc)
struct ndis_softc *sc;
{
struct ifnet *ifp;
struct ifmultiaddr *ifma;
int len, mclistsz, error;
uint8_t *mclist;
ifp = sc->ifp;
if (!NDIS_INITIALIZED(sc))
return;
if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
len = sizeof(sc->ndis_filter);
error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER,
&sc->ndis_filter, &len);
if (error)
device_printf(sc->ndis_dev,
"set allmulti failed: %d\n", error);
return;
}
if (TAILQ_EMPTY(&ifp->if_multiaddrs))
return;
len = sizeof(mclistsz);
ndis_get_info(sc, OID_802_3_MAXIMUM_LIST_SIZE, &mclistsz, &len);
mclist = malloc(ETHER_ADDR_LEN * mclistsz, M_TEMP, M_NOWAIT|M_ZERO);
if (mclist == NULL) {
sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
goto out;
}
sc->ndis_filter |= NDIS_PACKET_TYPE_MULTICAST;
len = 0;
if_maddr_rlock(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family != AF_LINK)
continue;
bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
mclist + (ETHER_ADDR_LEN * len), ETHER_ADDR_LEN);
len++;
if (len > mclistsz) {
if_maddr_runlock(ifp);
sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
sc->ndis_filter &= ~NDIS_PACKET_TYPE_MULTICAST;
goto out;
}
}
if_maddr_runlock(ifp);
len = len * ETHER_ADDR_LEN;
error = ndis_set_info(sc, OID_802_3_MULTICAST_LIST, mclist, &len);
if (error) {
device_printf(sc->ndis_dev, "set mclist failed: %d\n", error);
sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
sc->ndis_filter &= ~NDIS_PACKET_TYPE_MULTICAST;
}
out:
free(mclist, M_TEMP);
len = sizeof(sc->ndis_filter);
error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER,
&sc->ndis_filter, &len);
if (error)
device_printf(sc->ndis_dev, "set multi failed: %d\n", error);
}
static int
ndis_set_offload(sc)
struct ndis_softc *sc;
{
ndis_task_offload *nto;
ndis_task_offload_hdr *ntoh;
ndis_task_tcpip_csum *nttc;
struct ifnet *ifp;
int len, error;
ifp = sc->ifp;
if (!NDIS_INITIALIZED(sc))
return (EINVAL);
/* See if there's anything to set. */
error = ndis_probe_offload(sc);
if (error)
return (error);
if (sc->ndis_hwassist == 0 && ifp->if_capabilities == 0)
return (0);
len = sizeof(ndis_task_offload_hdr) + sizeof(ndis_task_offload) +
sizeof(ndis_task_tcpip_csum);
ntoh = malloc(len, M_TEMP, M_NOWAIT|M_ZERO);
if (ntoh == NULL)
return (ENOMEM);
ntoh->ntoh_vers = NDIS_TASK_OFFLOAD_VERSION;
ntoh->ntoh_len = sizeof(ndis_task_offload_hdr);
ntoh->ntoh_offset_firsttask = sizeof(ndis_task_offload_hdr);
ntoh->ntoh_encapfmt.nef_encaphdrlen = sizeof(struct ether_header);
ntoh->ntoh_encapfmt.nef_encap = NDIS_ENCAP_IEEE802_3;
ntoh->ntoh_encapfmt.nef_flags = NDIS_ENCAPFLAG_FIXEDHDRLEN;
nto = (ndis_task_offload *)((char *)ntoh +
ntoh->ntoh_offset_firsttask);
nto->nto_vers = NDIS_TASK_OFFLOAD_VERSION;
nto->nto_len = sizeof(ndis_task_offload);
nto->nto_task = NDIS_TASK_TCPIP_CSUM;
nto->nto_offset_nexttask = 0;
nto->nto_taskbuflen = sizeof(ndis_task_tcpip_csum);
nttc = (ndis_task_tcpip_csum *)nto->nto_taskbuf;
if (ifp->if_capenable & IFCAP_TXCSUM)
nttc->nttc_v4tx = sc->ndis_v4tx;
if (ifp->if_capenable & IFCAP_RXCSUM)
nttc->nttc_v4rx = sc->ndis_v4rx;
error = ndis_set_info(sc, OID_TCP_TASK_OFFLOAD, ntoh, &len);
free(ntoh, M_TEMP);
return (error);
}
static int
ndis_probe_offload(sc)
struct ndis_softc *sc;
{
ndis_task_offload *nto;
ndis_task_offload_hdr *ntoh;
ndis_task_tcpip_csum *nttc = NULL;
struct ifnet *ifp;
int len, error, dummy;
ifp = sc->ifp;
len = sizeof(dummy);
error = ndis_get_info(sc, OID_TCP_TASK_OFFLOAD, &dummy, &len);
if (error != ENOSPC)
return (error);
ntoh = malloc(len, M_TEMP, M_NOWAIT|M_ZERO);
if (ntoh == NULL)
return (ENOMEM);
ntoh->ntoh_vers = NDIS_TASK_OFFLOAD_VERSION;
ntoh->ntoh_len = sizeof(ndis_task_offload_hdr);
ntoh->ntoh_encapfmt.nef_encaphdrlen = sizeof(struct ether_header);
ntoh->ntoh_encapfmt.nef_encap = NDIS_ENCAP_IEEE802_3;
ntoh->ntoh_encapfmt.nef_flags = NDIS_ENCAPFLAG_FIXEDHDRLEN;
error = ndis_get_info(sc, OID_TCP_TASK_OFFLOAD, ntoh, &len);
if (error) {
free(ntoh, M_TEMP);
return (error);
}
if (ntoh->ntoh_vers != NDIS_TASK_OFFLOAD_VERSION) {
free(ntoh, M_TEMP);
return (EINVAL);
}
nto = (ndis_task_offload *)((char *)ntoh +
ntoh->ntoh_offset_firsttask);
while (1) {
switch (nto->nto_task) {
case NDIS_TASK_TCPIP_CSUM:
nttc = (ndis_task_tcpip_csum *)nto->nto_taskbuf;
break;
/* Don't handle these yet. */
case NDIS_TASK_IPSEC:
case NDIS_TASK_TCP_LARGESEND:
default:
break;
}
if (nto->nto_offset_nexttask == 0)
break;
nto = (ndis_task_offload *)((char *)nto +
nto->nto_offset_nexttask);
}
if (nttc == NULL) {
free(ntoh, M_TEMP);
return (ENOENT);
}
sc->ndis_v4tx = nttc->nttc_v4tx;
sc->ndis_v4rx = nttc->nttc_v4rx;
if (nttc->nttc_v4tx & NDIS_TCPSUM_FLAGS_IP_CSUM)
sc->ndis_hwassist |= CSUM_IP;
if (nttc->nttc_v4tx & NDIS_TCPSUM_FLAGS_TCP_CSUM)
sc->ndis_hwassist |= CSUM_TCP;
if (nttc->nttc_v4tx & NDIS_TCPSUM_FLAGS_UDP_CSUM)
sc->ndis_hwassist |= CSUM_UDP;
if (sc->ndis_hwassist)
ifp->if_capabilities |= IFCAP_TXCSUM;
if (nttc->nttc_v4rx & NDIS_TCPSUM_FLAGS_IP_CSUM)
ifp->if_capabilities |= IFCAP_RXCSUM;
if (nttc->nttc_v4rx & NDIS_TCPSUM_FLAGS_TCP_CSUM)
ifp->if_capabilities |= IFCAP_RXCSUM;
if (nttc->nttc_v4rx & NDIS_TCPSUM_FLAGS_UDP_CSUM)
ifp->if_capabilities |= IFCAP_RXCSUM;
free(ntoh, M_TEMP);
return (0);
}
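/*
 * For reference, the buffer parsed above is laid out as a single
 * ndis_task_offload_hdr followed by a chain of ndis_task_offload
 * records, each carrying a task-specific payload and linking to the
 * next record through nto_offset_nexttask (0 terminates the chain):
 *
 *	+------------------------+
 *	| ndis_task_offload_hdr  |  ntoh_offset_firsttask
 *	+------------------------+
 *	| ndis_task_offload      |  nto_taskbuf: e.g. ndis_task_tcpip_csum
 *	+------------------------+
 *	| ndis_task_offload      |  next task, if nto_offset_nexttask != 0
 *	+------------------------+
 */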
static int
ndis_nettype_chan(uint32_t type)
{
switch (type) {
case NDIS_80211_NETTYPE_11FH: return (IEEE80211_CHAN_FHSS);
case NDIS_80211_NETTYPE_11DS: return (IEEE80211_CHAN_B);
case NDIS_80211_NETTYPE_11OFDM5: return (IEEE80211_CHAN_A);
case NDIS_80211_NETTYPE_11OFDM24: return (IEEE80211_CHAN_G);
}
DPRINTF(("unknown channel nettype %d\n", type));
return (IEEE80211_CHAN_B); /* Default to 11B chan */
}
static int
ndis_nettype_mode(uint32_t type)
{
switch (type) {
case NDIS_80211_NETTYPE_11FH: return (IEEE80211_MODE_FH);
case NDIS_80211_NETTYPE_11DS: return (IEEE80211_MODE_11B);
case NDIS_80211_NETTYPE_11OFDM5: return (IEEE80211_MODE_11A);
case NDIS_80211_NETTYPE_11OFDM24: return (IEEE80211_MODE_11G);
}
DPRINTF(("unknown mode nettype %d\n", type));
return (IEEE80211_MODE_AUTO);
}
/*
* Attach the interface. Allocate softc structures, do ifmedia
* setup and ethernet/BPF attach.
*/
int
-ndis_attach(dev)
- device_t dev;
+ndis_attach(device_t dev)
{
- u_char eaddr[ETHER_ADDR_LEN];
struct ndis_softc *sc;
driver_object *pdrv;
device_object *pdo;
- struct ifnet *ifp = NULL;
- int error = 0, len, mode;
- uint8_t bands = 0;
+ int error = 0, len;
int i;
sc = device_get_softc(dev);
mtx_init(&sc->ndis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
KeInitializeSpinLock(&sc->ndis_rxlock);
KeInitializeSpinLock(&sc->ndisusb_tasklock);
KeInitializeSpinLock(&sc->ndisusb_xferdonelock);
InitializeListHead(&sc->ndis_shlist);
InitializeListHead(&sc->ndisusb_tasklist);
InitializeListHead(&sc->ndisusb_xferdonelist);
callout_init(&sc->ndis_stat_callout, 1);
+ mbufq_init(&sc->ndis_rxqueue, INT_MAX); /* XXXGL: sane maximum */
if (sc->ndis_iftype == PCMCIABus) {
error = ndis_alloc_amem(sc);
if (error) {
device_printf(dev, "failed to allocate "
"attribute memory\n");
goto fail;
}
}
/* Create sysctl registry nodes */
ndis_create_sysctls(sc);
/* Find the PDO for this device instance. */
if (sc->ndis_iftype == PCIBus)
pdrv = windrv_lookup(0, "PCI Bus");
else if (sc->ndis_iftype == PCMCIABus)
pdrv = windrv_lookup(0, "PCCARD Bus");
else
pdrv = windrv_lookup(0, "USB Bus");
pdo = windrv_find_pdo(pdrv, dev);
/*
* Create a new functional device object for this
* device. This is what creates the miniport block
* for this device instance.
*/
if (NdisAddDevice(sc->ndis_dobj, pdo) != STATUS_SUCCESS) {
device_printf(dev, "failed to create FDO!\n");
error = ENXIO;
goto fail;
}
/* Tell the user what version of the API the driver is using. */
device_printf(dev, "NDIS API version: %d.%d\n",
sc->ndis_chars->nmc_version_major,
sc->ndis_chars->nmc_version_minor);
/* Do resource conversion. */
if (sc->ndis_iftype == PCMCIABus || sc->ndis_iftype == PCIBus)
ndis_convert_res(sc);
else
sc->ndis_block->nmb_rlist = NULL;
/* Install our RX and TX interrupt handlers. */
sc->ndis_block->nmb_senddone_func = ndis_txeof_wrap;
sc->ndis_block->nmb_pktind_func = ndis_rxeof_wrap;
sc->ndis_block->nmb_ethrxindicate_func = ndis_rxeof_eth_wrap;
sc->ndis_block->nmb_ethrxdone_func = ndis_rxeof_done_wrap;
sc->ndis_block->nmb_tdcond_func = ndis_rxeof_xfr_done_wrap;
/* Override the status handler so we can detect link changes. */
sc->ndis_block->nmb_status_func = ndis_linksts_wrap;
sc->ndis_block->nmb_statusdone_func = ndis_linksts_done_wrap;
/* Set up work item handlers. */
sc->ndis_tickitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj);
sc->ndis_startitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj);
sc->ndis_resetitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj);
sc->ndis_inputitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj);
sc->ndisusb_xferdoneitem =
IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj);
sc->ndisusb_taskitem =
IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj);
KeInitializeDpc(&sc->ndis_rxdpc, ndis_rxeof_xfr_wrap, sc->ndis_block);
/* Call driver's init routine. */
if (ndis_init_nic(sc)) {
device_printf(dev, "init handler failed\n");
error = ENXIO;
goto fail;
}
/*
- * Get station address from the driver.
- */
- len = sizeof(eaddr);
- ndis_get_info(sc, OID_802_3_CURRENT_ADDRESS, &eaddr, &len);
-
- /*
* Figure out how big to make the TX buffer pool.
*/
-
len = sizeof(sc->ndis_maxpkts);
if (ndis_get_info(sc, OID_GEN_MAXIMUM_SEND_PACKETS,
&sc->ndis_maxpkts, &len)) {
device_printf(dev, "failed to get max TX packets\n");
error = ENXIO;
goto fail;
}
/*
* If this is a deserialized miniport, we don't have
* to honor the OID_GEN_MAXIMUM_SEND_PACKETS result.
*/
if (!NDIS_SERIALIZED(sc->ndis_block))
sc->ndis_maxpkts = NDIS_TXPKTS;
/* Enforce some sanity, just in case. */
if (sc->ndis_maxpkts == 0)
sc->ndis_maxpkts = 10;
sc->ndis_txarray = malloc(sizeof(ndis_packet *) *
sc->ndis_maxpkts, M_DEVBUF, M_NOWAIT|M_ZERO);
/* Allocate a pool of ndis_packets for TX encapsulation. */
NdisAllocatePacketPool(&i, &sc->ndis_txpool,
sc->ndis_maxpkts, PROTOCOL_RESERVED_SIZE_IN_PACKET);
if (i != NDIS_STATUS_SUCCESS) {
sc->ndis_txpool = NULL;
device_printf(dev, "failed to allocate TX packet pool");
error = ENOMEM;
goto fail;
}
sc->ndis_txpending = sc->ndis_maxpkts;
sc->ndis_oidcnt = 0;
/* Get supported oid list. */
ndis_get_supported_oids(sc, &sc->ndis_oids, &sc->ndis_oidcnt);
/* If the NDIS module requested scatter/gather, init maps. */
if (sc->ndis_sc)
ndis_init_dma(sc);
/*
* See if the OID_802_11_CONFIGURATION OID is
* supported by this driver. If it is, then this is an 802.11
* wireless driver, and we should set up media for wireless.
*/
for (i = 0; i < sc->ndis_oidcnt; i++)
if (sc->ndis_oids[i] == OID_802_11_CONFIGURATION) {
- sc->ndis_80211++;
+ sc->ndis_80211 = 1;
break;
}
if (sc->ndis_80211)
- ifp = if_alloc(IFT_IEEE80211);
+ error = ndis_80211attach(sc);
else
- ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- error = ENOSPC;
- goto fail;
+ error = ndis_ifattach(sc);
+
+fail:
+ if (error) {
+ ndis_detach(dev);
+ return (error);
}
- sc->ifp = ifp;
- ifp->if_softc = sc;
- /* Check for task offload support. */
- ndis_probe_offload(sc);
+ if (sc->ndis_iftype == PNPBus && ndisusb_halt == 0)
+ return (error);
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = ndis_ioctl;
- ifp->if_start = ndis_start;
- ifp->if_init = ndis_init;
- ifp->if_baudrate = 10000000;
- IFQ_SET_MAXLEN(&ifp->if_snd, 50);
- ifp->if_snd.ifq_drv_maxlen = 25;
- IFQ_SET_READY(&ifp->if_snd);
- ifp->if_capenable = ifp->if_capabilities;
- ifp->if_hwassist = sc->ndis_hwassist;
+ DPRINTF(("attach done.\n"));
+ /* We're done talking to the NIC for now; halt it. */
+ ndis_halt_nic(sc);
+ DPRINTF(("halting done.\n"));
- /* Do media setup */
- if (sc->ndis_80211) {
- struct ieee80211com *ic = ifp->if_l2com;
- ndis_80211_rates_ex rates;
- struct ndis_80211_nettype_list *ntl;
- uint32_t arg;
- int r;
+ return (error);
+}
- callout_init(&sc->ndis_scan_callout, 1);
+static int
+ndis_80211attach(struct ndis_softc *sc)
+{
+ struct ieee80211com *ic = &sc->ndis_ic;
+ ndis_80211_rates_ex rates;
+ struct ndis_80211_nettype_list *ntl;
+ uint32_t arg;
+ int mode, i, r, len;
+ uint8_t bands = 0;
- ifp->if_ioctl = ndis_ioctl_80211;
- ic->ic_ifp = ifp;
- ic->ic_softc = sc;
- ic->ic_name = device_get_nameunit(dev);
- ic->ic_opmode = IEEE80211_M_STA;
- ic->ic_phytype = IEEE80211_T_DS;
- ic->ic_caps = IEEE80211_C_8023ENCAP |
- IEEE80211_C_STA | IEEE80211_C_IBSS;
- setbit(ic->ic_modecaps, IEEE80211_MODE_AUTO);
- len = 0;
- r = ndis_get_info(sc, OID_802_11_NETWORK_TYPES_SUPPORTED,
- NULL, &len);
- if (r != ENOSPC)
- goto nonettypes;
- ntl = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
- r = ndis_get_info(sc, OID_802_11_NETWORK_TYPES_SUPPORTED,
- ntl, &len);
- if (r != 0) {
- free(ntl, M_DEVBUF);
- goto nonettypes;
- }
+ callout_init(&sc->ndis_scan_callout, 1);
- for (i = 0; i < ntl->ntl_items; i++) {
- mode = ndis_nettype_mode(ntl->ntl_type[i]);
- if (mode) {
- setbit(ic->ic_modecaps, mode);
- setbit(&bands, mode);
- } else
- device_printf(dev, "Unknown nettype %d\n",
- ntl->ntl_type[i]);
- }
+ ic->ic_softc = sc;
+ ic->ic_ioctl = ndis_80211ioctl;
+ ic->ic_name = device_get_nameunit(sc->ndis_dev);
+ ic->ic_opmode = IEEE80211_M_STA;
+ ic->ic_phytype = IEEE80211_T_DS;
+ ic->ic_caps = IEEE80211_C_8023ENCAP |
+ IEEE80211_C_STA | IEEE80211_C_IBSS;
+ setbit(ic->ic_modecaps, IEEE80211_MODE_AUTO);
+ len = 0;
+ r = ndis_get_info(sc, OID_802_11_NETWORK_TYPES_SUPPORTED, NULL, &len);
+ if (r != ENOSPC)
+ goto nonettypes;
+ ntl = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
+ r = ndis_get_info(sc, OID_802_11_NETWORK_TYPES_SUPPORTED, ntl, &len);
+ if (r != 0) {
free(ntl, M_DEVBUF);
+ goto nonettypes;
+ }
+
+ for (i = 0; i < ntl->ntl_items; i++) {
+ mode = ndis_nettype_mode(ntl->ntl_type[i]);
+ if (mode) {
+ setbit(ic->ic_modecaps, mode);
+ setbit(&bands, mode);
+ } else
+ device_printf(sc->ndis_dev, "Unknown nettype %d\n",
+ ntl->ntl_type[i]);
+ }
+ free(ntl, M_DEVBUF);
nonettypes:
- /* Default to 11b channels if the card did not supply any */
- if (bands == 0) {
- setbit(ic->ic_modecaps, IEEE80211_MODE_11B);
- setbit(&bands, IEEE80211_MODE_11B);
- }
- len = sizeof(rates);
- bzero((char *)&rates, len);
- r = ndis_get_info(sc, OID_802_11_SUPPORTED_RATES,
- (void *)rates, &len);
- if (r)
- device_printf(dev, "get rates failed: 0x%x\n", r);
- /*
- * Since the supported rates only up to 8 can be supported,
- * if this is not 802.11b we're just going to be faking it
- * all up to heck.
- */
+ /* Default to 11b channels if the card did not supply any */
+ if (bands == 0) {
+ setbit(ic->ic_modecaps, IEEE80211_MODE_11B);
+ setbit(&bands, IEEE80211_MODE_11B);
+ }
+ len = sizeof(rates);
+ bzero((char *)&rates, len);
+ r = ndis_get_info(sc, OID_802_11_SUPPORTED_RATES, (void *)rates, &len);
+ if (r != 0)
+ device_printf(sc->ndis_dev, "get rates failed: 0x%x\n", r);
+ /*
+ * Since only up to 8 supported rates can be reported,
+ * if this is not 802.11b we're just going to be faking it
+ * all up to heck.
+ */
#define TESTSETRATE(x, y) \
do { \
int i; \
for (i = 0; i < ic->ic_sup_rates[x].rs_nrates; i++) { \
if (ic->ic_sup_rates[x].rs_rates[i] == (y)) \
break; \
} \
if (i == ic->ic_sup_rates[x].rs_nrates) { \
ic->ic_sup_rates[x].rs_rates[i] = (y); \
ic->ic_sup_rates[x].rs_nrates++; \
} \
} while (0)
#define SETRATE(x, y) \
ic->ic_sup_rates[x].rs_rates[ic->ic_sup_rates[x].rs_nrates] = (y)
#define INCRATE(x) \
ic->ic_sup_rates[x].rs_nrates++
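/*
 * In other words: SETRATE()/INCRATE() append a rate to a mode's rate
 * set unconditionally, while TESTSETRATE() only appends the rate if it
 * is not already present; e.g. TESTSETRATE(IEEE80211_MODE_11G, 48)
 * adds 24 Mb/s (rates are in 500 kb/s units) to the 11g set unless the
 * driver already reported it.
 */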
- ic->ic_curmode = IEEE80211_MODE_AUTO;
- if (isset(ic->ic_modecaps, IEEE80211_MODE_11A))
- ic->ic_sup_rates[IEEE80211_MODE_11A].rs_nrates = 0;
- if (isset(ic->ic_modecaps, IEEE80211_MODE_11B))
- ic->ic_sup_rates[IEEE80211_MODE_11B].rs_nrates = 0;
- if (isset(ic->ic_modecaps, IEEE80211_MODE_11G))
- ic->ic_sup_rates[IEEE80211_MODE_11G].rs_nrates = 0;
- for (i = 0; i < len; i++) {
- switch (rates[i] & IEEE80211_RATE_VAL) {
- case 2:
- case 4:
- case 11:
- case 10:
- case 22:
- if (isclr(ic->ic_modecaps, IEEE80211_MODE_11B)) {
- /* Lazy-init 802.11b. */
- setbit(ic->ic_modecaps,
- IEEE80211_MODE_11B);
- ic->ic_sup_rates[IEEE80211_MODE_11B].
- rs_nrates = 0;
- }
- SETRATE(IEEE80211_MODE_11B, rates[i]);
- INCRATE(IEEE80211_MODE_11B);
- break;
- default:
- if (isset(ic->ic_modecaps, IEEE80211_MODE_11A)) {
- SETRATE(IEEE80211_MODE_11A, rates[i]);
- INCRATE(IEEE80211_MODE_11A);
- }
- if (isset(ic->ic_modecaps, IEEE80211_MODE_11G)) {
- SETRATE(IEEE80211_MODE_11G, rates[i]);
- INCRATE(IEEE80211_MODE_11G);
- }
- break;
+ ic->ic_curmode = IEEE80211_MODE_AUTO;
+ if (isset(ic->ic_modecaps, IEEE80211_MODE_11A))
+ ic->ic_sup_rates[IEEE80211_MODE_11A].rs_nrates = 0;
+ if (isset(ic->ic_modecaps, IEEE80211_MODE_11B))
+ ic->ic_sup_rates[IEEE80211_MODE_11B].rs_nrates = 0;
+ if (isset(ic->ic_modecaps, IEEE80211_MODE_11G))
+ ic->ic_sup_rates[IEEE80211_MODE_11G].rs_nrates = 0;
+ for (i = 0; i < len; i++) {
+ switch (rates[i] & IEEE80211_RATE_VAL) {
+ case 2:
+ case 4:
+ case 11:
+ case 10:
+ case 22:
+ if (isclr(ic->ic_modecaps, IEEE80211_MODE_11B)) {
+ /* Lazy-init 802.11b. */
+ setbit(ic->ic_modecaps, IEEE80211_MODE_11B);
+ ic->ic_sup_rates[IEEE80211_MODE_11B].
+ rs_nrates = 0;
}
+ SETRATE(IEEE80211_MODE_11B, rates[i]);
+ INCRATE(IEEE80211_MODE_11B);
+ break;
+ default:
+ if (isset(ic->ic_modecaps, IEEE80211_MODE_11A)) {
+ SETRATE(IEEE80211_MODE_11A, rates[i]);
+ INCRATE(IEEE80211_MODE_11A);
+ }
+ if (isset(ic->ic_modecaps, IEEE80211_MODE_11G)) {
+ SETRATE(IEEE80211_MODE_11G, rates[i]);
+ INCRATE(IEEE80211_MODE_11G);
+ }
+ break;
}
+ }
- /*
- * If the hardware supports 802.11g, it most
- * likely supports 802.11b and all of the
- * 802.11b and 802.11g speeds, so maybe we can
- * just cheat here. Just how in the heck do
- * we detect turbo modes, though?
- */
- if (isset(ic->ic_modecaps, IEEE80211_MODE_11B)) {
- TESTSETRATE(IEEE80211_MODE_11B,
- IEEE80211_RATE_BASIC|2);
- TESTSETRATE(IEEE80211_MODE_11B,
- IEEE80211_RATE_BASIC|4);
- TESTSETRATE(IEEE80211_MODE_11B,
- IEEE80211_RATE_BASIC|11);
- TESTSETRATE(IEEE80211_MODE_11B,
- IEEE80211_RATE_BASIC|22);
- }
- if (isset(ic->ic_modecaps, IEEE80211_MODE_11G)) {
- TESTSETRATE(IEEE80211_MODE_11G, 48);
- TESTSETRATE(IEEE80211_MODE_11G, 72);
- TESTSETRATE(IEEE80211_MODE_11G, 96);
- TESTSETRATE(IEEE80211_MODE_11G, 108);
- }
- if (isset(ic->ic_modecaps, IEEE80211_MODE_11A)) {
- TESTSETRATE(IEEE80211_MODE_11A, 48);
- TESTSETRATE(IEEE80211_MODE_11A, 72);
- TESTSETRATE(IEEE80211_MODE_11A, 96);
- TESTSETRATE(IEEE80211_MODE_11A, 108);
- }
+ /*
+ * If the hardware supports 802.11g, it most
+ * likely supports 802.11b and all of the
+ * 802.11b and 802.11g speeds, so maybe we can
+ * just cheat here. Just how in the heck do
+ * we detect turbo modes, though?
+ */
+ if (isset(ic->ic_modecaps, IEEE80211_MODE_11B)) {
+ TESTSETRATE(IEEE80211_MODE_11B, IEEE80211_RATE_BASIC|2);
+ TESTSETRATE(IEEE80211_MODE_11B, IEEE80211_RATE_BASIC|4);
+ TESTSETRATE(IEEE80211_MODE_11B, IEEE80211_RATE_BASIC|11);
+ TESTSETRATE(IEEE80211_MODE_11B, IEEE80211_RATE_BASIC|22);
+ }
+ if (isset(ic->ic_modecaps, IEEE80211_MODE_11G)) {
+ TESTSETRATE(IEEE80211_MODE_11G, 48);
+ TESTSETRATE(IEEE80211_MODE_11G, 72);
+ TESTSETRATE(IEEE80211_MODE_11G, 96);
+ TESTSETRATE(IEEE80211_MODE_11G, 108);
+ }
+ if (isset(ic->ic_modecaps, IEEE80211_MODE_11A)) {
+ TESTSETRATE(IEEE80211_MODE_11A, 48);
+ TESTSETRATE(IEEE80211_MODE_11A, 72);
+ TESTSETRATE(IEEE80211_MODE_11A, 96);
+ TESTSETRATE(IEEE80211_MODE_11A, 108);
+ }
+
#undef SETRATE
#undef INCRATE
- ieee80211_init_channels(ic, NULL, &bands);
+#undef TESTSETRATE
- /*
- * To test for WPA support, we need to see if we can
- * set AUTHENTICATION_MODE to WPA and read it back
- * successfully.
- */
- i = sizeof(arg);
- arg = NDIS_80211_AUTHMODE_WPA;
- r = ndis_set_info(sc,
- OID_802_11_AUTHENTICATION_MODE, &arg, &i);
- if (r == 0) {
- r = ndis_get_info(sc,
- OID_802_11_AUTHENTICATION_MODE, &arg, &i);
- if (r == 0 && arg == NDIS_80211_AUTHMODE_WPA)
- ic->ic_caps |= IEEE80211_C_WPA;
- }
+ ieee80211_init_channels(ic, NULL, &bands);
- /*
- * To test for supported ciphers, we set each
- * available encryption type in descending order.
- * If ENC3 works, then we have WEP, TKIP and AES.
- * If only ENC2 works, then we have WEP and TKIP.
- * If only ENC1 works, then we have just WEP.
- */
- i = sizeof(arg);
- arg = NDIS_80211_WEPSTAT_ENC3ENABLED;
- r = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &i);
- if (r == 0) {
- ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
- | IEEE80211_CRYPTO_TKIP
- | IEEE80211_CRYPTO_AES_CCM;
- goto got_crypto;
- }
- arg = NDIS_80211_WEPSTAT_ENC2ENABLED;
- r = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &i);
- if (r == 0) {
- ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
- | IEEE80211_CRYPTO_TKIP;
- goto got_crypto;
- }
- arg = NDIS_80211_WEPSTAT_ENC1ENABLED;
- r = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &i);
- if (r == 0)
- ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
+ /*
+ * To test for WPA support, we need to see if we can
+ * set AUTHENTICATION_MODE to WPA and read it back
+ * successfully.
+ */
+ i = sizeof(arg);
+ arg = NDIS_80211_AUTHMODE_WPA;
+ r = ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &i);
+ if (r == 0) {
+ r = ndis_get_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &i);
+ if (r == 0 && arg == NDIS_80211_AUTHMODE_WPA)
+ ic->ic_caps |= IEEE80211_C_WPA;
+ }
+
+ /*
+ * To test for supported ciphers, we set each
+ * available encryption type in descending order.
+ * If ENC3 works, then we have WEP, TKIP and AES.
+ * If only ENC2 works, then we have WEP and TKIP.
+ * If only ENC1 works, then we have just WEP.
+ */
+ i = sizeof(arg);
+ arg = NDIS_80211_WEPSTAT_ENC3ENABLED;
+ r = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &i);
+ if (r == 0) {
+ ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
+ | IEEE80211_CRYPTO_TKIP
+ | IEEE80211_CRYPTO_AES_CCM;
+ goto got_crypto;
+ }
+ arg = NDIS_80211_WEPSTAT_ENC2ENABLED;
+ r = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &i);
+ if (r == 0) {
+ ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
+ | IEEE80211_CRYPTO_TKIP;
+ goto got_crypto;
+ }
+ arg = NDIS_80211_WEPSTAT_ENC1ENABLED;
+ r = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &i);
+ if (r == 0)
+ ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
got_crypto:
- i = sizeof(arg);
- r = ndis_get_info(sc, OID_802_11_POWER_MODE, &arg, &i);
- if (r == 0)
- ic->ic_caps |= IEEE80211_C_PMGT;
+ i = sizeof(arg);
+ r = ndis_get_info(sc, OID_802_11_POWER_MODE, &arg, &i);
+ if (r == 0)
+ ic->ic_caps |= IEEE80211_C_PMGT;
- r = ndis_get_info(sc, OID_802_11_TX_POWER_LEVEL, &arg, &i);
- if (r == 0)
- ic->ic_caps |= IEEE80211_C_TXPMGT;
+ r = ndis_get_info(sc, OID_802_11_TX_POWER_LEVEL, &arg, &i);
+ if (r == 0)
+ ic->ic_caps |= IEEE80211_C_TXPMGT;
- ieee80211_ifattach(ic, eaddr);
- ic->ic_raw_xmit = ndis_raw_xmit;
- ic->ic_scan_start = ndis_scan_start;
- ic->ic_scan_end = ndis_scan_end;
- ic->ic_set_channel = ndis_set_channel;
- ic->ic_scan_curchan = ndis_scan_curchan;
- ic->ic_scan_mindwell = ndis_scan_mindwell;
- ic->ic_bsschan = IEEE80211_CHAN_ANYC;
- //ic->ic_bss->ni_chan = ic->ic_bsschan;
- ic->ic_vap_create = ndis_vap_create;
- ic->ic_vap_delete = ndis_vap_delete;
- ic->ic_update_mcast = ndis_update_mcast;
- ic->ic_update_promisc = ndis_update_promisc;
+ /*
+ * Get station address from the driver.
+ */
+ len = sizeof(ic->ic_macaddr);
+ ndis_get_info(sc, OID_802_3_CURRENT_ADDRESS, &ic->ic_macaddr, &len);
- if (bootverbose)
- ieee80211_announce(ic);
+ ieee80211_ifattach(ic);
+ ic->ic_raw_xmit = ndis_raw_xmit;
+ ic->ic_scan_start = ndis_scan_start;
+ ic->ic_scan_end = ndis_scan_end;
+ ic->ic_set_channel = ndis_set_channel;
+ ic->ic_scan_curchan = ndis_scan_curchan;
+ ic->ic_scan_mindwell = ndis_scan_mindwell;
+ ic->ic_bsschan = IEEE80211_CHAN_ANYC;
+ ic->ic_vap_create = ndis_vap_create;
+ ic->ic_vap_delete = ndis_vap_delete;
+ ic->ic_update_mcast = ndis_update_mcast;
+ ic->ic_update_promisc = ndis_update_promisc;
+ ic->ic_transmit = ndis_80211transmit;
+ ic->ic_parent = ndis_80211parent;
- } else {
- ifmedia_init(&sc->ifmedia, IFM_IMASK, ndis_ifmedia_upd,
- ndis_ifmedia_sts);
- ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
- ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
- ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
- ifmedia_add(&sc->ifmedia,
- IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
- ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
- ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO);
- ether_ifattach(ifp, eaddr);
- }
+ if (bootverbose)
+ ieee80211_announce(ic);
-fail:
- if (error) {
- ndis_detach(dev);
- return (error);
- }
+ return (0);
+}
- if (sc->ndis_iftype == PNPBus && ndisusb_halt == 0)
- return (error);
+static int
+ndis_ifattach(struct ndis_softc *sc)
+{
+ struct ifnet *ifp;
+ u_char eaddr[ETHER_ADDR_LEN];
+ int len;
- DPRINTF(("attach done.\n"));
- /* We're done talking to the NIC for now; halt it. */
- ndis_halt_nic(sc);
- DPRINTF(("halting done.\n"));
+ ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL)
+ return (ENOSPC);
+ sc->ifp = ifp;
+ ifp->if_softc = sc;
- return (error);
+ /* Check for task offload support. */
+ ndis_probe_offload(sc);
+
+ /*
+ * Get station address from the driver.
+ */
+ len = sizeof(eaddr);
+ ndis_get_info(sc, OID_802_3_CURRENT_ADDRESS, eaddr, &len);
+
+ if_initname(ifp, device_get_name(sc->ndis_dev),
+ device_get_unit(sc->ndis_dev));
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = ndis_ioctl;
+ ifp->if_start = ndis_start;
+ ifp->if_init = ndis_init;
+ ifp->if_baudrate = 10000000;
+ IFQ_SET_MAXLEN(&ifp->if_snd, 50);
+ ifp->if_snd.ifq_drv_maxlen = 25;
+ IFQ_SET_READY(&ifp->if_snd);
+ ifp->if_capenable = ifp->if_capabilities;
+ ifp->if_hwassist = sc->ndis_hwassist;
+
+ ifmedia_init(&sc->ifmedia, IFM_IMASK, ndis_ifmedia_upd,
+ ndis_ifmedia_sts);
+ ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
+ ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
+ ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
+ ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
+ ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
+ ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO);
+ ether_ifattach(ifp, eaddr);
+
+ return (0);
}
static struct ieee80211vap *
ndis_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct ndis_vap *nvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return NULL;
- nvp = (struct ndis_vap *) malloc(sizeof(struct ndis_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (nvp == NULL)
- return NULL;
+ nvp = malloc(sizeof(struct ndis_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &nvp->vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
/* override with driver methods */
nvp->newstate = vap->iv_newstate;
vap->iv_newstate = ndis_newstate;
/* complete setup */
- ieee80211_vap_attach(vap, ieee80211_media_change, ndis_media_status);
+ ieee80211_vap_attach(vap, ieee80211_media_change, ndis_media_status,
+ mac);
ic->ic_opmode = opmode;
/* install key handling routines */
vap->iv_key_set = ndis_add_key;
vap->iv_key_delete = ndis_del_key;
return vap;
}
static void
ndis_vap_delete(struct ieee80211vap *vap)
{
struct ndis_vap *nvp = NDIS_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
- struct ifnet *ifp = ic->ic_ifp;
- struct ndis_softc *sc = ifp->if_softc;
+ struct ndis_softc *sc = ic->ic_softc;
ndis_stop(sc);
callout_drain(&sc->ndis_scan_callout);
ieee80211_vap_detach(vap);
free(nvp, M_80211_VAP);
}
/*
* Shut down hardware and free up resources. This can be called any
* time after the mutex has been initialized. It is called in both
* the error case in attach and the normal detach case so it needs
* to be careful about only freeing resources that have actually been
* allocated.
*/
int
-ndis_detach(dev)
- device_t dev;
+ndis_detach(device_t dev)
{
- struct ndis_softc *sc;
struct ifnet *ifp;
+ struct ndis_softc *sc;
driver_object *drv;
sc = device_get_softc(dev);
NDIS_LOCK(sc);
- ifp = sc->ifp;
+ if (!sc->ndis_80211)
+ ifp = sc->ifp;
+ else
+ ifp = NULL;
if (ifp != NULL)
ifp->if_flags &= ~IFF_UP;
-
if (device_is_attached(dev)) {
NDIS_UNLOCK(sc);
ndis_stop(sc);
- if (ifp != NULL) {
- if (sc->ndis_80211)
- ieee80211_ifdetach(ifp->if_l2com);
- else
- ether_ifdetach(ifp);
- }
+ if (sc->ndis_80211)
+ ieee80211_ifdetach(&sc->ndis_ic);
+ else if (ifp != NULL)
+ ether_ifdetach(ifp);
} else
NDIS_UNLOCK(sc);
if (sc->ndis_tickitem != NULL)
IoFreeWorkItem(sc->ndis_tickitem);
if (sc->ndis_startitem != NULL)
IoFreeWorkItem(sc->ndis_startitem);
if (sc->ndis_resetitem != NULL)
IoFreeWorkItem(sc->ndis_resetitem);
if (sc->ndis_inputitem != NULL)
IoFreeWorkItem(sc->ndis_inputitem);
if (sc->ndisusb_xferdoneitem != NULL)
IoFreeWorkItem(sc->ndisusb_xferdoneitem);
if (sc->ndisusb_taskitem != NULL)
IoFreeWorkItem(sc->ndisusb_taskitem);
bus_generic_detach(dev);
ndis_unload_driver(sc);
if (sc->ndis_irq)
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ndis_irq);
if (sc->ndis_res_io)
bus_release_resource(dev, SYS_RES_IOPORT,
sc->ndis_io_rid, sc->ndis_res_io);
if (sc->ndis_res_mem)
bus_release_resource(dev, SYS_RES_MEMORY,
sc->ndis_mem_rid, sc->ndis_res_mem);
if (sc->ndis_res_altmem)
bus_release_resource(dev, SYS_RES_MEMORY,
sc->ndis_altmem_rid, sc->ndis_res_altmem);
if (ifp != NULL)
if_free(ifp);
if (sc->ndis_iftype == PCMCIABus)
ndis_free_amem(sc);
if (sc->ndis_sc)
ndis_destroy_dma(sc);
if (sc->ndis_txarray)
free(sc->ndis_txarray, M_DEVBUF);
if (!sc->ndis_80211)
ifmedia_removeall(&sc->ifmedia);
if (sc->ndis_txpool != NULL)
NdisFreePacketPool(sc->ndis_txpool);
/* Destroy the PDO for this device. */
if (sc->ndis_iftype == PCIBus)
drv = windrv_lookup(0, "PCI Bus");
else if (sc->ndis_iftype == PCMCIABus)
drv = windrv_lookup(0, "PCCARD Bus");
else
drv = windrv_lookup(0, "USB Bus");
if (drv == NULL)
panic("couldn't find driver object");
windrv_destroy_pdo(drv, dev);
if (sc->ndis_iftype == PCIBus)
bus_dma_tag_destroy(sc->ndis_parent_tag);
return (0);
}
int
ndis_suspend(dev)
device_t dev;
{
struct ndis_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
ifp = sc->ifp;
#ifdef notdef
if (NDIS_INITIALIZED(sc))
ndis_stop(sc);
#endif
return (0);
}
int
ndis_resume(dev)
device_t dev;
{
struct ndis_softc *sc;
struct ifnet *ifp;
sc = device_get_softc(dev);
ifp = sc->ifp;
if (NDIS_INITIALIZED(sc))
ndis_init(sc);
return (0);
}
/*
* The following bunch of routines are here to support drivers that
* use the NdisMEthIndicateReceive()/MiniportTransferData() mechanism.
* The NdisMEthIndicateReceive() handler runs at DISPATCH_LEVEL for
* serialized miniports, or IRQL <= DISPATCH_LEVEL for deserialized
* miniports.
*/
static void
ndis_rxeof_eth(adapter, ctx, addr, hdr, hdrlen, lookahead, lookaheadlen, pktlen)
ndis_handle adapter;
ndis_handle ctx;
char *addr;
void *hdr;
uint32_t hdrlen;
void *lookahead;
uint32_t lookaheadlen;
uint32_t pktlen;
{
ndis_miniport_block *block;
uint8_t irql = 0;
uint32_t status;
ndis_buffer *b;
ndis_packet *p;
struct mbuf *m;
ndis_ethpriv *priv;
block = adapter;
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return;
/* Save the data provided to us so far. */
m->m_len = lookaheadlen + hdrlen;
m->m_pkthdr.len = pktlen + hdrlen;
m->m_next = NULL;
m_copyback(m, 0, hdrlen, hdr);
m_copyback(m, hdrlen, lookaheadlen, lookahead);
/* Now create a fake NDIS_PACKET to hold the data */
NdisAllocatePacket(&status, &p, block->nmb_rxpool);
if (status != NDIS_STATUS_SUCCESS) {
m_freem(m);
return;
}
p->np_m0 = m;
b = IoAllocateMdl(m->m_data, m->m_pkthdr.len, FALSE, FALSE, NULL);
if (b == NULL) {
NdisFreePacket(p);
m_freem(m);
return;
}
p->np_private.npp_head = p->np_private.npp_tail = b;
p->np_private.npp_totlen = m->m_pkthdr.len;
/* Save the packet RX context somewhere. */
priv = (ndis_ethpriv *)&p->np_protocolreserved;
priv->nep_ctx = ctx;
if (!NDIS_SERIALIZED(block))
KeAcquireSpinLock(&block->nmb_lock, &irql);
InsertTailList((&block->nmb_packetlist), (&p->np_list));
if (!NDIS_SERIALIZED(block))
KeReleaseSpinLock(&block->nmb_lock, irql);
}
/*
* NdisMEthIndicateReceiveComplete() handler, runs at DISPATCH_LEVEL
* for serialized miniports, or IRQL <= DISPATCH_LEVEL for deserialized
* miniports.
*/
static void
ndis_rxeof_done(adapter)
ndis_handle adapter;
{
struct ndis_softc *sc;
ndis_miniport_block *block;
block = adapter;
/* Schedule transfer/RX of queued packets. */
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
KeInsertQueueDpc(&sc->ndis_rxdpc, NULL, NULL);
}
/*
* MiniportTransferData() handler, runs at DISPATCH_LEVEL.
*/
static void
ndis_rxeof_xfr(dpc, adapter, sysarg1, sysarg2)
kdpc *dpc;
ndis_handle adapter;
void *sysarg1;
void *sysarg2;
{
ndis_miniport_block *block;
struct ndis_softc *sc;
ndis_packet *p;
list_entry *l;
uint32_t status;
ndis_ethpriv *priv;
struct ifnet *ifp;
struct mbuf *m;
block = adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
ifp = sc->ifp;
KeAcquireSpinLockAtDpcLevel(&block->nmb_lock);
l = block->nmb_packetlist.nle_flink;
while(!IsListEmpty(&block->nmb_packetlist)) {
l = RemoveHeadList((&block->nmb_packetlist));
p = CONTAINING_RECORD(l, ndis_packet, np_list);
InitializeListHead((&p->np_list));
priv = (ndis_ethpriv *)&p->np_protocolreserved;
m = p->np_m0;
p->np_softc = sc;
p->np_m0 = NULL;
KeReleaseSpinLockFromDpcLevel(&block->nmb_lock);
status = MSCALL6(sc->ndis_chars->nmc_transferdata_func,
p, &p->np_private.npp_totlen, block, priv->nep_ctx,
m->m_len, m->m_pkthdr.len - m->m_len);
KeAcquireSpinLockAtDpcLevel(&block->nmb_lock);
/*
* If status is NDIS_STATUS_PENDING, do nothing and
* wait for a callback to the ndis_rxeof_xfr_done()
* handler.
*/
m->m_len = m->m_pkthdr.len;
m->m_pkthdr.rcvif = ifp;
if (status == NDIS_STATUS_SUCCESS) {
IoFreeMdl(p->np_private.npp_head);
NdisFreePacket(p);
KeAcquireSpinLockAtDpcLevel(&sc->ndis_rxlock);
- _IF_ENQUEUE(&sc->ndis_rxqueue, m);
+ mbufq_enqueue(&sc->ndis_rxqueue, m);
KeReleaseSpinLockFromDpcLevel(&sc->ndis_rxlock);
IoQueueWorkItem(sc->ndis_inputitem,
(io_workitem_func)ndis_inputtask_wrap,
- WORKQUEUE_CRITICAL, ifp);
+ WORKQUEUE_CRITICAL, sc);
}
if (status == NDIS_STATUS_FAILURE)
m_freem(m);
/* Advance to next packet */
l = block->nmb_packetlist.nle_flink;
}
KeReleaseSpinLockFromDpcLevel(&block->nmb_lock);
}
/*
* NdisMTransferDataComplete() handler, runs at DISPATCH_LEVEL.
*/
static void
ndis_rxeof_xfr_done(adapter, packet, status, len)
ndis_handle adapter;
ndis_packet *packet;
uint32_t status;
uint32_t len;
{
ndis_miniport_block *block;
struct ndis_softc *sc;
struct ifnet *ifp;
struct mbuf *m;
block = adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
ifp = sc->ifp;
m = packet->np_m0;
IoFreeMdl(packet->np_private.npp_head);
NdisFreePacket(packet);
if (status != NDIS_STATUS_SUCCESS) {
m_freem(m);
return;
}
m->m_len = m->m_pkthdr.len;
m->m_pkthdr.rcvif = ifp;
KeAcquireSpinLockAtDpcLevel(&sc->ndis_rxlock);
- _IF_ENQUEUE(&sc->ndis_rxqueue, m);
+ mbufq_enqueue(&sc->ndis_rxqueue, m);
KeReleaseSpinLockFromDpcLevel(&sc->ndis_rxlock);
IoQueueWorkItem(sc->ndis_inputitem,
(io_workitem_func)ndis_inputtask_wrap,
- WORKQUEUE_CRITICAL, ifp);
+ WORKQUEUE_CRITICAL, sc);
}
/*
* A frame has been uploaded: pass the resulting mbuf chain up to
* the higher level protocols.
*
* When handling received NDIS packets, the 'status' field in the
* out-of-band portion of the ndis_packet has special meaning. In the
* most common case, the underlying NDIS driver will set this field
* to NDIS_STATUS_SUCCESS, which indicates that it's OK for us to
* take possession of it. We then change the status field to
* NDIS_STATUS_PENDING to tell the driver that we now own the packet,
* and that we will return it at some point in the future via the
* return packet handler.
*
* If the driver hands us a packet with a status of NDIS_STATUS_RESOURCES,
* this means the driver is running out of packet/buffer resources and
* wants to maintain ownership of the packet. In this case, we have to
* copy the packet data into local storage and let the driver keep the
* packet.
*/
static void
ndis_rxeof(adapter, packets, pktcnt)
ndis_handle adapter;
ndis_packet **packets;
uint32_t pktcnt;
{
struct ndis_softc *sc;
ndis_miniport_block *block;
ndis_packet *p;
uint32_t s;
ndis_tcpip_csum *csum;
struct ifnet *ifp;
struct mbuf *m0, *m;
int i;
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
ifp = sc->ifp;
/*
* There's a slim chance the driver may indicate some packets
* before we're completely ready to handle them. If we detect this,
* we need to return them to the miniport and ignore them.
*/
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!sc->ndis_running) {
for (i = 0; i < pktcnt; i++) {
p = packets[i];
if (p->np_oob.npo_status == NDIS_STATUS_SUCCESS) {
p->np_refcnt++;
(void)ndis_return_packet(NULL, p, block);
}
}
return;
}
for (i = 0; i < pktcnt; i++) {
p = packets[i];
/* Stash the softc here so ptom can use it. */
p->np_softc = sc;
if (ndis_ptom(&m0, p)) {
device_printf(sc->ndis_dev, "ptom failed\n");
if (p->np_oob.npo_status == NDIS_STATUS_SUCCESS)
(void)ndis_return_packet(NULL, p, block);
} else {
#ifdef notdef
if (p->np_oob.npo_status == NDIS_STATUS_RESOURCES) {
m = m_dup(m0, M_NOWAIT);
/*
* NOTE: we want to destroy the mbuf here, but
* we don't actually want to return it to the
* driver via the return packet handler. By
* bumping np_refcnt, we can prevent the
* ndis_return_packet() routine from actually
* doing anything.
*/
p->np_refcnt++;
m_freem(m0);
if (m == NULL)
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
else
m0 = m;
} else
p->np_oob.npo_status = NDIS_STATUS_PENDING;
#endif
m = m_dup(m0, M_NOWAIT);
if (p->np_oob.npo_status == NDIS_STATUS_RESOURCES)
p->np_refcnt++;
else
p->np_oob.npo_status = NDIS_STATUS_PENDING;
m_freem(m0);
if (m == NULL) {
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
continue;
}
m0 = m;
m0->m_pkthdr.rcvif = ifp;
/* Deal with checksum offload. */
if (ifp->if_capenable & IFCAP_RXCSUM &&
p->np_ext.npe_info[ndis_tcpipcsum_info] != NULL) {
s = (uintptr_t)
p->np_ext.npe_info[ndis_tcpipcsum_info];
csum = (ndis_tcpip_csum *)&s;
if (csum->u.ntc_rxflags &
NDIS_RXCSUM_IP_PASSED)
m0->m_pkthdr.csum_flags |=
CSUM_IP_CHECKED|CSUM_IP_VALID;
if (csum->u.ntc_rxflags &
(NDIS_RXCSUM_TCP_PASSED |
NDIS_RXCSUM_UDP_PASSED)) {
m0->m_pkthdr.csum_flags |=
CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
m0->m_pkthdr.csum_data = 0xFFFF;
}
}
KeAcquireSpinLockAtDpcLevel(&sc->ndis_rxlock);
- _IF_ENQUEUE(&sc->ndis_rxqueue, m0);
+ mbufq_enqueue(&sc->ndis_rxqueue, m0);
KeReleaseSpinLockFromDpcLevel(&sc->ndis_rxlock);
IoQueueWorkItem(sc->ndis_inputitem,
(io_workitem_func)ndis_inputtask_wrap,
- WORKQUEUE_CRITICAL, ifp);
+ WORKQUEUE_CRITICAL, sc);
}
}
}
/*
* This routine is run at PASSIVE_LEVEL. We use this routine to pass
* packets into the stack in order to avoid calling (*ifp->if_input)()
* with any locks held (at DISPATCH_LEVEL, we'll be holding the
* 'dispatch level' per-cpu sleep lock).
*/
static void
-ndis_inputtask(dobj, arg)
- device_object *dobj;
- void *arg;
+ndis_inputtask(device_object *dobj, void *arg)
{
ndis_miniport_block *block;
- struct ifnet *ifp;
- struct ndis_softc *sc;
+ struct ndis_softc *sc = arg;
struct mbuf *m;
- struct ieee80211com *ic;
- struct ieee80211vap *vap;
uint8_t irql;
- ifp = arg;
- sc = ifp->if_softc;
- ic = ifp->if_l2com;
- vap = TAILQ_FIRST(&ic->ic_vaps);
block = dobj->do_devext;
KeAcquireSpinLock(&sc->ndis_rxlock, &irql);
- while(1) {
- _IF_DEQUEUE(&sc->ndis_rxqueue, m);
- if (m == NULL)
- break;
+ while ((m = mbufq_dequeue(&sc->ndis_rxqueue)) != NULL) {
KeReleaseSpinLock(&sc->ndis_rxlock, irql);
- if ((sc->ndis_80211 != 0) && (vap != NULL))
- vap->iv_deliver_data(vap, vap->iv_bss, m);
- else
+ if ((sc->ndis_80211 != 0)) {
+ struct ieee80211com *ic = &sc->ndis_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+ if (vap != NULL)
+ vap->iv_deliver_data(vap, vap->iv_bss, m);
+ } else {
+ struct ifnet *ifp = sc->ifp;
+
(*ifp->if_input)(ifp, m);
+ }
KeAcquireSpinLock(&sc->ndis_rxlock, &irql);
}
KeReleaseSpinLock(&sc->ndis_rxlock, irql);
}
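/*
 * A minimal sketch of the producer/consumer handoff used above,
 * assuming the same ndis_rxlock/ndis_rxqueue fields: the RX paths
 * running at DISPATCH_LEVEL only enqueue and schedule the work item,
 * while this PASSIVE_LEVEL task drains the queue with the lock dropped
 * around if_input()/iv_deliver_data():
 *
 *	KeAcquireSpinLockAtDpcLevel(&sc->ndis_rxlock);
 *	mbufq_enqueue(&sc->ndis_rxqueue, m);
 *	KeReleaseSpinLockFromDpcLevel(&sc->ndis_rxlock);
 *	IoQueueWorkItem(sc->ndis_inputitem,
 *	    (io_workitem_func)ndis_inputtask_wrap, WORKQUEUE_CRITICAL, sc);
 */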
/*
* A frame was downloaded to the chip. It's safe for us to clean up
* the list buffers.
*/
static void
ndis_txeof(adapter, packet, status)
ndis_handle adapter;
ndis_packet *packet;
ndis_status status;
{
struct ndis_softc *sc;
ndis_miniport_block *block;
struct ifnet *ifp;
int idx;
struct mbuf *m;
block = (ndis_miniport_block *)adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
ifp = sc->ifp;
m = packet->np_m0;
idx = packet->np_txidx;
if (sc->ndis_sc)
bus_dmamap_unload(sc->ndis_ttag, sc->ndis_tmaps[idx]);
ndis_free_packet(packet);
m_freem(m);
NDIS_LOCK(sc);
sc->ndis_txarray[idx] = NULL;
sc->ndis_txpending++;
if (status == NDIS_STATUS_SUCCESS)
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
else
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
sc->ndis_tx_timer = 0;
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
NDIS_UNLOCK(sc);
IoQueueWorkItem(sc->ndis_startitem,
(io_workitem_func)ndis_starttask_wrap,
WORKQUEUE_CRITICAL, ifp);
}
static void
ndis_linksts(adapter, status, sbuf, slen)
ndis_handle adapter;
ndis_status status;
void *sbuf;
uint32_t slen;
{
ndis_miniport_block *block;
struct ndis_softc *sc;
block = adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
sc->ndis_sts = status;
/* Event list is all full up, drop this one. */
NDIS_LOCK(sc);
if (sc->ndis_evt[sc->ndis_evtpidx].ne_sts) {
NDIS_UNLOCK(sc);
return;
}
/* Cache the event. */
if (slen) {
sc->ndis_evt[sc->ndis_evtpidx].ne_buf = malloc(slen,
M_TEMP, M_NOWAIT);
if (sc->ndis_evt[sc->ndis_evtpidx].ne_buf == NULL) {
NDIS_UNLOCK(sc);
return;
}
bcopy((char *)sbuf,
sc->ndis_evt[sc->ndis_evtpidx].ne_buf, slen);
}
sc->ndis_evt[sc->ndis_evtpidx].ne_sts = status;
sc->ndis_evt[sc->ndis_evtpidx].ne_len = slen;
NDIS_EVTINC(sc->ndis_evtpidx);
NDIS_UNLOCK(sc);
}
static void
ndis_linksts_done(adapter)
ndis_handle adapter;
{
ndis_miniport_block *block;
struct ndis_softc *sc;
struct ifnet *ifp;
block = adapter;
sc = device_get_softc(block->nmb_physdeviceobj->do_devext);
ifp = sc->ifp;
if (!NDIS_INITIALIZED(sc))
return;
switch (sc->ndis_sts) {
case NDIS_STATUS_MEDIA_CONNECT:
IoQueueWorkItem(sc->ndis_tickitem,
(io_workitem_func)ndis_ticktask_wrap,
WORKQUEUE_CRITICAL, sc);
IoQueueWorkItem(sc->ndis_startitem,
(io_workitem_func)ndis_starttask_wrap,
WORKQUEUE_CRITICAL, ifp);
break;
case NDIS_STATUS_MEDIA_DISCONNECT:
if (sc->ndis_link)
IoQueueWorkItem(sc->ndis_tickitem,
(io_workitem_func)ndis_ticktask_wrap,
WORKQUEUE_CRITICAL, sc);
break;
default:
break;
}
}
static void
ndis_tick(xsc)
void *xsc;
{
struct ndis_softc *sc;
sc = xsc;
if (sc->ndis_hang_timer && --sc->ndis_hang_timer == 0) {
IoQueueWorkItem(sc->ndis_tickitem,
(io_workitem_func)ndis_ticktask_wrap,
WORKQUEUE_CRITICAL, sc);
sc->ndis_hang_timer = sc->ndis_block->nmb_checkforhangsecs;
}
if (sc->ndis_tx_timer && --sc->ndis_tx_timer == 0) {
if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
device_printf(sc->ndis_dev, "watchdog timeout\n");
IoQueueWorkItem(sc->ndis_resetitem,
(io_workitem_func)ndis_resettask_wrap,
WORKQUEUE_CRITICAL, sc);
IoQueueWorkItem(sc->ndis_startitem,
(io_workitem_func)ndis_starttask_wrap,
WORKQUEUE_CRITICAL, sc->ifp);
}
callout_reset(&sc->ndis_stat_callout, hz, ndis_tick, sc);
}
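/*
 * ndis_tick() reschedules itself once per second (callout_reset with
 * hz), so ndis_hang_timer and ndis_tx_timer are simple countdowns in
 * seconds: the hang timer runs the MiniportCheckForHang handler via
 * ndis_ticktask every nmb_checkforhangsecs seconds, while the TX
 * timer, armed to 5 when a send is queued, acts as a watchdog that
 * resets the NIC if no transmit completes in time.
 */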
static void
-ndis_ticktask(d, xsc)
- device_object *d;
- void *xsc;
+ndis_ticktask(device_object *d, void *xsc)
{
- struct ndis_softc *sc;
- struct ieee80211com *ic;
- struct ieee80211vap *vap;
+ struct ndis_softc *sc = xsc;
ndis_checkforhang_handler hangfunc;
uint8_t rval;
- sc = xsc;
- ic = sc->ifp->if_l2com;
- vap = TAILQ_FIRST(&ic->ic_vaps);
-
NDIS_LOCK(sc);
if (!NDIS_INITIALIZED(sc)) {
NDIS_UNLOCK(sc);
return;
}
NDIS_UNLOCK(sc);
hangfunc = sc->ndis_chars->nmc_checkhang_func;
if (hangfunc != NULL) {
rval = MSCALL1(hangfunc,
sc->ndis_block->nmb_miniportadapterctx);
if (rval == TRUE) {
ndis_reset_nic(sc);
return;
}
}
NDIS_LOCK(sc);
if (sc->ndis_link == 0 &&
sc->ndis_sts == NDIS_STATUS_MEDIA_CONNECT) {
sc->ndis_link = 1;
- if ((sc->ndis_80211 != 0) && (vap != NULL)) {
- NDIS_UNLOCK(sc);
- ndis_getstate_80211(sc);
- ieee80211_new_state(vap, IEEE80211_S_RUN, -1);
- NDIS_LOCK(sc);
- if_link_state_change(vap->iv_ifp, LINK_STATE_UP);
+ if (sc->ndis_80211 != 0) {
+ struct ieee80211com *ic = &sc->ndis_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+ if (vap != NULL) {
+ NDIS_UNLOCK(sc);
+ ndis_getstate_80211(sc);
+ ieee80211_new_state(vap, IEEE80211_S_RUN, -1);
+ NDIS_LOCK(sc);
+ if_link_state_change(vap->iv_ifp,
+ LINK_STATE_UP);
+ }
} else
if_link_state_change(sc->ifp, LINK_STATE_UP);
}
if (sc->ndis_link == 1 &&
sc->ndis_sts == NDIS_STATUS_MEDIA_DISCONNECT) {
sc->ndis_link = 0;
- if ((sc->ndis_80211 != 0) && (vap != NULL)) {
- NDIS_UNLOCK(sc);
- ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
- NDIS_LOCK(sc);
- if_link_state_change(vap->iv_ifp, LINK_STATE_DOWN);
+ if (sc->ndis_80211 != 0) {
+ struct ieee80211com *ic = &sc->ndis_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+ if (vap != NULL) {
+ NDIS_UNLOCK(sc);
+ ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
+ NDIS_LOCK(sc);
+ if_link_state_change(vap->iv_ifp,
+ LINK_STATE_DOWN);
+ }
} else
if_link_state_change(sc->ifp, LINK_STATE_DOWN);
}
NDIS_UNLOCK(sc);
}
static void
ndis_map_sclist(arg, segs, nseg, mapsize, error)
void *arg;
bus_dma_segment_t *segs;
int nseg;
bus_size_t mapsize;
int error;
{
struct ndis_sc_list *sclist;
int i;
if (error || arg == NULL)
return;
sclist = arg;
sclist->nsl_frags = nseg;
for (i = 0; i < nseg; i++) {
sclist->nsl_elements[i].nse_addr.np_quad = segs[i].ds_addr;
sclist->nsl_elements[i].nse_len = segs[i].ds_len;
}
}
static int
ndis_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
/* no support; just discard */
m_freem(m);
ieee80211_free_node(ni);
return (0);
}
static void
ndis_update_mcast(struct ieee80211com *ic)
{
struct ndis_softc *sc = ic->ic_softc;
ndis_setmulti(sc);
}
static void
ndis_update_promisc(struct ieee80211com *ic)
{
/* not supported */
}
static void
ndis_starttask(d, arg)
device_object *d;
void *arg;
{
struct ifnet *ifp;
ifp = arg;
if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
ndis_start(ifp);
}
/*
* Main transmit routine. To make NDIS drivers happy, we need to
* transform mbuf chains into NDIS packets and feed them to the
* send packet routines. Most drivers allow you to send several
* packets at once (up to the maxpkts limit). Unfortunately, rather
* than accepting them in the form of a linked list, they expect
* a contiguous array of pointers to packets.
*
* For those drivers which use the NDIS scatter/gather DMA mechanism,
* we need to perform busdma work here. Those that use map registers
* will do the mapping themselves on a buffer by buffer basis.
*/
static void
ndis_start(ifp)
struct ifnet *ifp;
{
struct ndis_softc *sc;
struct mbuf *m = NULL;
ndis_packet **p0 = NULL, *p = NULL;
ndis_tcpip_csum *csum;
int pcnt = 0, status;
sc = ifp->if_softc;
NDIS_LOCK(sc);
if (!sc->ndis_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
NDIS_UNLOCK(sc);
return;
}
p0 = &sc->ndis_txarray[sc->ndis_txidx];
while(sc->ndis_txpending) {
IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
if (m == NULL)
break;
NdisAllocatePacket(&status,
&sc->ndis_txarray[sc->ndis_txidx], sc->ndis_txpool);
if (status != NDIS_STATUS_SUCCESS)
break;
if (ndis_mtop(m, &sc->ndis_txarray[sc->ndis_txidx])) {
IFQ_DRV_PREPEND(&ifp->if_snd, m);
NDIS_UNLOCK(sc);
return;
}
/*
* Save pointer to original mbuf
* so we can free it later.
*/
p = sc->ndis_txarray[sc->ndis_txidx];
p->np_txidx = sc->ndis_txidx;
p->np_m0 = m;
p->np_oob.npo_status = NDIS_STATUS_PENDING;
/*
* Do scatter/gather processing, if driver requested it.
*/
if (sc->ndis_sc) {
bus_dmamap_load_mbuf(sc->ndis_ttag,
sc->ndis_tmaps[sc->ndis_txidx], m,
ndis_map_sclist, &p->np_sclist, BUS_DMA_NOWAIT);
bus_dmamap_sync(sc->ndis_ttag,
sc->ndis_tmaps[sc->ndis_txidx],
BUS_DMASYNC_PREREAD);
p->np_ext.npe_info[ndis_sclist_info] = &p->np_sclist;
}
/* Handle checksum offload. */
if (ifp->if_capenable & IFCAP_TXCSUM &&
m->m_pkthdr.csum_flags) {
csum = (ndis_tcpip_csum *)
&p->np_ext.npe_info[ndis_tcpipcsum_info];
csum->u.ntc_txflags = NDIS_TXCSUM_DO_IPV4;
if (m->m_pkthdr.csum_flags & CSUM_IP)
csum->u.ntc_txflags |= NDIS_TXCSUM_DO_IP;
if (m->m_pkthdr.csum_flags & CSUM_TCP)
csum->u.ntc_txflags |= NDIS_TXCSUM_DO_TCP;
if (m->m_pkthdr.csum_flags & CSUM_UDP)
csum->u.ntc_txflags |= NDIS_TXCSUM_DO_UDP;
p->np_private.npp_flags = NDIS_PROTOCOL_ID_TCP_IP;
}
NDIS_INC(sc);
sc->ndis_txpending--;
pcnt++;
/*
* If there's a BPF listener, bounce a copy of this frame
* to him.
*/
if (!sc->ndis_80211) /* XXX handle 80211 */
BPF_MTAP(ifp, m);
/*
* The array that p0 points to must appear contiguous,
* so we must not wrap past the end of sc->ndis_txarray[].
* If it looks like we're about to wrap, break out here
* so this batch of packets can be transmitted, then
* wait for txeof to ask us to send the rest.
*/
if (sc->ndis_txidx == 0)
break;
}
if (pcnt == 0) {
NDIS_UNLOCK(sc);
return;
}
if (sc->ndis_txpending == 0)
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
/*
* Set a timeout in case the chip goes out to lunch.
*/
sc->ndis_tx_timer = 5;
NDIS_UNLOCK(sc);
/*
* According to NDIS documentation, if a driver exports
* a MiniportSendPackets() routine, we prefer that over
* a MiniportSend() routine (which sends just a single
* packet).
*/
if (sc->ndis_chars->nmc_sendmulti_func != NULL)
ndis_send_packets(sc, p0, pcnt);
else
ndis_send_packet(sc, p);
return;
}
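/*
 * A note on the ndis_txidx bookkeeping above: sc->ndis_txarray[] is
 * used as a ring, but MiniportSendPackets() wants one contiguous array
 * of packet pointers, so the loop deliberately stops when NDIS_INC()
 * wraps the index back to 0 and hands off whatever was collected so
 * far; ndis_txeof() then frees the slots and kicks ndis_starttask()
 * to pick up the remainder.
 */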
+static int
+ndis_80211transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct ndis_softc *sc = ic->ic_softc;
+ ndis_packet **p0 = NULL, *p = NULL;
+ int status;
+
+ NDIS_LOCK(sc);
+ if (!sc->ndis_link || !sc->ndis_running) {
+ NDIS_UNLOCK(sc);
+ return (ENXIO);
+ }
+
+ if (sc->ndis_txpending == 0) {
+ NDIS_UNLOCK(sc);
+ return (ENOBUFS);
+ }
+
+ p0 = &sc->ndis_txarray[sc->ndis_txidx];
+
+ NdisAllocatePacket(&status,
+ &sc->ndis_txarray[sc->ndis_txidx], sc->ndis_txpool);
+
+ if (status != NDIS_STATUS_SUCCESS) {
+ NDIS_UNLOCK(sc);
+ return (ENOBUFS);
+ }
+
+ if (ndis_mtop(m, &sc->ndis_txarray[sc->ndis_txidx])) {
+ NDIS_UNLOCK(sc);
+ return (ENOBUFS);
+ }
+
+ /*
+ * Save pointer to original mbuf
+ * so we can free it later.
+ */
+
+ p = sc->ndis_txarray[sc->ndis_txidx];
+ p->np_txidx = sc->ndis_txidx;
+ p->np_m0 = m;
+ p->np_oob.npo_status = NDIS_STATUS_PENDING;
+
+ /*
+ * Do scatter/gather processing, if driver requested it.
+ */
+ if (sc->ndis_sc) {
+ bus_dmamap_load_mbuf(sc->ndis_ttag,
+ sc->ndis_tmaps[sc->ndis_txidx], m,
+ ndis_map_sclist, &p->np_sclist, BUS_DMA_NOWAIT);
+ bus_dmamap_sync(sc->ndis_ttag,
+ sc->ndis_tmaps[sc->ndis_txidx],
+ BUS_DMASYNC_PREREAD);
+ p->np_ext.npe_info[ndis_sclist_info] = &p->np_sclist;
+ }
+
+ NDIS_INC(sc);
+ sc->ndis_txpending--;
+
+ /*
+ * Set a timeout in case the chip goes out to lunch.
+ */
+ sc->ndis_tx_timer = 5;
+ NDIS_UNLOCK(sc);
+
+ /*
+ * According to NDIS documentation, if a driver exports
+ * a MiniportSendPackets() routine, we prefer that over
+ * a MiniportSend() routine (which sends just a single
+ * packet).
+ */
+ if (sc->ndis_chars->nmc_sendmulti_func != NULL)
+ ndis_send_packets(sc, p0, 1);
+ else
+ ndis_send_packet(sc, p);
+
+ return (0);
+}
+
static void
-ndis_init(xsc)
- void *xsc;
+ndis_80211parent(struct ieee80211com *ic)
{
+ struct ndis_softc *sc = ic->ic_softc;
+
+ /*NDIS_LOCK(sc);*/
+ if (ic->ic_nrunning > 0) {
+ if (!sc->ndis_running)
+ ndis_init(sc);
+ } else if (sc->ndis_running)
+ ndis_stop(sc);
+ /*NDIS_UNLOCK(sc);*/
+}
+
+static void
+ndis_init(void *xsc)
+{
struct ndis_softc *sc = xsc;
- struct ifnet *ifp = sc->ifp;
- struct ieee80211com *ic = ifp->if_l2com;
int i, len, error;
/*
* Avoid reinitializing the link unnecessarily.
* This should be dealt with in a better way by
* fixing the upper layer modules so they don't
* call ifp->if_init() quite as often.
*/
if (sc->ndis_link)
return;
/*
* Cancel pending I/O and free all RX/TX buffers.
*/
ndis_stop(sc);
if (!(sc->ndis_iftype == PNPBus && ndisusb_halt == 0)) {
error = ndis_init_nic(sc);
if (error != 0) {
device_printf(sc->ndis_dev,
"failed to initialize the device: %d\n", error);
return;
}
}
- /* Init our MAC address */
-
/* Program the packet filter */
+ sc->ndis_filter = NDIS_PACKET_TYPE_DIRECTED |
+ NDIS_PACKET_TYPE_BROADCAST;
- sc->ndis_filter = NDIS_PACKET_TYPE_DIRECTED;
+ if (sc->ndis_80211) {
+ struct ieee80211com *ic = &sc->ndis_ic;
- if (ifp->if_flags & IFF_BROADCAST)
- sc->ndis_filter |= NDIS_PACKET_TYPE_BROADCAST;
+ if (ic->ic_promisc > 0)
+ sc->ndis_filter |= NDIS_PACKET_TYPE_PROMISCUOUS;
+ } else {
+ struct ifnet *ifp = sc->ifp;
- if (ifp->if_flags & IFF_PROMISC)
- sc->ndis_filter |= NDIS_PACKET_TYPE_PROMISCUOUS;
+ if (ifp->if_flags & IFF_PROMISC)
+ sc->ndis_filter |= NDIS_PACKET_TYPE_PROMISCUOUS;
+ }
len = sizeof(sc->ndis_filter);
error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER,
&sc->ndis_filter, &len);
if (error)
device_printf(sc->ndis_dev, "set filter failed: %d\n", error);
/*
* Set lookahead.
*/
- i = ifp->if_mtu;
+ if (sc->ndis_80211)
+ i = ETHERMTU;
+ else
+ i = sc->ifp->if_mtu;
len = sizeof(i);
ndis_set_info(sc, OID_GEN_CURRENT_LOOKAHEAD, &i, &len);
/*
* Program the multicast filter, if necessary.
*/
ndis_setmulti(sc);
/* Setup task offload. */
ndis_set_offload(sc);
NDIS_LOCK(sc);
sc->ndis_txidx = 0;
sc->ndis_txpending = sc->ndis_maxpkts;
sc->ndis_link = 0;
- if_link_state_change(sc->ifp, LINK_STATE_UNKNOWN);
+ if (!sc->ndis_80211) {
+ if_link_state_change(sc->ifp, LINK_STATE_UNKNOWN);
+ sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ }
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
sc->ndis_tx_timer = 0;
/*
* Some drivers don't set this value. The NDIS spec says
* the default checkforhang timeout is "approximately 2
* seconds." We use 3 seconds, because it seems for some
* drivers, exactly 2 seconds is too fast.
*/
if (sc->ndis_block->nmb_checkforhangsecs == 0)
sc->ndis_block->nmb_checkforhangsecs = 3;
sc->ndis_hang_timer = sc->ndis_block->nmb_checkforhangsecs;
callout_reset(&sc->ndis_stat_callout, hz, ndis_tick, sc);
+ sc->ndis_running = 1;
NDIS_UNLOCK(sc);
/* XXX force handling */
if (sc->ndis_80211)
- ieee80211_start_all(ic); /* start all vap's */
+ ieee80211_start_all(&sc->ndis_ic); /* start all vap's */
}
/*
* Set media options.
*/
static int
ndis_ifmedia_upd(ifp)
struct ifnet *ifp;
{
struct ndis_softc *sc;
sc = ifp->if_softc;
if (NDIS_INITIALIZED(sc))
ndis_init(sc);
return (0);
}
/*
* Report current media status.
*/
static void
ndis_ifmedia_sts(ifp, ifmr)
struct ifnet *ifp;
struct ifmediareq *ifmr;
{
struct ndis_softc *sc;
uint32_t media_info;
ndis_media_state linkstate;
int len;
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
sc = ifp->if_softc;
if (!NDIS_INITIALIZED(sc))
return;
len = sizeof(linkstate);
ndis_get_info(sc, OID_GEN_MEDIA_CONNECT_STATUS,
(void *)&linkstate, &len);
len = sizeof(media_info);
ndis_get_info(sc, OID_GEN_LINK_SPEED,
(void *)&media_info, &len);
if (linkstate == nmc_connected)
ifmr->ifm_status |= IFM_ACTIVE;
switch (media_info) {
case 100000:
ifmr->ifm_active |= IFM_10_T;
break;
case 1000000:
ifmr->ifm_active |= IFM_100_TX;
break;
case 10000000:
ifmr->ifm_active |= IFM_1000_T;
break;
default:
device_printf(sc->ndis_dev, "unknown speed: %d\n", media_info);
break;
}
}
static int
-ndis_set_cipher(sc, cipher)
- struct ndis_softc *sc;
- int cipher;
+ndis_set_cipher(struct ndis_softc *sc, int cipher)
{
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->ndis_ic;
int rval = 0, len;
uint32_t arg, save;
- ic = sc->ifp->if_l2com;
-
len = sizeof(arg);
if (cipher == WPA_CSE_WEP40 || cipher == WPA_CSE_WEP104) {
if (!(ic->ic_cryptocaps & IEEE80211_CRYPTO_WEP))
return (ENOTSUP);
arg = NDIS_80211_WEPSTAT_ENC1ENABLED;
}
if (cipher == WPA_CSE_TKIP) {
if (!(ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP))
return (ENOTSUP);
arg = NDIS_80211_WEPSTAT_ENC2ENABLED;
}
if (cipher == WPA_CSE_CCMP) {
if (!(ic->ic_cryptocaps & IEEE80211_CRYPTO_AES_CCM))
return (ENOTSUP);
arg = NDIS_80211_WEPSTAT_ENC3ENABLED;
}
DPRINTF(("Setting cipher to %d\n", arg));
save = arg;
rval = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &len);
if (rval)
return (rval);
/* Check that the cipher was set correctly. */
len = sizeof(save);
rval = ndis_get_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &len);
if (rval != 0 || arg != save)
return (ENODEV);
return (0);
}
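ndis_set_cipher() above does not trust the miniport to honor OID_802_11_ENCRYPTION_STATUS: it writes the value, reads it back, and reports ENODEV if the readback differs. A small self-contained sketch of that write-then-verify pattern, using a stubbed OID store rather than the real ndis_set_info()/ndis_get_info():

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical OID store standing in for ndis_set_info()/ndis_get_info(). */
static uint32_t oid_store;
static int set_oid(uint32_t v) { oid_store = v & 0x3; return (0); }	/* device may silently mask bits */
static int get_oid(uint32_t *v) { *v = oid_store; return (0); }

/* Write a value and verify the device actually kept it. */
static int
set_and_verify(uint32_t want)
{
	uint32_t got;

	if (set_oid(want) != 0)
		return (EIO);
	if (get_oid(&got) != 0 || got != want)
		return (ENODEV);	/* same failure ndis_set_cipher() reports */
	return (0);
}

int
main(void)
{
	printf("set 2 -> %d\n", set_and_verify(2));	/* kept, returns 0 */
	printf("set 8 -> %d\n", set_and_verify(8));	/* masked away, returns ENODEV */
	return (0);
}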
/*
* WPA is hairy to set up. Do the work in a separate routine
* so we don't clutter the setstate function too much.
* Important yet undocumented fact: first we have to set the
* authentication mode, _then_ we enable the ciphers. If one
* of the WPA authentication modes isn't enabled, the driver
* might not permit the TKIP or AES ciphers to be selected.
*/
static int
ndis_set_wpa(sc, ie, ielen)
struct ndis_softc *sc;
void *ie;
int ielen;
{
struct ieee80211_ie_wpa *w;
struct ndis_ie *n;
char *pos;
uint32_t arg;
int i;
/*
* Apparently, the only way for us to know what ciphers
* and key management/authentication mode to use is for
* us to inspect the optional information element (IE)
* stored in the 802.11 state machine. This IE should be
* supplied by the WPA supplicant.
*/
w = (struct ieee80211_ie_wpa *)ie;
/* Check for the right kind of IE. */
if (w->wpa_id != IEEE80211_ELEMID_VENDOR) {
DPRINTF(("Incorrect IE type %d\n", w->wpa_id));
return (EINVAL);
}
/* Skip over the ucast cipher OIDs. */
pos = (char *)&w->wpa_uciphers[0];
pos += w->wpa_uciphercnt * sizeof(struct ndis_ie);
/* Skip over the authmode count. */
pos += sizeof(u_int16_t);
/*
* Check for the authentication modes. I'm
* pretty sure there's only supposed to be one.
*/
n = (struct ndis_ie *)pos;
if (n->ni_val == WPA_ASE_NONE)
arg = NDIS_80211_AUTHMODE_WPANONE;
if (n->ni_val == WPA_ASE_8021X_UNSPEC)
arg = NDIS_80211_AUTHMODE_WPA;
if (n->ni_val == WPA_ASE_8021X_PSK)
arg = NDIS_80211_AUTHMODE_WPAPSK;
DPRINTF(("Setting WPA auth mode to %d\n", arg));
i = sizeof(arg);
if (ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &i))
return (ENOTSUP);
i = sizeof(arg);
ndis_get_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &i);
/* Now configure the desired ciphers. */
/* First, set up the multicast group cipher. */
n = (struct ndis_ie *)&w->wpa_mcipher[0];
if (ndis_set_cipher(sc, n->ni_val))
return (ENOTSUP);
/* Now start looking around for the unicast ciphers. */
pos = (char *)&w->wpa_uciphers[0];
n = (struct ndis_ie *)pos;
for (i = 0; i < w->wpa_uciphercnt; i++) {
if (ndis_set_cipher(sc, n->ni_val))
return (ENOTSUP);
n++;
}
return (0);
}
static void
ndis_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
struct ieee80211vap *vap = ifp->if_softc;
- struct ndis_softc *sc = vap->iv_ic->ic_ifp->if_softc;
+ struct ndis_softc *sc = vap->iv_ic->ic_softc;
uint32_t txrate;
int len;
if (!NDIS_INITIALIZED(sc))
return;
len = sizeof(txrate);
if (ndis_get_info(sc, OID_GEN_LINK_SPEED, &txrate, &len) == 0)
vap->iv_bss->ni_txrate = txrate / 5000;
ieee80211_media_status(ifp, imr);
}
static void
-ndis_setstate_80211(sc)
- struct ndis_softc *sc;
+ndis_setstate_80211(struct ndis_softc *sc)
{
- struct ieee80211com *ic;
- struct ieee80211vap *vap;
+ struct ieee80211com *ic = &sc->ndis_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
ndis_80211_macaddr bssid;
ndis_80211_config config;
int rval = 0, len;
uint32_t arg;
- struct ifnet *ifp;
- ifp = sc->ifp;
- ic = ifp->if_l2com;
- vap = TAILQ_FIRST(&ic->ic_vaps);
-
if (!NDIS_INITIALIZED(sc)) {
DPRINTF(("%s: NDIS not initialized\n", __func__));
return;
}
/* Disassociate and turn off radio. */
len = sizeof(arg);
arg = 1;
ndis_set_info(sc, OID_802_11_DISASSOCIATE, &arg, &len);
/* Set network infrastructure mode. */
len = sizeof(arg);
if (ic->ic_opmode == IEEE80211_M_IBSS)
arg = NDIS_80211_NET_INFRA_IBSS;
else
arg = NDIS_80211_NET_INFRA_BSS;
rval = ndis_set_info(sc, OID_802_11_INFRASTRUCTURE_MODE, &arg, &len);
if (rval)
device_printf (sc->ndis_dev, "set infra failed: %d\n", rval);
/* Set power management */
len = sizeof(arg);
if (vap->iv_flags & IEEE80211_F_PMGTON)
arg = NDIS_80211_POWERMODE_FAST_PSP;
else
arg = NDIS_80211_POWERMODE_CAM;
ndis_set_info(sc, OID_802_11_POWER_MODE, &arg, &len);
/* Set TX power */
if ((ic->ic_caps & IEEE80211_C_TXPMGT) &&
ic->ic_txpowlimit < (sizeof(dBm2mW) / sizeof(dBm2mW[0]))) {
arg = dBm2mW[ic->ic_txpowlimit];
len = sizeof(arg);
ndis_set_info(sc, OID_802_11_TX_POWER_LEVEL, &arg, &len);
}
/*
* Default encryption mode to off, authentication
* to open and privacy to 'accept everything.'
*/
len = sizeof(arg);
arg = NDIS_80211_WEPSTAT_DISABLED;
ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &len);
len = sizeof(arg);
arg = NDIS_80211_AUTHMODE_OPEN;
ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &len);
/*
* Note that OID_802_11_PRIVACY_FILTER is optional:
* not all drivers implement it.
*/
len = sizeof(arg);
arg = NDIS_80211_PRIVFILT_8021XWEP;
ndis_set_info(sc, OID_802_11_PRIVACY_FILTER, &arg, &len);
len = sizeof(config);
bzero((char *)&config, len);
config.nc_length = len;
config.nc_fhconfig.ncf_length = sizeof(ndis_80211_config_fh);
rval = ndis_get_info(sc, OID_802_11_CONFIGURATION, &config, &len);
/*
* Some drivers expect us to initialize these values, so
* provide some defaults.
*/
if (config.nc_beaconperiod == 0)
config.nc_beaconperiod = 100;
if (config.nc_atimwin == 0)
config.nc_atimwin = 100;
if (config.nc_fhconfig.ncf_dwelltime == 0)
config.nc_fhconfig.ncf_dwelltime = 200;
if (rval == 0 && ic->ic_bsschan != IEEE80211_CHAN_ANYC) {
int chan, chanflag;
chan = ieee80211_chan2ieee(ic, ic->ic_bsschan);
chanflag = config.nc_dsconfig > 2500000 ? IEEE80211_CHAN_2GHZ :
IEEE80211_CHAN_5GHZ;
if (chan != ieee80211_mhz2ieee(config.nc_dsconfig / 1000, 0)) {
config.nc_dsconfig =
ic->ic_bsschan->ic_freq * 1000;
len = sizeof(config);
config.nc_length = len;
config.nc_fhconfig.ncf_length =
sizeof(ndis_80211_config_fh);
DPRINTF(("Setting channel to %ukHz\n", config.nc_dsconfig));
rval = ndis_set_info(sc, OID_802_11_CONFIGURATION,
&config, &len);
if (rval)
device_printf(sc->ndis_dev, "couldn't change "
"DS config to %ukHz: %d\n",
config.nc_dsconfig, rval);
}
} else if (rval)
device_printf(sc->ndis_dev, "couldn't retrieve "
"channel info: %d\n", rval);
/* Set the BSSID to our value so the driver doesn't associate */
len = IEEE80211_ADDR_LEN;
- bcopy(IF_LLADDR(ifp), bssid, len);
+ bcopy(vap->iv_myaddr, bssid, len);
DPRINTF(("Setting BSSID to %6D\n", (uint8_t *)&bssid, ":"));
rval = ndis_set_info(sc, OID_802_11_BSSID, &bssid, &len);
if (rval)
device_printf(sc->ndis_dev,
"setting BSSID failed: %d\n", rval);
}
static void
-ndis_auth_and_assoc(sc, vap)
- struct ndis_softc *sc;
- struct ieee80211vap *vap;
+ndis_auth_and_assoc(struct ndis_softc *sc, struct ieee80211vap *vap)
{
- struct ieee80211com *ic;
- struct ieee80211_node *ni;
+ struct ieee80211_node *ni = vap->iv_bss;
ndis_80211_ssid ssid;
ndis_80211_macaddr bssid;
ndis_80211_wep wep;
int i, rval = 0, len, error;
uint32_t arg;
- struct ifnet *ifp;
- ifp = sc->ifp;
- ic = ifp->if_l2com;
- ni = vap->iv_bss;
-
if (!NDIS_INITIALIZED(sc)) {
DPRINTF(("%s: NDIS not initialized\n", __func__));
return;
}
/* Initial setup */
ndis_setstate_80211(sc);
/* Set network infrastructure mode. */
len = sizeof(arg);
if (vap->iv_opmode == IEEE80211_M_IBSS)
arg = NDIS_80211_NET_INFRA_IBSS;
else
arg = NDIS_80211_NET_INFRA_BSS;
rval = ndis_set_info(sc, OID_802_11_INFRASTRUCTURE_MODE, &arg, &len);
if (rval)
device_printf (sc->ndis_dev, "set infra failed: %d\n", rval);
/* Set RTS threshold */
len = sizeof(arg);
arg = vap->iv_rtsthreshold;
ndis_set_info(sc, OID_802_11_RTS_THRESHOLD, &arg, &len);
/* Set fragmentation threshold */
len = sizeof(arg);
arg = vap->iv_fragthreshold;
ndis_set_info(sc, OID_802_11_FRAGMENTATION_THRESHOLD, &arg, &len);
/* Set WEP */
if (vap->iv_flags & IEEE80211_F_PRIVACY &&
!(vap->iv_flags & IEEE80211_F_WPA)) {
int keys_set = 0;
if (ni->ni_authmode == IEEE80211_AUTH_SHARED) {
len = sizeof(arg);
arg = NDIS_80211_AUTHMODE_SHARED;
DPRINTF(("Setting shared auth\n"));
ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE,
&arg, &len);
}
for (i = 0; i < IEEE80211_WEP_NKID; i++) {
if (vap->iv_nw_keys[i].wk_keylen) {
if (vap->iv_nw_keys[i].wk_cipher->ic_cipher !=
IEEE80211_CIPHER_WEP)
continue;
bzero((char *)&wep, sizeof(wep));
wep.nw_keylen = vap->iv_nw_keys[i].wk_keylen;
/*
* 5, 13 and 16 are the only valid
* key lengths. Anything in between
* will be zero padded out to the
* next highest boundary.
*/
if (vap->iv_nw_keys[i].wk_keylen < 5)
wep.nw_keylen = 5;
else if (vap->iv_nw_keys[i].wk_keylen > 5 &&
vap->iv_nw_keys[i].wk_keylen < 13)
wep.nw_keylen = 13;
else if (vap->iv_nw_keys[i].wk_keylen > 13 &&
vap->iv_nw_keys[i].wk_keylen < 16)
wep.nw_keylen = 16;
wep.nw_keyidx = i;
wep.nw_length = (sizeof(uint32_t) * 3)
+ wep.nw_keylen;
if (i == vap->iv_def_txkey)
wep.nw_keyidx |= NDIS_80211_WEPKEY_TX;
bcopy(vap->iv_nw_keys[i].wk_key,
wep.nw_keydata, wep.nw_length);
len = sizeof(wep);
DPRINTF(("Setting WEP key %d\n", i));
rval = ndis_set_info(sc,
OID_802_11_ADD_WEP, &wep, &len);
if (rval)
device_printf(sc->ndis_dev,
"set wepkey failed: %d\n", rval);
keys_set++;
}
}
if (keys_set) {
DPRINTF(("Setting WEP on\n"));
arg = NDIS_80211_WEPSTAT_ENABLED;
len = sizeof(arg);
rval = ndis_set_info(sc,
OID_802_11_WEP_STATUS, &arg, &len);
if (rval)
device_printf(sc->ndis_dev,
"enable WEP failed: %d\n", rval);
if (vap->iv_flags & IEEE80211_F_DROPUNENC)
arg = NDIS_80211_PRIVFILT_8021XWEP;
else
arg = NDIS_80211_PRIVFILT_ACCEPTALL;
len = sizeof(arg);
ndis_set_info(sc,
OID_802_11_PRIVACY_FILTER, &arg, &len);
}
}
/* Set up WPA. */
if ((vap->iv_flags & IEEE80211_F_WPA) &&
vap->iv_appie_assocreq != NULL) {
struct ieee80211_appie *ie = vap->iv_appie_assocreq;
error = ndis_set_wpa(sc, ie->ie_data, ie->ie_len);
if (error != 0)
device_printf(sc->ndis_dev, "WPA setup failed\n");
}
#ifdef notyet
/* Set network type. */
arg = 0;
switch (vap->iv_curmode) {
case IEEE80211_MODE_11A:
arg = NDIS_80211_NETTYPE_11OFDM5;
break;
case IEEE80211_MODE_11B:
arg = NDIS_80211_NETTYPE_11DS;
break;
case IEEE80211_MODE_11G:
arg = NDIS_80211_NETTYPE_11OFDM24;
break;
default:
device_printf(sc->ndis_dev, "unknown mode: %d\n",
vap->iv_curmode);
}
if (arg) {
DPRINTF(("Setting network type to %d\n", arg));
len = sizeof(arg);
rval = ndis_set_info(sc, OID_802_11_NETWORK_TYPE_IN_USE,
&arg, &len);
if (rval)
device_printf(sc->ndis_dev,
"set nettype failed: %d\n", rval);
}
#endif
/*
* If the user selected a specific BSSID, try
* to use that one. This is useful in the case where
* there are several APs in range with the same network
* name. To delete the BSSID, we use the broadcast
* address as the BSSID.
* Note that some drivers seem to allow setting a BSSID
* in ad-hoc mode, which has the effect of forcing the
* NIC to create an ad-hoc cell with a specific BSSID,
* instead of a randomly chosen one. However, the net80211
* code makes the assumption that the BSSID setting is invalid
* when you're in ad-hoc mode, so we don't allow that here.
*/
len = IEEE80211_ADDR_LEN;
if (vap->iv_flags & IEEE80211_F_DESBSSID &&
vap->iv_opmode != IEEE80211_M_IBSS)
bcopy(ni->ni_bssid, bssid, len);
else
- bcopy(ifp->if_broadcastaddr, bssid, len);
+ bcopy(ieee80211broadcastaddr, bssid, len);
DPRINTF(("Setting BSSID to %6D\n", (uint8_t *)&bssid, ":"));
rval = ndis_set_info(sc, OID_802_11_BSSID, &bssid, &len);
if (rval)
device_printf(sc->ndis_dev,
"setting BSSID failed: %d\n", rval);
/* Set SSID -- always do this last. */
#ifdef NDIS_DEBUG
if (ndis_debug > 0) {
printf("Setting ESSID to ");
ieee80211_print_essid(ni->ni_essid, ni->ni_esslen);
printf("\n");
}
#endif
len = sizeof(ssid);
bzero((char *)&ssid, len);
ssid.ns_ssidlen = ni->ni_esslen;
if (ssid.ns_ssidlen == 0) {
ssid.ns_ssidlen = 1;
} else
bcopy(ni->ni_essid, ssid.ns_ssid, ssid.ns_ssidlen);
rval = ndis_set_info(sc, OID_802_11_SSID, &ssid, &len);
if (rval)
device_printf (sc->ndis_dev, "set ssid failed: %d\n", rval);
return;
}
static int
ndis_get_bssid_list(sc, bl)
struct ndis_softc *sc;
ndis_80211_bssid_list_ex **bl;
{
int len, error;
len = sizeof(uint32_t) + (sizeof(ndis_wlan_bssid_ex) * 16);
*bl = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (*bl == NULL)
return (ENOMEM);
error = ndis_get_info(sc, OID_802_11_BSSID_LIST, *bl, &len);
if (error == ENOSPC) {
free(*bl, M_DEVBUF);
*bl = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
if (*bl == NULL)
return (ENOMEM);
error = ndis_get_info(sc, OID_802_11_BSSID_LIST, *bl, &len);
}
if (error) {
DPRINTF(("%s: failed to read\n", __func__));
free(*bl, M_DEVBUF);
return (error);
}
return (0);
}
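ndis_get_bssid_list() above sizes its first buffer for 16 ndis_wlan_bssid_ex entries and, when the query fails with ENOSPC, retries once using the length the miniport wrote back. A self-contained sketch of that grow-on-ENOSPC retry, with a stub query standing in for ndis_get_info():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stub query: needs 4096 bytes and reports the required size via *len on ENOSPC. */
static int
query(void *buf, int *len)
{
	if (*len < 4096) {
		*len = 4096;
		return (ENOSPC);
	}
	memset(buf, 0, *len);
	return (0);
}

/* Allocate a guess-sized buffer, then retry once with the size the query asked for. */
static int
get_list(void **bl)
{
	int len = 512, error;

	*bl = calloc(1, len);
	if (*bl == NULL)
		return (ENOMEM);
	error = query(*bl, &len);
	if (error == ENOSPC) {
		free(*bl);
		*bl = calloc(1, len);
		if (*bl == NULL)
			return (ENOMEM);
		error = query(*bl, &len);
	}
	if (error != 0) {
		free(*bl);
		*bl = NULL;
	}
	return (error);
}

int
main(void)
{
	void *bl;
	int error = get_list(&bl);

	printf("get_list: %d\n", error);
	free(bl);
	return (0);
}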
static int
-ndis_get_assoc(sc, assoc)
- struct ndis_softc *sc;
- ndis_wlan_bssid_ex **assoc;
+ndis_get_assoc(struct ndis_softc *sc, ndis_wlan_bssid_ex **assoc)
{
- struct ifnet *ifp = sc->ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->ndis_ic;
struct ieee80211vap *vap;
struct ieee80211_node *ni;
ndis_80211_bssid_list_ex *bl;
ndis_wlan_bssid_ex *bs;
ndis_80211_macaddr bssid;
int i, len, error;
if (!sc->ndis_link)
return (ENOENT);
len = sizeof(bssid);
error = ndis_get_info(sc, OID_802_11_BSSID, &bssid, &len);
if (error) {
device_printf(sc->ndis_dev, "failed to get bssid\n");
return (ENOENT);
}
vap = TAILQ_FIRST(&ic->ic_vaps);
ni = vap->iv_bss;
error = ndis_get_bssid_list(sc, &bl);
if (error)
return (error);
bs = (ndis_wlan_bssid_ex *)&bl->nblx_bssid[0];
for (i = 0; i < bl->nblx_items; i++) {
if (bcmp(bs->nwbx_macaddr, bssid, sizeof(bssid)) == 0) {
*assoc = malloc(bs->nwbx_len, M_TEMP, M_NOWAIT);
if (*assoc == NULL) {
free(bl, M_TEMP);
return (ENOMEM);
}
bcopy((char *)bs, (char *)*assoc, bs->nwbx_len);
free(bl, M_TEMP);
if (ic->ic_opmode == IEEE80211_M_STA)
ni->ni_associd = 1 | 0xc000; /* fake associd */
return (0);
}
bs = (ndis_wlan_bssid_ex *)((char *)bs + bs->nwbx_len);
}
free(bl, M_TEMP);
return (ENOENT);
}
static void
-ndis_getstate_80211(sc)
- struct ndis_softc *sc;
+ndis_getstate_80211(struct ndis_softc *sc)
{
- struct ieee80211com *ic;
- struct ieee80211vap *vap;
- struct ieee80211_node *ni;
+ struct ieee80211com *ic = &sc->ndis_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct ieee80211_node *ni = vap->iv_bss;
ndis_wlan_bssid_ex *bs;
int rval, len, i = 0;
int chanflag;
uint32_t arg;
- struct ifnet *ifp;
- ifp = sc->ifp;
- ic = ifp->if_l2com;
- vap = TAILQ_FIRST(&ic->ic_vaps);
- ni = vap->iv_bss;
-
if (!NDIS_INITIALIZED(sc))
return;
if ((rval = ndis_get_assoc(sc, &bs)) != 0)
return;
/* We're associated, retrieve info on the current bssid. */
ic->ic_curmode = ndis_nettype_mode(bs->nwbx_nettype);
chanflag = ndis_nettype_chan(bs->nwbx_nettype);
IEEE80211_ADDR_COPY(ni->ni_bssid, bs->nwbx_macaddr);
/* Get SSID from current association info. */
bcopy(bs->nwbx_ssid.ns_ssid, ni->ni_essid,
bs->nwbx_ssid.ns_ssidlen);
ni->ni_esslen = bs->nwbx_ssid.ns_ssidlen;
if (ic->ic_caps & IEEE80211_C_PMGT) {
len = sizeof(arg);
rval = ndis_get_info(sc, OID_802_11_POWER_MODE, &arg, &len);
if (rval)
device_printf(sc->ndis_dev,
"get power mode failed: %d\n", rval);
if (arg == NDIS_80211_POWERMODE_CAM)
vap->iv_flags &= ~IEEE80211_F_PMGTON;
else
vap->iv_flags |= IEEE80211_F_PMGTON;
}
/* Get TX power */
if (ic->ic_caps & IEEE80211_C_TXPMGT) {
len = sizeof(arg);
ndis_get_info(sc, OID_802_11_TX_POWER_LEVEL, &arg, &len);
for (i = 0; i < (sizeof(dBm2mW) / sizeof(dBm2mW[0])); i++)
if (dBm2mW[i] >= arg)
break;
ic->ic_txpowlimit = i;
}
/*
* Use the current association information to reflect
* what channel we're on.
*/
ic->ic_curchan = ieee80211_find_channel(ic,
bs->nwbx_config.nc_dsconfig / 1000, chanflag);
if (ic->ic_curchan == NULL)
ic->ic_curchan = &ic->ic_channels[0];
ni->ni_chan = ic->ic_curchan;
ic->ic_bsschan = ic->ic_curchan;
free(bs, M_TEMP);
/*
* Determine current authentication mode.
*/
len = sizeof(arg);
rval = ndis_get_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &len);
if (rval)
device_printf(sc->ndis_dev,
"get authmode status failed: %d\n", rval);
else {
vap->iv_flags &= ~IEEE80211_F_WPA;
switch (arg) {
case NDIS_80211_AUTHMODE_OPEN:
ni->ni_authmode = IEEE80211_AUTH_OPEN;
break;
case NDIS_80211_AUTHMODE_SHARED:
ni->ni_authmode = IEEE80211_AUTH_SHARED;
break;
case NDIS_80211_AUTHMODE_AUTO:
ni->ni_authmode = IEEE80211_AUTH_AUTO;
break;
case NDIS_80211_AUTHMODE_WPA:
case NDIS_80211_AUTHMODE_WPAPSK:
case NDIS_80211_AUTHMODE_WPANONE:
ni->ni_authmode = IEEE80211_AUTH_WPA;
vap->iv_flags |= IEEE80211_F_WPA1;
break;
case NDIS_80211_AUTHMODE_WPA2:
case NDIS_80211_AUTHMODE_WPA2PSK:
ni->ni_authmode = IEEE80211_AUTH_WPA;
vap->iv_flags |= IEEE80211_F_WPA2;
break;
default:
ni->ni_authmode = IEEE80211_AUTH_NONE;
break;
}
}
len = sizeof(arg);
rval = ndis_get_info(sc, OID_802_11_WEP_STATUS, &arg, &len);
if (rval)
device_printf(sc->ndis_dev,
"get wep status failed: %d\n", rval);
if (arg == NDIS_80211_WEPSTAT_ENABLED)
vap->iv_flags |= IEEE80211_F_PRIVACY|IEEE80211_F_DROPUNENC;
else
vap->iv_flags &= ~(IEEE80211_F_PRIVACY|IEEE80211_F_DROPUNENC);
}
static int
ndis_ioctl(ifp, command, data)
struct ifnet *ifp;
u_long command;
caddr_t data;
{
struct ndis_softc *sc = ifp->if_softc;
struct ifreq *ifr = (struct ifreq *) data;
int i, error = 0;
/*NDIS_LOCK(sc);*/
switch (command) {
case SIOCSIFFLAGS:
if (ifp->if_flags & IFF_UP) {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
+ if (sc->ndis_running &&
ifp->if_flags & IFF_PROMISC &&
!(sc->ndis_if_flags & IFF_PROMISC)) {
sc->ndis_filter |=
NDIS_PACKET_TYPE_PROMISCUOUS;
i = sizeof(sc->ndis_filter);
error = ndis_set_info(sc,
OID_GEN_CURRENT_PACKET_FILTER,
&sc->ndis_filter, &i);
- } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
+ } else if (sc->ndis_running &&
!(ifp->if_flags & IFF_PROMISC) &&
sc->ndis_if_flags & IFF_PROMISC) {
sc->ndis_filter &=
~NDIS_PACKET_TYPE_PROMISCUOUS;
i = sizeof(sc->ndis_filter);
error = ndis_set_info(sc,
OID_GEN_CURRENT_PACKET_FILTER,
&sc->ndis_filter, &i);
} else
ndis_init(sc);
} else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->ndis_running)
ndis_stop(sc);
}
sc->ndis_if_flags = ifp->if_flags;
error = 0;
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
ndis_setmulti(sc);
error = 0;
break;
case SIOCGIFMEDIA:
case SIOCSIFMEDIA:
error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
break;
case SIOCSIFCAP:
ifp->if_capenable = ifr->ifr_reqcap;
if (ifp->if_capenable & IFCAP_TXCSUM)
ifp->if_hwassist = sc->ndis_hwassist;
else
ifp->if_hwassist = 0;
ndis_set_offload(sc);
break;
default:
error = ether_ioctl(ifp, command, data);
break;
}
/*NDIS_UNLOCK(sc);*/
return(error);
}
static int
-ndis_ioctl_80211(ifp, command, data)
- struct ifnet *ifp;
- u_long command;
- caddr_t data;
+ndis_80211ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
- struct ndis_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *) data;
- struct ndis_oid_data oid;
- struct ndis_evt evt;
- void *oidbuf;
- int error = 0;
+ struct ndis_softc *sc = ic->ic_softc;
+ struct ifreq *ifr = data;
+ struct ndis_oid_data oid;
+ struct ndis_evt evt;
+ void *oidbuf = NULL;
+ int error = 0;
- switch (command) {
- case SIOCSIFFLAGS:
- /*NDIS_LOCK(sc);*/
- if (ifp->if_flags & IFF_UP) {
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
- ndis_init(sc);
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ndis_stop(sc);
- }
- sc->ndis_if_flags = ifp->if_flags;
- error = 0;
- /*NDIS_UNLOCK(sc);*/
- break;
+ if ((error = priv_check(curthread, PRIV_DRIVER)) != 0)
+ return (error);
+
+ switch (cmd) {
case SIOCGDRVSPEC:
- if ((error = priv_check(curthread, PRIV_DRIVER)))
- break;
- error = copyin(ifr->ifr_data, &oid, sizeof(oid));
+ case SIOCSDRVSPEC:
+ error = copyin(ifr->ifr_data, &oid, sizeof(oid));
if (error)
break;
- oidbuf = malloc(oid.len, M_TEMP, M_NOWAIT|M_ZERO);
- if (oidbuf == NULL) {
- error = ENOMEM;
- break;
- }
- error = copyin(ifr->ifr_data + sizeof(oid), oidbuf, oid.len);
- if (error) {
- free(oidbuf, M_TEMP);
- break;
- }
- error = ndis_get_info(sc, oid.oid, oidbuf, &oid.len);
- if (error) {
- free(oidbuf, M_TEMP);
- break;
- }
- error = copyout(&oid, ifr->ifr_data, sizeof(oid));
- if (error) {
- free(oidbuf, M_TEMP);
- break;
- }
- error = copyout(oidbuf, ifr->ifr_data + sizeof(oid), oid.len);
+ oidbuf = malloc(oid.len, M_TEMP, M_WAITOK | M_ZERO);
+ error = copyin(ifr->ifr_data + sizeof(oid), oidbuf, oid.len);
+ }
+
+ if (error) {
free(oidbuf, M_TEMP);
+ return (error);
+ }
+
+ switch (cmd) {
+ case SIOCGDRVSPEC:
+ error = ndis_get_info(sc, oid.oid, oidbuf, &oid.len);
break;
case SIOCSDRVSPEC:
- if ((error = priv_check(curthread, PRIV_DRIVER)))
- break;
- error = copyin(ifr->ifr_data, &oid, sizeof(oid));
- if (error)
- break;
- oidbuf = malloc(oid.len, M_TEMP, M_NOWAIT|M_ZERO);
- if (oidbuf == NULL) {
- error = ENOMEM;
- break;
- }
- error = copyin(ifr->ifr_data + sizeof(oid), oidbuf, oid.len);
- if (error) {
- free(oidbuf, M_TEMP);
- break;
- }
error = ndis_set_info(sc, oid.oid, oidbuf, &oid.len);
- if (error) {
- free(oidbuf, M_TEMP);
- break;
- }
- error = copyout(&oid, ifr->ifr_data, sizeof(oid));
- if (error) {
- free(oidbuf, M_TEMP);
- break;
- }
- error = copyout(oidbuf, ifr->ifr_data + sizeof(oid), oid.len);
- free(oidbuf, M_TEMP);
break;
case SIOCGPRIVATE_0:
- if ((error = priv_check(curthread, PRIV_DRIVER)))
- break;
NDIS_LOCK(sc);
if (sc->ndis_evt[sc->ndis_evtcidx].ne_sts == 0) {
error = ENOENT;
NDIS_UNLOCK(sc);
break;
}
- error = copyin(ifr->ifr_data, &evt, sizeof(evt));
+ error = copyin(ifr->ifr_data, &evt, sizeof(evt));
if (error) {
NDIS_UNLOCK(sc);
break;
}
if (evt.ne_len < sc->ndis_evt[sc->ndis_evtcidx].ne_len) {
error = ENOSPC;
NDIS_UNLOCK(sc);
break;
}
error = copyout(&sc->ndis_evt[sc->ndis_evtcidx],
ifr->ifr_data, sizeof(uint32_t) * 2);
if (error) {
NDIS_UNLOCK(sc);
break;
}
if (sc->ndis_evt[sc->ndis_evtcidx].ne_len) {
error = copyout(sc->ndis_evt[sc->ndis_evtcidx].ne_buf,
ifr->ifr_data + (sizeof(uint32_t) * 2),
sc->ndis_evt[sc->ndis_evtcidx].ne_len);
if (error) {
NDIS_UNLOCK(sc);
break;
}
free(sc->ndis_evt[sc->ndis_evtcidx].ne_buf, M_TEMP);
sc->ndis_evt[sc->ndis_evtcidx].ne_buf = NULL;
}
sc->ndis_evt[sc->ndis_evtcidx].ne_len = 0;
sc->ndis_evt[sc->ndis_evtcidx].ne_sts = 0;
NDIS_EVTINC(sc->ndis_evtcidx);
NDIS_UNLOCK(sc);
break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, command);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, command, data);
- break;
default:
- error = EINVAL;
+ error = ENOTTY;
break;
}
+
+ switch (cmd) {
+ case SIOCGDRVSPEC:
+ case SIOCSDRVSPEC:
+ error = copyout(&oid, ifr->ifr_data, sizeof(oid));
+ if (error)
+ break;
+ error = copyout(oidbuf, ifr->ifr_data + sizeof(oid), oid.len);
+ }
+
+ free(oidbuf, M_TEMP);
+
return (error);
}
int
-ndis_del_key(vap, key)
- struct ieee80211vap *vap;
- const struct ieee80211_key *key;
+ndis_del_key(struct ieee80211vap *vap, const struct ieee80211_key *key)
{
- struct ndis_softc *sc;
+ struct ndis_softc *sc = vap->iv_ic->ic_softc;
ndis_80211_key rkey;
int len, error = 0;
- sc = vap->iv_ic->ic_ifp->if_softc;
-
bzero((char *)&rkey, sizeof(rkey));
len = sizeof(rkey);
rkey.nk_len = len;
rkey.nk_keyidx = key->wk_keyix;
bcopy(vap->iv_ifp->if_broadcastaddr,
rkey.nk_bssid, IEEE80211_ADDR_LEN);
error = ndis_set_info(sc, OID_802_11_REMOVE_KEY, &rkey, &len);
if (error)
return (0);
return (1);
}
/*
* In theory this could be called for any key, but we'll
* only use it for WPA TKIP or AES keys. These need to be
* set after initial authentication with the AP.
*/
static int
-ndis_add_key(vap, key, mac)
- struct ieee80211vap *vap;
- const struct ieee80211_key *key;
- const uint8_t mac[IEEE80211_ADDR_LEN];
+ndis_add_key(struct ieee80211vap *vap, const struct ieee80211_key *key,
+ const uint8_t mac[IEEE80211_ADDR_LEN])
{
- struct ndis_softc *sc;
- struct ifnet *ifp;
+ struct ndis_softc *sc = vap->iv_ic->ic_softc;
ndis_80211_key rkey;
int len, error = 0;
- ifp = vap->iv_ic->ic_ifp;
- sc = ifp->if_softc;
-
switch (key->wk_cipher->ic_cipher) {
case IEEE80211_CIPHER_TKIP:
len = sizeof(ndis_80211_key);
bzero((char *)&rkey, sizeof(rkey));
rkey.nk_len = len;
rkey.nk_keylen = key->wk_keylen;
if (key->wk_flags & IEEE80211_KEY_SWMIC)
rkey.nk_keylen += 16;
/* key index - gets weird in NDIS */
if (key->wk_keyix != IEEE80211_KEYIX_NONE)
rkey.nk_keyidx = key->wk_keyix;
else
rkey.nk_keyidx = 0;
if (key->wk_flags & IEEE80211_KEY_XMIT)
rkey.nk_keyidx |= 1 << 31;
if (key->wk_flags & IEEE80211_KEY_GROUP) {
- bcopy(ifp->if_broadcastaddr,
+ bcopy(ieee80211broadcastaddr,
rkey.nk_bssid, IEEE80211_ADDR_LEN);
} else {
bcopy(vap->iv_bss->ni_bssid,
rkey.nk_bssid, IEEE80211_ADDR_LEN);
/* pairwise key */
rkey.nk_keyidx |= 1 << 30;
}
/* need to set bit 29 based on keyrsc */
rkey.nk_keyrsc = key->wk_keyrsc[0]; /* XXX need tid */
if (rkey.nk_keyrsc)
rkey.nk_keyidx |= 1 << 29;
if (key->wk_flags & IEEE80211_KEY_SWMIC) {
bcopy(key->wk_key, rkey.nk_keydata, 16);
bcopy(key->wk_key + 24, rkey.nk_keydata + 16, 8);
bcopy(key->wk_key + 16, rkey.nk_keydata + 24, 8);
} else
bcopy(key->wk_key, rkey.nk_keydata, key->wk_keylen);
error = ndis_set_info(sc, OID_802_11_ADD_KEY, &rkey, &len);
break;
case IEEE80211_CIPHER_WEP:
error = 0;
break;
/*
* I don't know how to set up keys for the AES
* cipher yet. Is it the same as TKIP?
*/
case IEEE80211_CIPHER_AES_CCM:
default:
error = ENOTTY;
break;
}
/* We need to return 1 for success, 0 for failure. */
if (error)
return (0);
return (1);
}
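For TKIP keys carrying a software MIC, ndis_add_key() above copies the 16-byte temporal key unchanged but swaps the two 8-byte MIC halves (bytes 16-23 and 24-31) before handing the key to the miniport. The reordering in isolation, with made-up key contents:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reorder a 32-byte TKIP key: keep the temporal key, swap the 8-byte MIC halves. */
static void
reorder_tkip_key(const uint8_t in[32], uint8_t out[32])
{
	memcpy(out, in, 16);		/* temporal key */
	memcpy(out + 16, in + 24, 8);	/* second MIC half moves first */
	memcpy(out + 24, in + 16, 8);	/* first MIC half moves second */
}

int
main(void)
{
	uint8_t in[32], out[32];
	int i;

	for (i = 0; i < 32; i++)
		in[i] = (uint8_t)i;
	reorder_tkip_key(in, out);
	printf("out[16]=%d out[24]=%d\n", out[16], out[24]);	/* prints 24 and 16 */
	return (0);
}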
static void
ndis_resettask(d, arg)
device_object *d;
void *arg;
{
struct ndis_softc *sc;
sc = arg;
ndis_reset_nic(sc);
}
/*
* Stop the adapter and free any mbufs allocated to the
* RX and TX lists.
*/
static void
-ndis_stop(sc)
- struct ndis_softc *sc;
+ndis_stop(struct ndis_softc *sc)
{
- struct ifnet *ifp;
int i;
- ifp = sc->ifp;
callout_drain(&sc->ndis_stat_callout);
NDIS_LOCK(sc);
sc->ndis_tx_timer = 0;
sc->ndis_link = 0;
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ if (!sc->ndis_80211)
+ sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->ndis_running = 0;
NDIS_UNLOCK(sc);
if (sc->ndis_iftype != PNPBus ||
(sc->ndis_iftype == PNPBus &&
!(sc->ndisusb_status & NDISUSB_STATUS_DETACH) &&
ndisusb_halt != 0))
ndis_halt_nic(sc);
NDIS_LOCK(sc);
for (i = 0; i < NDIS_EVENTS; i++) {
if (sc->ndis_evt[i].ne_sts && sc->ndis_evt[i].ne_buf != NULL) {
free(sc->ndis_evt[i].ne_buf, M_TEMP);
sc->ndis_evt[i].ne_buf = NULL;
}
sc->ndis_evt[i].ne_sts = 0;
sc->ndis_evt[i].ne_len = 0;
}
sc->ndis_evtcidx = 0;
sc->ndis_evtpidx = 0;
NDIS_UNLOCK(sc);
}
/*
* Stop all chip I/O so that the kernel's probe routines don't
* get confused by errant DMAs when rebooting.
*/
void
ndis_shutdown(dev)
device_t dev;
{
struct ndis_softc *sc;
sc = device_get_softc(dev);
ndis_stop(sc);
}
static int
ndis_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct ndis_vap *nvp = NDIS_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
- struct ifnet *ifp = ic->ic_ifp;
- struct ndis_softc *sc = ifp->if_softc;
+ struct ndis_softc *sc = ic->ic_softc;
enum ieee80211_state ostate;
DPRINTF(("%s: %s -> %s\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate]));
ostate = vap->iv_state;
vap->iv_state = nstate;
switch (nstate) {
/* pass on to net80211 */
case IEEE80211_S_INIT:
case IEEE80211_S_SCAN:
return nvp->newstate(vap, nstate, arg);
case IEEE80211_S_ASSOC:
if (ostate != IEEE80211_S_AUTH) {
IEEE80211_UNLOCK(ic);
ndis_auth_and_assoc(sc, vap);
IEEE80211_LOCK(ic);
}
break;
case IEEE80211_S_AUTH:
IEEE80211_UNLOCK(ic);
ndis_auth_and_assoc(sc, vap);
if (vap->iv_state == IEEE80211_S_AUTH) /* XXX */
ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0);
IEEE80211_LOCK(ic);
break;
default:
break;
}
return (0);
}
static void
ndis_scan(void *arg)
{
struct ieee80211vap *vap = arg;
ieee80211_scan_done(vap);
}
static void
ndis_scan_results(struct ndis_softc *sc)
{
- struct ieee80211com *ic;
- struct ieee80211vap *vap;
+ struct ieee80211com *ic = &sc->ndis_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
ndis_80211_bssid_list_ex *bl;
ndis_wlan_bssid_ex *wb;
struct ieee80211_scanparams sp;
struct ieee80211_frame wh;
struct ieee80211_channel *saved_chan;
int i, j;
int rssi, noise, freq, chanflag;
uint8_t ssid[2+IEEE80211_NWID_LEN];
uint8_t rates[2+IEEE80211_RATE_MAXSIZE];
uint8_t *frm, *efrm;
- ic = sc->ifp->if_l2com;
- vap = TAILQ_FIRST(&ic->ic_vaps);
saved_chan = ic->ic_curchan;
noise = -96;
if (ndis_get_bssid_list(sc, &bl))
return;
DPRINTF(("%s: %d results\n", __func__, bl->nblx_items));
wb = &bl->nblx_bssid[0];
for (i = 0; i < bl->nblx_items; i++) {
memset(&sp, 0, sizeof(sp));
memcpy(wh.i_addr2, wb->nwbx_macaddr, sizeof(wh.i_addr2));
memcpy(wh.i_addr3, wb->nwbx_macaddr, sizeof(wh.i_addr3));
rssi = 100 * (wb->nwbx_rssi - noise) / (-32 - noise);
rssi = max(0, min(rssi, 100)); /* limit 0 <= rssi <= 100 */
if (wb->nwbx_privacy)
sp.capinfo |= IEEE80211_CAPINFO_PRIVACY;
sp.bintval = wb->nwbx_config.nc_beaconperiod;
switch (wb->nwbx_netinfra) {
case NDIS_80211_NET_INFRA_IBSS:
sp.capinfo |= IEEE80211_CAPINFO_IBSS;
break;
case NDIS_80211_NET_INFRA_BSS:
sp.capinfo |= IEEE80211_CAPINFO_ESS;
break;
}
sp.rates = &rates[0];
for (j = 0; j < IEEE80211_RATE_MAXSIZE; j++) {
/* XXX - check units */
if (wb->nwbx_supportedrates[j] == 0)
break;
rates[2 + j] =
wb->nwbx_supportedrates[j] & 0x7f;
}
rates[1] = j;
sp.ssid = (uint8_t *)&ssid[0];
memcpy(sp.ssid + 2, &wb->nwbx_ssid.ns_ssid,
wb->nwbx_ssid.ns_ssidlen);
sp.ssid[1] = wb->nwbx_ssid.ns_ssidlen;
chanflag = ndis_nettype_chan(wb->nwbx_nettype);
freq = wb->nwbx_config.nc_dsconfig / 1000;
sp.chan = sp.bchan = ieee80211_mhz2ieee(freq, chanflag);
/* Hack ic->ic_curchan to be in sync with the scan result */
ic->ic_curchan = ieee80211_find_channel(ic, freq, chanflag);
if (ic->ic_curchan == NULL)
ic->ic_curchan = &ic->ic_channels[0];
/* Process extended info from AP */
if (wb->nwbx_len > sizeof(ndis_wlan_bssid)) {
frm = (uint8_t *)&wb->nwbx_ies;
efrm = frm + wb->nwbx_ielen;
if (efrm - frm < 12)
goto done;
sp.tstamp = frm; frm += 8;
sp.bintval = le16toh(*(uint16_t *)frm); frm += 2;
sp.capinfo = le16toh(*(uint16_t *)frm); frm += 2;
sp.ies = frm;
sp.ies_len = efrm - frm;
}
done:
DPRINTF(("scan: bssid %s chan %dMHz (%d/%d) rssi %d\n",
ether_sprintf(wb->nwbx_macaddr), freq, sp.bchan, chanflag,
rssi));
ieee80211_add_scan(vap, ic->ic_curchan, &sp, &wh, 0, rssi, noise);
wb = (ndis_wlan_bssid_ex *)((char *)wb + wb->nwbx_len);
}
free(bl, M_DEVBUF);
/* Restore the channel after messing with it */
ic->ic_curchan = saved_chan;
}
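The scan-result loop above maps the reported RSSI onto net80211's 0-100 scale using a fixed -96 dBm noise floor and a -32 dBm ceiling, then clamps the result. The same arithmetic as a stand-alone function:

#include <stdio.h>

/* Map a dBm reading onto 0..100, assuming a -96 dBm floor and a -32 dBm ceiling. */
static int
rssi_percent(int rssi_dbm)
{
	const int noise = -96;
	int rssi = 100 * (rssi_dbm - noise) / (-32 - noise);

	if (rssi < 0)
		rssi = 0;
	else if (rssi > 100)
		rssi = 100;
	return (rssi);
}

int
main(void)
{
	printf("-96 dBm -> %d%%\n", rssi_percent(-96));	/* 0 */
	printf("-64 dBm -> %d%%\n", rssi_percent(-64));	/* 50 */
	printf("-20 dBm -> %d%%\n", rssi_percent(-20));	/* clamped to 100 */
	return (0);
}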
static void
ndis_scan_start(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct ndis_softc *sc = ifp->if_softc;
+ struct ndis_softc *sc = ic->ic_softc;
struct ieee80211vap *vap;
struct ieee80211_scan_state *ss;
ndis_80211_ssid ssid;
int error, len;
ss = ic->ic_scan;
vap = TAILQ_FIRST(&ic->ic_vaps);
if (!NDIS_INITIALIZED(sc)) {
DPRINTF(("%s: scan aborted\n", __func__));
ieee80211_cancel_scan(vap);
return;
}
len = sizeof(ssid);
bzero((char *)&ssid, len);
if (ss->ss_nssid == 0)
ssid.ns_ssidlen = 1;
else {
/* Perform a directed scan */
ssid.ns_ssidlen = ss->ss_ssid[0].len;
bcopy(ss->ss_ssid[0].ssid, ssid.ns_ssid, ssid.ns_ssidlen);
}
error = ndis_set_info(sc, OID_802_11_SSID, &ssid, &len);
if (error)
DPRINTF(("%s: set ESSID failed\n", __func__));
len = 0;
error = ndis_set_info(sc, OID_802_11_BSSID_LIST_SCAN, NULL, &len);
if (error) {
DPRINTF(("%s: scan command failed\n", __func__));
ieee80211_cancel_scan(vap);
return;
}
/* Set a timer to collect the results */
callout_reset(&sc->ndis_scan_callout, hz * 3, ndis_scan, vap);
}
static void
ndis_set_channel(struct ieee80211com *ic)
{
/* ignore */
}
static void
ndis_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
/* ignore */
}
static void
ndis_scan_mindwell(struct ieee80211_scan_state *ss)
{
/* NB: don't try to abort scan; wait for firmware to finish */
}
static void
ndis_scan_end(struct ieee80211com *ic)
{
- struct ndis_softc *sc = ic->ic_ifp->if_softc;
+ struct ndis_softc *sc = ic->ic_softc;
ndis_scan_results(sc);
}
Index: head/sys/dev/if_ndis/if_ndisvar.h
===================================================================
--- head/sys/dev/if_ndis/if_ndisvar.h (revision 287196)
+++ head/sys/dev/if_ndis/if_ndisvar.h (revision 287197)
@@ -1,253 +1,260 @@
/*-
* Copyright (c) 2003
* Bill Paul <wpaul@windriver.com>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#define NDIS_DEFAULT_NODENAME "FreeBSD NDIS node"
#define NDIS_NODENAME_LEN 32
/* For setting/getting OIDs from userspace. */
struct ndis_oid_data {
uint32_t oid;
uint32_t len;
#ifdef notdef
uint8_t data[1];
#endif
};
struct ndis_pci_type {
uint16_t ndis_vid;
uint16_t ndis_did;
uint32_t ndis_subsys;
char *ndis_name;
};
struct ndis_pccard_type {
const char *ndis_vid;
const char *ndis_did;
char *ndis_name;
};
struct ndis_usb_type {
uint16_t ndis_vid;
uint16_t ndis_did;
char *ndis_name;
};
struct ndis_shmem {
list_entry ndis_list;
bus_dma_tag_t ndis_stag;
bus_dmamap_t ndis_smap;
void *ndis_saddr;
ndis_physaddr ndis_paddr;
};
struct ndis_cfglist {
ndis_cfg ndis_cfg;
struct sysctl_oid *ndis_oid;
TAILQ_ENTRY(ndis_cfglist) link;
};
/*
* Helper struct to make parsing information
* elements easier.
*/
struct ndis_ie {
uint8_t ni_oui[3];
uint8_t ni_val;
};
TAILQ_HEAD(nch, ndis_cfglist);
#define NDIS_INITIALIZED(sc) (sc->ndis_block->nmb_devicectx != NULL)
#define NDIS_TXPKTS 64
#define NDIS_INC(x) \
(x)->ndis_txidx = ((x)->ndis_txidx + 1) % (x)->ndis_maxpkts
#define NDIS_EVENTS 4
#define NDIS_EVTINC(x) (x) = ((x) + 1) % NDIS_EVENTS
struct ndis_evt {
uint32_t ne_sts;
uint32_t ne_len;
char *ne_buf;
};
struct ndis_vap {
struct ieee80211vap vap;
int (*newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define NDIS_VAP(vap) ((struct ndis_vap *)(vap))
#define NDISUSB_CONFIG_NO 0
#define NDISUSB_IFACE_INDEX 0
/* XXX at USB2 there's no USBD_NO_TIMEOUT macro anymore */
#define NDISUSB_NO_TIMEOUT 0
#define NDISUSB_INTR_TIMEOUT 1000
#define NDISUSB_TX_TIMEOUT 10000
struct ndisusb_xfer;
struct ndisusb_ep {
struct usb_xfer *ne_xfer[1];
list_entry ne_active;
list_entry ne_pending;
kspin_lock ne_lock;
uint8_t ne_dirin;
};
struct ndisusb_xfer {
struct ndisusb_ep *nx_ep;
void *nx_priv;
uint8_t *nx_urbbuf;
uint32_t nx_urbactlen;
uint32_t nx_urblen;
uint8_t nx_shortxfer;
list_entry nx_next;
};
struct ndisusb_xferdone {
struct ndisusb_xfer *nd_xfer;
usb_error_t nd_status;
list_entry nd_donelist;
};
struct ndisusb_task {
unsigned nt_type;
#define NDISUSB_TASK_TSTART 0
#define NDISUSB_TASK_IRPCANCEL 1
#define NDISUSB_TASK_VENDOR 2
void *nt_ctx;
list_entry nt_tasklist;
};
struct ndis_softc {
- struct ifnet *ifp;
- struct ifmedia ifmedia; /* media info */
+ u_int ndis_80211:1,
+ ndis_link:1,
+ ndis_running:1;
+ union {
+ struct { /* Ethernet */
+ struct ifnet *ifp;
+ struct ifmedia ifmedia;
+ int ndis_if_flags;
+ };
+ struct { /* Wireless */
+ struct ieee80211com ndis_ic;
+ struct callout ndis_scan_callout;
+ int (*ndis_newstate)(struct ieee80211com *,
+ enum ieee80211_state, int);
+ };
+ };
u_long ndis_hwassist;
uint32_t ndis_v4tx;
uint32_t ndis_v4rx;
bus_space_handle_t ndis_bhandle;
bus_space_tag_t ndis_btag;
void *ndis_intrhand;
struct resource *ndis_irq;
struct resource *ndis_res;
struct resource *ndis_res_io;
int ndis_io_rid;
struct resource *ndis_res_mem;
int ndis_mem_rid;
struct resource *ndis_res_altmem;
int ndis_altmem_rid;
struct resource *ndis_res_am; /* attribute mem (pccard) */
int ndis_am_rid;
struct resource *ndis_res_cm; /* common mem (pccard) */
struct resource_list ndis_rl;
int ndis_rescnt;
struct mtx ndis_mtx;
uint8_t ndis_irql;
device_t ndis_dev;
int ndis_unit;
ndis_miniport_block *ndis_block;
ndis_miniport_characteristics *ndis_chars;
interface_type ndis_type;
- struct callout ndis_scan_callout;
struct callout ndis_stat_callout;
int ndis_maxpkts;
ndis_oid *ndis_oids;
int ndis_oidcnt;
int ndis_txidx;
int ndis_txpending;
ndis_packet **ndis_txarray;
ndis_handle ndis_txpool;
int ndis_sc;
ndis_cfg *ndis_regvals;
struct nch ndis_cfglist_head;
- int ndis_80211;
- int ndis_link;
uint32_t ndis_sts;
uint32_t ndis_filter;
- int ndis_if_flags;
int ndis_skip;
-
int ndis_devidx;
interface_type ndis_iftype;
driver_object *ndis_dobj;
io_workitem *ndis_tickitem;
io_workitem *ndis_startitem;
io_workitem *ndis_resetitem;
io_workitem *ndis_inputitem;
kdpc ndis_rxdpc;
bus_dma_tag_t ndis_parent_tag;
list_entry ndis_shlist;
bus_dma_tag_t ndis_mtag;
bus_dma_tag_t ndis_ttag;
bus_dmamap_t *ndis_mmaps;
bus_dmamap_t *ndis_tmaps;
int ndis_mmapcnt;
struct ndis_evt ndis_evt[NDIS_EVENTS];
int ndis_evtpidx;
int ndis_evtcidx;
- struct ifqueue ndis_rxqueue;
+ struct mbufq ndis_rxqueue;
kspin_lock ndis_rxlock;
- int (*ndis_newstate)(struct ieee80211com *,
- enum ieee80211_state, int);
int ndis_tx_timer;
int ndis_hang_timer;
struct usb_device *ndisusb_dev;
struct mtx ndisusb_mtx;
struct ndisusb_ep ndisusb_dread_ep;
struct ndisusb_ep ndisusb_dwrite_ep;
#define NDISUSB_GET_ENDPT(addr) \
((UE_GET_DIR(addr) >> 7) | (UE_GET_ADDR(addr) << 1))
#define NDISUSB_ENDPT_MAX ((UE_ADDR + 1) * 2)
struct ndisusb_ep ndisusb_ep[NDISUSB_ENDPT_MAX];
io_workitem *ndisusb_xferdoneitem;
list_entry ndisusb_xferdonelist;
kspin_lock ndisusb_xferdonelock;
io_workitem *ndisusb_taskitem;
list_entry ndisusb_tasklist;
kspin_lock ndisusb_tasklock;
int ndisusb_status;
#define NDISUSB_STATUS_DETACH 0x1
#define NDISUSB_STATUS_SETUP_EP 0x2
};
#define NDIS_LOCK(_sc) mtx_lock(&(_sc)->ndis_mtx)
#define NDIS_UNLOCK(_sc) mtx_unlock(&(_sc)->ndis_mtx)
#define NDIS_LOCK_ASSERT(_sc, t) mtx_assert(&(_sc)->ndis_mtx, t)
#define NDISUSB_LOCK(_sc) mtx_lock(&(_sc)->ndisusb_mtx)
#define NDISUSB_UNLOCK(_sc) mtx_unlock(&(_sc)->ndisusb_mtx)
#define NDISUSB_LOCK_ASSERT(_sc, t) mtx_assert(&(_sc)->ndisusb_mtx, t)
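The main structural change in the softc above is the anonymous union: the Ethernet-only members (ifp, ifmedia, ndis_if_flags) and the wireless-only members (ndis_ic, ndis_scan_callout, ndis_newstate) now overlay each other, with the new ndis_80211/ndis_link/ndis_running bit-fields recording which arm is live. A minimal stand-alone model of that layout (C11 anonymous structs); the field names here are illustrative, not the driver's:

#include <stdio.h>

/* Two mutually exclusive personalities sharing one softc, as in the union above. */
struct softc {
	unsigned wireless:1, running:1;
	union {
		struct {		/* Ethernet-only state */
			int if_flags;
			int media;
		};
		struct {		/* Wireless-only state */
			int nchans;
			int scan_pending;
		};
	};
};

int
main(void)
{
	struct softc sc = { .wireless = 1 };

	if (sc.wireless)
		sc.nchans = 11;		/* touch wireless members only */
	else
		sc.if_flags = 0x1;	/* touch Ethernet members only */
	printf("sizeof(struct softc) = %zu\n", sizeof(struct softc));
	return (0);
}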
Index: head/sys/dev/ipw/if_ipw.c
===================================================================
--- head/sys/dev/ipw/if_ipw.c (revision 287196)
+++ head/sys/dev/ipw/if_ipw.c (revision 287197)
@@ -1,2717 +1,2654 @@
/*-
* Copyright (c) 2004-2006
* Damien Bergamini <damien.bergamini@free.fr>. All rights reserved.
* Copyright (c) 2006 Sam Leffler, Errno Consulting
* Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
* Intel(R) PRO/Wireless 2100 MiniPCI driver
* http://www.intel.com/network/connectivity/products/wireless/prowireless_mobile.htm
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <dev/ipw/if_ipwreg.h>
#include <dev/ipw/if_ipwvar.h>
#define IPW_DEBUG
#ifdef IPW_DEBUG
#define DPRINTF(x) do { if (ipw_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x) do { if (ipw_debug >= (n)) printf x; } while (0)
int ipw_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, ipw, CTLFLAG_RW, &ipw_debug, 0, "ipw debug level");
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif
MODULE_DEPEND(ipw, pci, 1, 1, 1);
MODULE_DEPEND(ipw, wlan, 1, 1, 1);
MODULE_DEPEND(ipw, firmware, 1, 1, 1);
struct ipw_ident {
uint16_t vendor;
uint16_t device;
const char *name;
};
static const struct ipw_ident ipw_ident_table[] = {
{ 0x8086, 0x1043, "Intel(R) PRO/Wireless 2100 MiniPCI" },
{ 0, 0, NULL }
};
static struct ieee80211vap *ipw_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void ipw_vap_delete(struct ieee80211vap *);
static int ipw_dma_alloc(struct ipw_softc *);
static void ipw_release(struct ipw_softc *);
static void ipw_media_status(struct ifnet *, struct ifmediareq *);
static int ipw_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static uint16_t ipw_read_prom_word(struct ipw_softc *, uint8_t);
static void ipw_rx_cmd_intr(struct ipw_softc *, struct ipw_soft_buf *);
static void ipw_rx_newstate_intr(struct ipw_softc *, struct ipw_soft_buf *);
static void ipw_rx_data_intr(struct ipw_softc *, struct ipw_status *,
struct ipw_soft_bd *, struct ipw_soft_buf *);
static void ipw_rx_intr(struct ipw_softc *);
static void ipw_release_sbd(struct ipw_softc *, struct ipw_soft_bd *);
static void ipw_tx_intr(struct ipw_softc *);
static void ipw_intr(void *);
static void ipw_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static const char * ipw_cmdname(int);
static int ipw_cmd(struct ipw_softc *, uint32_t, void *, uint32_t);
-static int ipw_tx_start(struct ifnet *, struct mbuf *,
+static int ipw_tx_start(struct ipw_softc *, struct mbuf *,
struct ieee80211_node *);
static int ipw_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
-static void ipw_start(struct ifnet *);
-static void ipw_start_locked(struct ifnet *);
+static int ipw_transmit(struct ieee80211com *, struct mbuf *);
+static void ipw_start(struct ipw_softc *);
static void ipw_watchdog(void *);
-static int ipw_ioctl(struct ifnet *, u_long, caddr_t);
+static void ipw_parent(struct ieee80211com *);
static void ipw_stop_master(struct ipw_softc *);
static int ipw_enable(struct ipw_softc *);
static int ipw_disable(struct ipw_softc *);
static int ipw_reset(struct ipw_softc *);
static int ipw_load_ucode(struct ipw_softc *, const char *, int);
static int ipw_load_firmware(struct ipw_softc *, const char *, int);
static int ipw_config(struct ipw_softc *);
static void ipw_assoc(struct ieee80211com *, struct ieee80211vap *);
static void ipw_disassoc(struct ieee80211com *, struct ieee80211vap *);
static void ipw_init_task(void *, int);
static void ipw_init(void *);
static void ipw_init_locked(struct ipw_softc *);
static void ipw_stop(void *);
static void ipw_stop_locked(struct ipw_softc *);
static int ipw_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int ipw_sysctl_radio(SYSCTL_HANDLER_ARGS);
static uint32_t ipw_read_table1(struct ipw_softc *, uint32_t);
static void ipw_write_table1(struct ipw_softc *, uint32_t, uint32_t);
#if 0
static int ipw_read_table2(struct ipw_softc *, uint32_t, void *,
uint32_t *);
static void ipw_read_mem_1(struct ipw_softc *, bus_size_t, uint8_t *,
bus_size_t);
#endif
static void ipw_write_mem_1(struct ipw_softc *, bus_size_t,
const uint8_t *, bus_size_t);
static int ipw_scan(struct ipw_softc *);
static void ipw_scan_start(struct ieee80211com *);
static void ipw_scan_end(struct ieee80211com *);
static void ipw_set_channel(struct ieee80211com *);
static void ipw_scan_curchan(struct ieee80211_scan_state *,
unsigned long maxdwell);
static void ipw_scan_mindwell(struct ieee80211_scan_state *);
static int ipw_probe(device_t);
static int ipw_attach(device_t);
static int ipw_detach(device_t);
static int ipw_shutdown(device_t);
static int ipw_suspend(device_t);
static int ipw_resume(device_t);
static device_method_t ipw_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, ipw_probe),
DEVMETHOD(device_attach, ipw_attach),
DEVMETHOD(device_detach, ipw_detach),
DEVMETHOD(device_shutdown, ipw_shutdown),
DEVMETHOD(device_suspend, ipw_suspend),
DEVMETHOD(device_resume, ipw_resume),
DEVMETHOD_END
};
static driver_t ipw_driver = {
"ipw",
ipw_methods,
sizeof (struct ipw_softc)
};
static devclass_t ipw_devclass;
DRIVER_MODULE(ipw, pci, ipw_driver, ipw_devclass, NULL, NULL);
MODULE_VERSION(ipw, 1);
static int
ipw_probe(device_t dev)
{
const struct ipw_ident *ident;
for (ident = ipw_ident_table; ident->name != NULL; ident++) {
if (pci_get_vendor(dev) == ident->vendor &&
pci_get_device(dev) == ident->device) {
device_set_desc(dev, ident->name);
return (BUS_PROBE_DEFAULT);
}
}
return ENXIO;
}
/* Base Address Register */
static int
ipw_attach(device_t dev)
{
struct ipw_softc *sc = device_get_softc(dev);
- struct ifnet *ifp;
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_channel *c;
uint16_t val;
int error, i;
- uint8_t macaddr[IEEE80211_ADDR_LEN];
sc->sc_dev = dev;
mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF | MTX_RECURSE);
-
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
TASK_INIT(&sc->sc_init_task, 0, ipw_init_task, sc);
callout_init_mtx(&sc->sc_wdtimer, &sc->sc_mtx, 0);
pci_write_config(dev, 0x41, 0, 1);
/* enable bus-mastering */
pci_enable_busmaster(dev);
i = PCIR_BAR(0);
sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE);
if (sc->mem == NULL) {
device_printf(dev, "could not allocate memory resource\n");
goto fail;
}
sc->sc_st = rman_get_bustag(sc->mem);
sc->sc_sh = rman_get_bushandle(sc->mem);
i = 0;
sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i,
RF_ACTIVE | RF_SHAREABLE);
if (sc->irq == NULL) {
device_printf(dev, "could not allocate interrupt resource\n");
goto fail1;
}
if (ipw_reset(sc) != 0) {
device_printf(dev, "could not reset adapter\n");
goto fail2;
}
if (ipw_dma_alloc(sc) != 0) {
device_printf(dev, "could not allocate DMA resources\n");
goto fail2;
}
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- goto fail3;
- }
- ic = ifp->if_l2com;
-
- ifp->if_softc = sc;
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = ipw_init;
- ifp->if_ioctl = ipw_ioctl;
- ifp->if_start = ipw_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_phytype = IEEE80211_T_DS;
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA /* station mode supported */
| IEEE80211_C_IBSS /* IBSS mode supported */
| IEEE80211_C_MONITOR /* monitor mode supported */
| IEEE80211_C_PMGT /* power save supported */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_WPA /* 802.11i supported */
;
/* read MAC address from EEPROM */
val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 0);
- macaddr[0] = val >> 8;
- macaddr[1] = val & 0xff;
+ ic->ic_macaddr[0] = val >> 8;
+ ic->ic_macaddr[1] = val & 0xff;
val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 1);
- macaddr[2] = val >> 8;
- macaddr[3] = val & 0xff;
+ ic->ic_macaddr[2] = val >> 8;
+ ic->ic_macaddr[3] = val & 0xff;
val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 2);
- macaddr[4] = val >> 8;
- macaddr[5] = val & 0xff;
+ ic->ic_macaddr[4] = val >> 8;
+ ic->ic_macaddr[5] = val & 0xff;
/* set supported .11b channels (read from EEPROM) */
if ((val = ipw_read_prom_word(sc, IPW_EEPROM_CHANNEL_LIST)) == 0)
val = 0x7ff; /* default to channels 1-11 */
val <<= 1;
for (i = 1; i < 16; i++) {
if (val & (1 << i)) {
c = &ic->ic_channels[ic->ic_nchans++];
c->ic_freq = ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ);
c->ic_flags = IEEE80211_CHAN_B;
c->ic_ieee = i;
}
}
/* check support for radio transmitter switch in EEPROM */
if (!(ipw_read_prom_word(sc, IPW_EEPROM_RADIO) & 8))
sc->flags |= IPW_FLAG_HAS_RADIO_SWITCH;
- ieee80211_ifattach(ic, macaddr);
+ ieee80211_ifattach(ic);
ic->ic_scan_start = ipw_scan_start;
ic->ic_scan_end = ipw_scan_end;
ic->ic_set_channel = ipw_set_channel;
ic->ic_scan_curchan = ipw_scan_curchan;
ic->ic_scan_mindwell = ipw_scan_mindwell;
ic->ic_raw_xmit = ipw_raw_xmit;
-
ic->ic_vap_create = ipw_vap_create;
ic->ic_vap_delete = ipw_vap_delete;
+ ic->ic_transmit = ipw_transmit;
+ ic->ic_parent = ipw_parent;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
IPW_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
IPW_RX_RADIOTAP_PRESENT);
/*
* Add a few sysctl knobs.
*/
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "radio",
CTLTYPE_INT | CTLFLAG_RD, sc, 0, ipw_sysctl_radio, "I",
"radio transmitter switch state (0=off, 1=on)");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats",
CTLTYPE_OPAQUE | CTLFLAG_RD, sc, 0, ipw_sysctl_stats, "S",
"statistics");
/*
* Hook our interrupt after all initialization is complete.
*/
error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
NULL, ipw_intr, sc, &sc->sc_ih);
if (error != 0) {
device_printf(dev, "could not set up interrupt\n");
- goto fail4;
+ goto fail3;
}
if (bootverbose)
ieee80211_announce(ic);
return 0;
-fail4:
- if_free(ifp);
fail3:
ipw_release(sc);
fail2:
bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq);
fail1:
bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem),
sc->mem);
fail:
mtx_destroy(&sc->sc_mtx);
return ENXIO;
}
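ipw_attach() above now fills ic->ic_macaddr directly from three 16-bit EEPROM words, high byte first, instead of staging them in a local macaddr[] for ieee80211_ifattach(). The unpacking in isolation, with made-up word values:

#include <stdint.h>
#include <stdio.h>

/* Unpack three 16-bit EEPROM words (high byte first) into a 6-byte MAC address. */
static void
prom_to_mac(const uint16_t words[3], uint8_t mac[6])
{
	int i;

	for (i = 0; i < 3; i++) {
		mac[2 * i] = words[i] >> 8;
		mac[2 * i + 1] = words[i] & 0xff;
	}
}

int
main(void)
{
	const uint16_t words[3] = { 0x0004, 0x23aa, 0xbbcc };	/* made-up values */
	uint8_t mac[6];

	prom_to_mac(words, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	    mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return (0);
}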
static int
ipw_detach(device_t dev)
{
struct ipw_softc *sc = device_get_softc(dev);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
bus_teardown_intr(dev, sc->irq, sc->sc_ih);
ieee80211_draintask(ic, &sc->sc_init_task);
ipw_stop(sc);
ieee80211_ifdetach(ic);
callout_drain(&sc->sc_wdtimer);
+ mbufq_drain(&sc->sc_snd);
ipw_release(sc);
bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq);
bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem),
sc->mem);
- if_free(ifp);
-
if (sc->sc_firmware != NULL) {
firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD);
sc->sc_firmware = NULL;
}
mtx_destroy(&sc->sc_mtx);
return 0;
}
static struct ieee80211vap *
ipw_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct ipw_softc *sc = ic->ic_softc;
struct ipw_vap *ivp;
struct ieee80211vap *vap;
const struct firmware *fp;
const struct ipw_firmware_hdr *hdr;
const char *imagename;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return NULL;
switch (opmode) {
case IEEE80211_M_STA:
imagename = "ipw_bss";
break;
case IEEE80211_M_IBSS:
imagename = "ipw_ibss";
break;
case IEEE80211_M_MONITOR:
imagename = "ipw_monitor";
break;
default:
return NULL;
}
/*
* Load firmware image using the firmware(9) subsystem. Doing
* this unlocked is ok since we're single-threaded by the
* 802.11 layer.
*/
if (sc->sc_firmware == NULL ||
strcmp(sc->sc_firmware->name, imagename) != 0) {
if (sc->sc_firmware != NULL)
firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD);
sc->sc_firmware = firmware_get(imagename);
}
if (sc->sc_firmware == NULL) {
device_printf(sc->sc_dev,
"could not load firmware image '%s'\n", imagename);
return NULL;
}
fp = sc->sc_firmware;
if (fp->datasize < sizeof *hdr) {
device_printf(sc->sc_dev,
"firmware image too short %zu\n", fp->datasize);
firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD);
sc->sc_firmware = NULL;
return NULL;
}
hdr = (const struct ipw_firmware_hdr *)fp->data;
if (fp->datasize < sizeof *hdr + le32toh(hdr->mainsz) +
le32toh(hdr->ucodesz)) {
device_printf(sc->sc_dev,
"firmware image too short %zu\n", fp->datasize);
firmware_put(sc->sc_firmware, FIRMWARE_UNLOAD);
sc->sc_firmware = NULL;
return NULL;
}
- ivp = (struct ipw_vap *) malloc(sizeof(struct ipw_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (ivp == NULL)
- return NULL;
+ ivp = malloc(sizeof(struct ipw_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &ivp->vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
/* override with driver methods */
ivp->newstate = vap->iv_newstate;
vap->iv_newstate = ipw_newstate;
/* complete setup */
- ieee80211_vap_attach(vap, ieee80211_media_change, ipw_media_status);
+ ieee80211_vap_attach(vap, ieee80211_media_change, ipw_media_status,
+ mac);
ic->ic_opmode = opmode;
return vap;
}
static void
ipw_vap_delete(struct ieee80211vap *vap)
{
struct ipw_vap *ivp = IPW_VAP(vap);
ieee80211_vap_detach(vap);
free(ivp, M_80211_VAP);
}
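/*
 * Allocate DMA-able memory for the tx, rx and status rings, the command
 * block and the per-buffer DMA maps, and pre-load rx mbufs.
 */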
static int
ipw_dma_alloc(struct ipw_softc *sc)
{
struct ipw_soft_bd *sbd;
struct ipw_soft_hdr *shdr;
struct ipw_soft_buf *sbuf;
bus_addr_t physaddr;
int error, i;
/*
* Allocate parent DMA tag for subsequent allocations.
*/
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
BUS_SPACE_MAXSIZE_32BIT, BUS_SPACE_UNRESTRICTED,
BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->parent_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create parent DMA tag\n");
goto fail;
}
/*
* Allocate and map tx ring.
*/
error = bus_dma_tag_create(sc->parent_dmat, 4, 0, BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR, NULL, NULL, IPW_TBD_SZ, 1, IPW_TBD_SZ, 0, NULL,
NULL, &sc->tbd_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create tx ring DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(sc->tbd_dmat, (void **)&sc->tbd_list,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->tbd_map);
if (error != 0) {
device_printf(sc->sc_dev,
"could not allocate tx ring DMA memory\n");
goto fail;
}
error = bus_dmamap_load(sc->tbd_dmat, sc->tbd_map, sc->tbd_list,
IPW_TBD_SZ, ipw_dma_map_addr, &sc->tbd_phys, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not map tx ring DMA memory\n");
goto fail;
}
/*
* Allocate and map rx ring.
*/
error = bus_dma_tag_create(sc->parent_dmat, 4, 0, BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR, NULL, NULL, IPW_RBD_SZ, 1, IPW_RBD_SZ, 0, NULL,
NULL, &sc->rbd_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create rx ring DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(sc->rbd_dmat, (void **)&sc->rbd_list,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rbd_map);
if (error != 0) {
device_printf(sc->sc_dev,
"could not allocate rx ring DMA memory\n");
goto fail;
}
error = bus_dmamap_load(sc->rbd_dmat, sc->rbd_map, sc->rbd_list,
IPW_RBD_SZ, ipw_dma_map_addr, &sc->rbd_phys, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not map rx ring DMA memory\n");
goto fail;
}
/*
* Allocate and map status ring.
*/
error = bus_dma_tag_create(sc->parent_dmat, 4, 0, BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR, NULL, NULL, IPW_STATUS_SZ, 1, IPW_STATUS_SZ, 0,
NULL, NULL, &sc->status_dmat);
if (error != 0) {
device_printf(sc->sc_dev,
"could not create status ring DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(sc->status_dmat, (void **)&sc->status_list,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->status_map);
if (error != 0) {
device_printf(sc->sc_dev,
"could not allocate status ring DMA memory\n");
goto fail;
}
error = bus_dmamap_load(sc->status_dmat, sc->status_map,
sc->status_list, IPW_STATUS_SZ, ipw_dma_map_addr, &sc->status_phys,
0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not map status ring DMA memory\n");
goto fail;
}
/*
* Allocate command DMA map.
*/
error = bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR, NULL, NULL, sizeof (struct ipw_cmd), 1,
sizeof (struct ipw_cmd), 0, NULL, NULL, &sc->cmd_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create command DMA tag\n");
goto fail;
}
error = bus_dmamap_create(sc->cmd_dmat, 0, &sc->cmd_map);
if (error != 0) {
device_printf(sc->sc_dev,
"could not create command DMA map\n");
goto fail;
}
/*
* Allocate headers DMA maps.
*/
error = bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR, NULL, NULL, sizeof (struct ipw_hdr), 1,
sizeof (struct ipw_hdr), 0, NULL, NULL, &sc->hdr_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create header DMA tag\n");
goto fail;
}
SLIST_INIT(&sc->free_shdr);
for (i = 0; i < IPW_NDATA; i++) {
shdr = &sc->shdr_list[i];
error = bus_dmamap_create(sc->hdr_dmat, 0, &shdr->map);
if (error != 0) {
device_printf(sc->sc_dev,
"could not create header DMA map\n");
goto fail;
}
SLIST_INSERT_HEAD(&sc->free_shdr, shdr, next);
}
/*
* Allocate tx buffers DMA maps.
*/
error = bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IPW_MAX_NSEG, MCLBYTES, 0,
NULL, NULL, &sc->txbuf_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create tx DMA tag\n");
goto fail;
}
SLIST_INIT(&sc->free_sbuf);
for (i = 0; i < IPW_NDATA; i++) {
sbuf = &sc->tx_sbuf_list[i];
error = bus_dmamap_create(sc->txbuf_dmat, 0, &sbuf->map);
if (error != 0) {
device_printf(sc->sc_dev,
"could not create tx DMA map\n");
goto fail;
}
SLIST_INSERT_HEAD(&sc->free_sbuf, sbuf, next);
}
/*
* Initialize tx ring.
*/
for (i = 0; i < IPW_NTBD; i++) {
sbd = &sc->stbd_list[i];
sbd->bd = &sc->tbd_list[i];
sbd->type = IPW_SBD_TYPE_NOASSOC;
}
/*
* Pre-allocate rx buffers and DMA maps.
*/
error = bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL,
NULL, &sc->rxbuf_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create rx DMA tag\n");
goto fail;
}
for (i = 0; i < IPW_NRBD; i++) {
sbd = &sc->srbd_list[i];
sbuf = &sc->rx_sbuf_list[i];
sbd->bd = &sc->rbd_list[i];
sbuf->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (sbuf->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
error = ENOMEM;
goto fail;
}
error = bus_dmamap_create(sc->rxbuf_dmat, 0, &sbuf->map);
if (error != 0) {
device_printf(sc->sc_dev,
"could not create rx DMA map\n");
goto fail;
}
error = bus_dmamap_load(sc->rxbuf_dmat, sbuf->map,
mtod(sbuf->m, void *), MCLBYTES, ipw_dma_map_addr,
&physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not map rx DMA memory\n");
goto fail;
}
sbd->type = IPW_SBD_TYPE_DATA;
sbd->priv = sbuf;
sbd->bd->physaddr = htole32(physaddr);
sbd->bd->len = htole32(MCLBYTES);
}
bus_dmamap_sync(sc->rbd_dmat, sc->rbd_map, BUS_DMASYNC_PREWRITE);
return 0;
fail: ipw_release(sc);
return error;
}
static void
ipw_release(struct ipw_softc *sc)
{
struct ipw_soft_buf *sbuf;
int i;
if (sc->parent_dmat != NULL) {
bus_dma_tag_destroy(sc->parent_dmat);
}
if (sc->tbd_dmat != NULL) {
bus_dmamap_unload(sc->tbd_dmat, sc->tbd_map);
bus_dmamem_free(sc->tbd_dmat, sc->tbd_list, sc->tbd_map);
bus_dma_tag_destroy(sc->tbd_dmat);
}
if (sc->rbd_dmat != NULL) {
if (sc->rbd_list != NULL) {
bus_dmamap_unload(sc->rbd_dmat, sc->rbd_map);
bus_dmamem_free(sc->rbd_dmat, sc->rbd_list,
sc->rbd_map);
}
bus_dma_tag_destroy(sc->rbd_dmat);
}
if (sc->status_dmat != NULL) {
if (sc->status_list != NULL) {
bus_dmamap_unload(sc->status_dmat, sc->status_map);
bus_dmamem_free(sc->status_dmat, sc->status_list,
sc->status_map);
}
bus_dma_tag_destroy(sc->status_dmat);
}
for (i = 0; i < IPW_NTBD; i++)
ipw_release_sbd(sc, &sc->stbd_list[i]);
if (sc->cmd_dmat != NULL) {
bus_dmamap_destroy(sc->cmd_dmat, sc->cmd_map);
bus_dma_tag_destroy(sc->cmd_dmat);
}
if (sc->hdr_dmat != NULL) {
for (i = 0; i < IPW_NDATA; i++)
bus_dmamap_destroy(sc->hdr_dmat, sc->shdr_list[i].map);
bus_dma_tag_destroy(sc->hdr_dmat);
}
if (sc->txbuf_dmat != NULL) {
for (i = 0; i < IPW_NDATA; i++) {
bus_dmamap_destroy(sc->txbuf_dmat,
sc->tx_sbuf_list[i].map);
}
bus_dma_tag_destroy(sc->txbuf_dmat);
}
if (sc->rxbuf_dmat != NULL) {
for (i = 0; i < IPW_NRBD; i++) {
sbuf = &sc->rx_sbuf_list[i];
if (sbuf->m != NULL) {
bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->rxbuf_dmat, sbuf->map);
m_freem(sbuf->m);
}
bus_dmamap_destroy(sc->rxbuf_dmat, sbuf->map);
}
bus_dma_tag_destroy(sc->rxbuf_dmat);
}
}
static int
ipw_shutdown(device_t dev)
{
struct ipw_softc *sc = device_get_softc(dev);
ipw_stop(sc);
return 0;
}
static int
ipw_suspend(device_t dev)
{
struct ipw_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
ieee80211_suspend_all(ic);
return 0;
}
static int
ipw_resume(device_t dev)
{
struct ipw_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
pci_write_config(dev, 0x41, 0, 1);
ieee80211_resume_all(ic);
return 0;
}
static int
ipw_cvtrate(int ipwrate)
{
switch (ipwrate) {
case IPW_RATE_DS1: return 2;
case IPW_RATE_DS2: return 4;
case IPW_RATE_DS5: return 11;
case IPW_RATE_DS11: return 22;
}
return 0;
}
/*
* The firmware automatically adapts the transmit speed. We report its current
* value here.
*/
static void
ipw_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
struct ieee80211vap *vap = ifp->if_softc;
struct ieee80211com *ic = vap->iv_ic;
struct ipw_softc *sc = ic->ic_softc;
/* read current transmission rate from adapter */
vap->iv_bss->ni_txrate = ipw_cvtrate(
ipw_read_table1(sc, IPW_INFO_CURRENT_TX_RATE) & 0xf);
ieee80211_media_status(ifp, imr);
}
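/*
 * 802.11 state change handler. The firmware performs authentication and
 * association on its own, so map the requested net80211 transitions onto
 * ipw_assoc()/ipw_disassoc() calls and defer to the default handler.
 */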
static int
ipw_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct ipw_vap *ivp = IPW_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct ipw_softc *sc = ic->ic_softc;
enum ieee80211_state ostate;
DPRINTF(("%s: %s -> %s flags 0x%x\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate], sc->flags));
ostate = vap->iv_state;
IEEE80211_UNLOCK(ic);
switch (nstate) {
case IEEE80211_S_RUN:
if (ic->ic_opmode == IEEE80211_M_IBSS) {
/*
* XXX when joining an ibss network we are called
* with a SCAN -> RUN transition on scan complete.
* Use that to call ipw_assoc. On completing the
* join we are then called again with an AUTH -> RUN
* transition and we want to do nothing. This is
* all totally bogus and needs to be redone.
*/
if (ostate == IEEE80211_S_SCAN)
ipw_assoc(ic, vap);
}
break;
case IEEE80211_S_INIT:
if (sc->flags & IPW_FLAG_ASSOCIATED)
ipw_disassoc(ic, vap);
break;
case IEEE80211_S_AUTH:
/*
* Move to ASSOC state after the ipw_assoc() call. Firmware
* takes care of authentication, after the call we'll receive
* only an assoc response which would otherwise be discarded
* if we are still in AUTH state.
*/
nstate = IEEE80211_S_ASSOC;
ipw_assoc(ic, vap);
break;
case IEEE80211_S_ASSOC:
/*
* If we are not transitioning from AUTH then resend the
* association request.
*/
if (ostate != IEEE80211_S_AUTH)
ipw_assoc(ic, vap);
break;
default:
break;
}
IEEE80211_LOCK(ic);
return ivp->newstate(vap, nstate, arg);
}
/*
* Read 16 bits at address 'addr' from the serial EEPROM.
*/
static uint16_t
ipw_read_prom_word(struct ipw_softc *sc, uint8_t addr)
{
uint32_t tmp;
uint16_t val;
int n;
/* clock C once before the first command */
IPW_EEPROM_CTL(sc, 0);
IPW_EEPROM_CTL(sc, IPW_EEPROM_S);
IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_C);
IPW_EEPROM_CTL(sc, IPW_EEPROM_S);
/* write start bit (1) */
IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D);
IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D | IPW_EEPROM_C);
/* write READ opcode (10) */
IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D);
IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_D | IPW_EEPROM_C);
IPW_EEPROM_CTL(sc, IPW_EEPROM_S);
IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_C);
/* write address A7-A0 */
for (n = 7; n >= 0; n--) {
IPW_EEPROM_CTL(sc, IPW_EEPROM_S |
(((addr >> n) & 1) << IPW_EEPROM_SHIFT_D));
IPW_EEPROM_CTL(sc, IPW_EEPROM_S |
(((addr >> n) & 1) << IPW_EEPROM_SHIFT_D) | IPW_EEPROM_C);
}
IPW_EEPROM_CTL(sc, IPW_EEPROM_S);
/* read data Q15-Q0 */
val = 0;
for (n = 15; n >= 0; n--) {
IPW_EEPROM_CTL(sc, IPW_EEPROM_S | IPW_EEPROM_C);
IPW_EEPROM_CTL(sc, IPW_EEPROM_S);
tmp = MEM_READ_4(sc, IPW_MEM_EEPROM_CTL);
val |= ((tmp & IPW_EEPROM_Q) >> IPW_EEPROM_SHIFT_Q) << n;
}
IPW_EEPROM_CTL(sc, 0);
/* clear Chip Select and clock C */
IPW_EEPROM_CTL(sc, IPW_EEPROM_S);
IPW_EEPROM_CTL(sc, 0);
IPW_EEPROM_CTL(sc, IPW_EEPROM_C);
return le16toh(val);
}
static void
ipw_rx_cmd_intr(struct ipw_softc *sc, struct ipw_soft_buf *sbuf)
{
struct ipw_cmd *cmd;
bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD);
cmd = mtod(sbuf->m, struct ipw_cmd *);
DPRINTFN(9, ("cmd ack'ed %s(%u, %u, %u, %u, %u)\n",
ipw_cmdname(le32toh(cmd->type)), le32toh(cmd->type),
le32toh(cmd->subtype), le32toh(cmd->seq), le32toh(cmd->len),
le32toh(cmd->status)));
sc->flags &= ~IPW_FLAG_BUSY;
wakeup(sc);
}
static void
ipw_rx_newstate_intr(struct ipw_softc *sc, struct ipw_soft_buf *sbuf)
{
#define IEEESTATE(vap) ieee80211_state_name[vap->iv_state]
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t state;
bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD);
state = le32toh(*mtod(sbuf->m, uint32_t *));
switch (state) {
case IPW_STATE_ASSOCIATED:
DPRINTFN(2, ("Association succeeded (%s flags 0x%x)\n",
IEEESTATE(vap), sc->flags));
/* XXX suppress state change in case the fw auto-associates */
if ((sc->flags & IPW_FLAG_ASSOCIATING) == 0) {
DPRINTF(("Unexpected association (%s, flags 0x%x)\n",
IEEESTATE(vap), sc->flags));
break;
}
sc->flags &= ~IPW_FLAG_ASSOCIATING;
sc->flags |= IPW_FLAG_ASSOCIATED;
break;
case IPW_STATE_SCANNING:
DPRINTFN(3, ("Scanning (%s flags 0x%x)\n",
IEEESTATE(vap), sc->flags));
/*
* NB: Check driver state for association on assoc
* loss as the firmware will immediately start to
* scan and we would treat it as a beacon miss if
* we checked the 802.11 layer state.
*/
if (sc->flags & IPW_FLAG_ASSOCIATED) {
IPW_UNLOCK(sc);
/* XXX probably need to issue disassoc to fw */
ieee80211_beacon_miss(ic);
IPW_LOCK(sc);
}
break;
case IPW_STATE_SCAN_COMPLETE:
/*
* XXX For some reason scan requests generate scan
* started + scan done events before any traffic is
* received (e.g. probe response frames). We work
* around this by marking the HACK flag and skipping
* the first scan complete event.
*/
DPRINTFN(3, ("Scan complete (%s flags 0x%x)\n",
IEEESTATE(vap), sc->flags));
if (sc->flags & IPW_FLAG_HACK) {
sc->flags &= ~IPW_FLAG_HACK;
break;
}
if (sc->flags & IPW_FLAG_SCANNING) {
IPW_UNLOCK(sc);
ieee80211_scan_done(vap);
IPW_LOCK(sc);
sc->flags &= ~IPW_FLAG_SCANNING;
sc->sc_scan_timer = 0;
}
break;
case IPW_STATE_ASSOCIATION_LOST:
DPRINTFN(2, ("Association lost (%s flags 0x%x)\n",
IEEESTATE(vap), sc->flags));
sc->flags &= ~(IPW_FLAG_ASSOCIATING | IPW_FLAG_ASSOCIATED);
if (vap->iv_state == IEEE80211_S_RUN) {
IPW_UNLOCK(sc);
ieee80211_new_state(vap, IEEE80211_S_SCAN, -1);
IPW_LOCK(sc);
}
break;
case IPW_STATE_DISABLED:
/* XXX? is this right? */
sc->flags &= ~(IPW_FLAG_HACK | IPW_FLAG_SCANNING |
IPW_FLAG_ASSOCIATING | IPW_FLAG_ASSOCIATED);
DPRINTFN(2, ("Firmware disabled (%s flags 0x%x)\n",
IEEESTATE(vap), sc->flags));
break;
case IPW_STATE_RADIO_DISABLED:
device_printf(sc->sc_dev, "radio turned off\n");
ieee80211_notify_radio(ic, 0);
ipw_stop_locked(sc);
/* XXX start polling thread to detect radio on */
break;
default:
DPRINTFN(2, ("%s: unhandled state %u %s flags 0x%x\n",
__func__, state, IEEESTATE(vap), sc->flags));
break;
}
#undef IEEESTATE
}
/*
* Set driver state for current channel.
*/
static void
ipw_setcurchan(struct ipw_softc *sc, struct ieee80211_channel *chan)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
ic->ic_curchan = chan;
ieee80211_radiotap_chan_change(ic);
}
/*
* XXX: Hack to set the current channel to the value advertised in beacons or
* probe responses. Only used during AP detection.
*/
static void
ipw_fix_channel(struct ipw_softc *sc, struct mbuf *m)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_channel *c;
struct ieee80211_frame *wh;
uint8_t subtype;
uint8_t *frm, *efrm;
wh = mtod(m, struct ieee80211_frame *);
if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
return;
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
return;
/* XXX use ieee80211_parse_beacon */
frm = (uint8_t *)(wh + 1);
efrm = mtod(m, uint8_t *) + m->m_len;
frm += 12; /* skip tstamp, bintval and capinfo fields */
while (frm < efrm) {
if (*frm == IEEE80211_ELEMID_DSPARMS)
#if IEEE80211_CHAN_MAX < 255
if (frm[2] <= IEEE80211_CHAN_MAX)
#endif
{
DPRINTF(("Fixing channel to %d\n", frm[2]));
c = ieee80211_find_channel(ic,
ieee80211_ieee2mhz(frm[2], 0),
IEEE80211_CHAN_B);
if (c == NULL)
c = &ic->ic_channels[0];
ipw_setcurchan(sc, c);
}
frm += frm[1] + 2;
}
}
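/*
 * Process a received data frame: replace the rx ring mbuf with a fresh
 * one, fill in radiotap state and pass the frame up to net80211.
 */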
static void
ipw_rx_data_intr(struct ipw_softc *sc, struct ipw_status *status,
struct ipw_soft_bd *sbd, struct ipw_soft_buf *sbuf)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct mbuf *mnew, *m;
struct ieee80211_node *ni;
bus_addr_t physaddr;
int error;
int8_t rssi, nf;
DPRINTFN(5, ("received frame len=%u, rssi=%u\n", le32toh(status->len),
status->rssi));
if (le32toh(status->len) < sizeof (struct ieee80211_frame_min) ||
le32toh(status->len) > MCLBYTES)
return;
/*
* Try to allocate a new mbuf for this ring element and load it before
* processing the current mbuf. If the ring element cannot be loaded,
* drop the received packet and reuse the old mbuf. In the unlikely
* case that the old mbuf can't be reloaded either, explicitly panic.
*/
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return;
}
bus_dmamap_sync(sc->rxbuf_dmat, sbuf->map, BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->rxbuf_dmat, sbuf->map);
error = bus_dmamap_load(sc->rxbuf_dmat, sbuf->map, mtod(mnew, void *),
MCLBYTES, ipw_dma_map_addr, &physaddr, 0);
if (error != 0) {
m_freem(mnew);
/* try to reload the old mbuf */
error = bus_dmamap_load(sc->rxbuf_dmat, sbuf->map,
mtod(sbuf->m, void *), MCLBYTES, ipw_dma_map_addr,
&physaddr, 0);
if (error != 0) {
/* very unlikely that it will fail... */
panic("%s: could not load old rx mbuf",
device_get_name(sc->sc_dev));
}
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return;
}
/*
* New mbuf successfully loaded, update Rx ring and continue
* processing.
*/
m = sbuf->m;
sbuf->m = mnew;
sbd->bd->physaddr = htole32(physaddr);
-
- /* finalize mbuf */
- m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = le32toh(status->len);
rssi = status->rssi + IPW_RSSI_TO_DBM;
nf = -95;
if (ieee80211_radiotap_active(ic)) {
struct ipw_rx_radiotap_header *tap = &sc->sc_rxtap;
tap->wr_flags = 0;
tap->wr_antsignal = rssi;
tap->wr_antnoise = nf;
}
if (sc->flags & IPW_FLAG_SCANNING)
ipw_fix_channel(sc, m);
IPW_UNLOCK(sc);
ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *));
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi - nf, nf);
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi - nf, nf);
IPW_LOCK(sc);
bus_dmamap_sync(sc->rbd_dmat, sc->rbd_map, BUS_DMASYNC_PREWRITE);
}
static void
ipw_rx_intr(struct ipw_softc *sc)
{
struct ipw_status *status;
struct ipw_soft_bd *sbd;
struct ipw_soft_buf *sbuf;
uint32_t r, i;
if (!(sc->flags & IPW_FLAG_FW_INITED))
return;
r = CSR_READ_4(sc, IPW_CSR_RX_READ);
bus_dmamap_sync(sc->status_dmat, sc->status_map, BUS_DMASYNC_POSTREAD);
for (i = (sc->rxcur + 1) % IPW_NRBD; i != r; i = (i + 1) % IPW_NRBD) {
status = &sc->status_list[i];
sbd = &sc->srbd_list[i];
sbuf = sbd->priv;
switch (le16toh(status->code) & 0xf) {
case IPW_STATUS_CODE_COMMAND:
ipw_rx_cmd_intr(sc, sbuf);
break;
case IPW_STATUS_CODE_NEWSTATE:
ipw_rx_newstate_intr(sc, sbuf);
break;
case IPW_STATUS_CODE_DATA_802_3:
case IPW_STATUS_CODE_DATA_802_11:
ipw_rx_data_intr(sc, status, sbd, sbuf);
break;
case IPW_STATUS_CODE_NOTIFICATION:
DPRINTFN(2, ("notification status, len %u flags 0x%x\n",
le32toh(status->len), status->flags));
/* XXX maybe drive state machine AUTH->ASSOC? */
break;
default:
device_printf(sc->sc_dev, "unexpected status code %u\n",
le16toh(status->code));
}
/* firmware was killed, stop processing received frames */
if (!(sc->flags & IPW_FLAG_FW_INITED))
return;
sbd->bd->flags = 0;
}
bus_dmamap_sync(sc->rbd_dmat, sc->rbd_map, BUS_DMASYNC_PREWRITE);
/* kick the firmware */
sc->rxcur = (r == 0) ? IPW_NRBD - 1 : r - 1;
CSR_WRITE_4(sc, IPW_CSR_RX_WRITE, sc->rxcur);
}
static void
ipw_release_sbd(struct ipw_softc *sc, struct ipw_soft_bd *sbd)
{
struct ipw_soft_hdr *shdr;
struct ipw_soft_buf *sbuf;
switch (sbd->type) {
case IPW_SBD_TYPE_COMMAND:
bus_dmamap_sync(sc->cmd_dmat, sc->cmd_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->cmd_dmat, sc->cmd_map);
break;
case IPW_SBD_TYPE_HEADER:
shdr = sbd->priv;
bus_dmamap_sync(sc->hdr_dmat, shdr->map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->hdr_dmat, shdr->map);
SLIST_INSERT_HEAD(&sc->free_shdr, shdr, next);
break;
case IPW_SBD_TYPE_DATA:
sbuf = sbd->priv;
bus_dmamap_sync(sc->txbuf_dmat, sbuf->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->txbuf_dmat, sbuf->map);
SLIST_INSERT_HEAD(&sc->free_sbuf, sbuf, next);
if (sbuf->m->m_flags & M_TXCB)
ieee80211_process_callback(sbuf->ni, sbuf->m, 0/*XXX*/);
m_freem(sbuf->m);
ieee80211_free_node(sbuf->ni);
sc->sc_tx_timer = 0;
break;
}
sbd->type = IPW_SBD_TYPE_NOASSOC;
}
static void
ipw_tx_intr(struct ipw_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
struct ipw_soft_bd *sbd;
uint32_t r, i;
if (!(sc->flags & IPW_FLAG_FW_INITED))
return;
r = CSR_READ_4(sc, IPW_CSR_TX_READ);
for (i = (sc->txold + 1) % IPW_NTBD; i != r; i = (i + 1) % IPW_NTBD) {
sbd = &sc->stbd_list[i];
-
- if (sbd->type == IPW_SBD_TYPE_DATA)
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
-
ipw_release_sbd(sc, sbd);
sc->txfree++;
}
/* remember what the firmware has processed */
sc->txold = (r == 0) ? IPW_NTBD - 1 : r - 1;
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ipw_start_locked(ifp);
+ ipw_start(sc);
}
static void
ipw_fatal_error_intr(struct ipw_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
device_printf(sc->sc_dev, "firmware error\n");
if (vap != NULL) {
IPW_UNLOCK(sc);
ieee80211_cancel_scan(vap);
IPW_LOCK(sc);
}
ieee80211_runtask(ic, &sc->sc_init_task);
}
static void
ipw_intr(void *arg)
{
struct ipw_softc *sc = arg;
uint32_t r;
IPW_LOCK(sc);
r = CSR_READ_4(sc, IPW_CSR_INTR);
if (r == 0 || r == 0xffffffff)
goto done;
/* disable interrupts */
CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, 0);
/* acknowledge all interrupts */
CSR_WRITE_4(sc, IPW_CSR_INTR, r);
if (r & (IPW_INTR_FATAL_ERROR | IPW_INTR_PARITY_ERROR)) {
ipw_fatal_error_intr(sc);
goto done;
}
if (r & IPW_INTR_FW_INIT_DONE)
wakeup(sc);
if (r & IPW_INTR_RX_TRANSFER)
ipw_rx_intr(sc);
if (r & IPW_INTR_TX_TRANSFER)
ipw_tx_intr(sc);
/* re-enable interrupts */
CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, IPW_INTR_MASK);
done:
IPW_UNLOCK(sc);
}
static void
ipw_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
if (error != 0)
return;
KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
*(bus_addr_t *)arg = segs[0].ds_addr;
}
static const char *
ipw_cmdname(int cmd)
{
#define N(a) (sizeof(a) / sizeof(a[0]))
static const struct {
int cmd;
const char *name;
} cmds[] = {
{ IPW_CMD_ADD_MULTICAST, "ADD_MULTICAST" },
{ IPW_CMD_BROADCAST_SCAN, "BROADCAST_SCAN" },
{ IPW_CMD_DISABLE, "DISABLE" },
{ IPW_CMD_DISABLE_PHY, "DISABLE_PHY" },
{ IPW_CMD_ENABLE, "ENABLE" },
{ IPW_CMD_PREPARE_POWER_DOWN, "PREPARE_POWER_DOWN" },
{ IPW_CMD_SET_BASIC_TX_RATES, "SET_BASIC_TX_RATES" },
{ IPW_CMD_SET_BEACON_INTERVAL, "SET_BEACON_INTERVAL" },
{ IPW_CMD_SET_CHANNEL, "SET_CHANNEL" },
{ IPW_CMD_SET_CONFIGURATION, "SET_CONFIGURATION" },
{ IPW_CMD_SET_DESIRED_BSSID, "SET_DESIRED_BSSID" },
{ IPW_CMD_SET_ESSID, "SET_ESSID" },
{ IPW_CMD_SET_FRAG_THRESHOLD, "SET_FRAG_THRESHOLD" },
{ IPW_CMD_SET_MAC_ADDRESS, "SET_MAC_ADDRESS" },
{ IPW_CMD_SET_MANDATORY_BSSID, "SET_MANDATORY_BSSID" },
{ IPW_CMD_SET_MODE, "SET_MODE" },
{ IPW_CMD_SET_MSDU_TX_RATES, "SET_MSDU_TX_RATES" },
{ IPW_CMD_SET_POWER_MODE, "SET_POWER_MODE" },
{ IPW_CMD_SET_RTS_THRESHOLD, "SET_RTS_THRESHOLD" },
{ IPW_CMD_SET_SCAN_OPTIONS, "SET_SCAN_OPTIONS" },
{ IPW_CMD_SET_SECURITY_INFO, "SET_SECURITY_INFO" },
{ IPW_CMD_SET_TX_POWER_INDEX, "SET_TX_POWER_INDEX" },
{ IPW_CMD_SET_TX_RATES, "SET_TX_RATES" },
{ IPW_CMD_SET_WEP_FLAGS, "SET_WEP_FLAGS" },
{ IPW_CMD_SET_WEP_KEY, "SET_WEP_KEY" },
{ IPW_CMD_SET_WEP_KEY_INDEX, "SET_WEP_KEY_INDEX" },
{ IPW_CMD_SET_WPA_IE, "SET_WPA_IE" },
};
static char buf[12];
int i;
for (i = 0; i < N(cmds); i++)
if (cmds[i].cmd == cmd)
return cmds[i].name;
snprintf(buf, sizeof(buf), "%u", cmd);
return buf;
#undef N
}
/*
* Send a command to the firmware and wait for the acknowledgement.
*/
static int
ipw_cmd(struct ipw_softc *sc, uint32_t type, void *data, uint32_t len)
{
struct ipw_soft_bd *sbd;
bus_addr_t physaddr;
int error;
IPW_LOCK_ASSERT(sc);
if (sc->flags & IPW_FLAG_BUSY) {
device_printf(sc->sc_dev, "%s: %s not sent, busy\n",
__func__, ipw_cmdname(type));
return EAGAIN;
}
sc->flags |= IPW_FLAG_BUSY;
sbd = &sc->stbd_list[sc->txcur];
error = bus_dmamap_load(sc->cmd_dmat, sc->cmd_map, &sc->cmd,
sizeof (struct ipw_cmd), ipw_dma_map_addr, &physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not map command DMA memory\n");
sc->flags &= ~IPW_FLAG_BUSY;
return error;
}
sc->cmd.type = htole32(type);
sc->cmd.subtype = 0;
sc->cmd.len = htole32(len);
sc->cmd.seq = 0;
memcpy(sc->cmd.data, data, len);
sbd->type = IPW_SBD_TYPE_COMMAND;
sbd->bd->physaddr = htole32(physaddr);
sbd->bd->len = htole32(sizeof (struct ipw_cmd));
sbd->bd->nfrag = 1;
sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_COMMAND |
IPW_BD_FLAG_TX_LAST_FRAGMENT;
bus_dmamap_sync(sc->cmd_dmat, sc->cmd_map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->tbd_dmat, sc->tbd_map, BUS_DMASYNC_PREWRITE);
#ifdef IPW_DEBUG
if (ipw_debug >= 4) {
printf("sending %s(%u, %u, %u, %u)", ipw_cmdname(type), type,
0, 0, len);
/* Print the data buffer in the higher debug level */
if (ipw_debug >= 9 && len > 0) {
printf(" data: 0x");
for (int i = 1; i <= len; i++)
printf("%1D", (u_char *)data + len - i, "");
}
printf("\n");
}
#endif
/* kick firmware */
sc->txfree--;
sc->txcur = (sc->txcur + 1) % IPW_NTBD;
CSR_WRITE_4(sc, IPW_CSR_TX_WRITE, sc->txcur);
/* wait at most one second for command to complete */
error = msleep(sc, &sc->sc_mtx, 0, "ipwcmd", hz);
if (error != 0) {
device_printf(sc->sc_dev, "%s: %s failed, timeout (error %u)\n",
__func__, ipw_cmdname(type), error);
sc->flags &= ~IPW_FLAG_BUSY;
return (error);
}
return (0);
}
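/*
 * Encapsulate and hand a data frame to the firmware: encrypt if required,
 * build the ipw tx header plus scatter/gather descriptors and kick the
 * tx ring.
 */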
static int
-ipw_tx_start(struct ifnet *ifp, struct mbuf *m0, struct ieee80211_node *ni)
+ipw_tx_start(struct ipw_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
{
- struct ieee80211com *ic = ifp->if_l2com;
- struct ipw_softc *sc = ic->ic_softc;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211_frame *wh;
struct ipw_soft_bd *sbd;
struct ipw_soft_hdr *shdr;
struct ipw_soft_buf *sbuf;
struct ieee80211_key *k;
struct mbuf *mnew;
bus_dma_segment_t segs[IPW_MAX_NSEG];
bus_addr_t physaddr;
int nsegs, error, i;
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
if (ieee80211_radiotap_active_vap(vap)) {
struct ipw_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
ieee80211_radiotap_tx(vap, m0);
}
shdr = SLIST_FIRST(&sc->free_shdr);
sbuf = SLIST_FIRST(&sc->free_sbuf);
KASSERT(shdr != NULL && sbuf != NULL, ("empty sw hdr/buf pool"));
shdr->hdr.type = htole32(IPW_HDR_TYPE_SEND);
shdr->hdr.subtype = 0;
shdr->hdr.encrypted = (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) ? 1 : 0;
shdr->hdr.encrypt = 0;
shdr->hdr.keyidx = 0;
shdr->hdr.keysz = 0;
shdr->hdr.fragmentsz = 0;
IEEE80211_ADDR_COPY(shdr->hdr.src_addr, wh->i_addr2);
if (ic->ic_opmode == IEEE80211_M_STA)
IEEE80211_ADDR_COPY(shdr->hdr.dst_addr, wh->i_addr3);
else
IEEE80211_ADDR_COPY(shdr->hdr.dst_addr, wh->i_addr1);
/* trim IEEE802.11 header */
m_adj(m0, sizeof (struct ieee80211_frame));
error = bus_dmamap_load_mbuf_sg(sc->txbuf_dmat, sbuf->map, m0, segs,
&nsegs, 0);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (error != 0) {
mnew = m_defrag(m0, M_NOWAIT);
if (mnew == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");
m_freem(m0);
return ENOBUFS;
}
m0 = mnew;
error = bus_dmamap_load_mbuf_sg(sc->txbuf_dmat, sbuf->map, m0,
segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not map mbuf (error %d)\n", error);
m_freem(m0);
return error;
}
}
error = bus_dmamap_load(sc->hdr_dmat, shdr->map, &shdr->hdr,
sizeof (struct ipw_hdr), ipw_dma_map_addr, &physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not map header DMA memory\n");
bus_dmamap_unload(sc->txbuf_dmat, sbuf->map);
m_freem(m0);
return error;
}
SLIST_REMOVE_HEAD(&sc->free_sbuf, next);
SLIST_REMOVE_HEAD(&sc->free_shdr, next);
sbd = &sc->stbd_list[sc->txcur];
sbd->type = IPW_SBD_TYPE_HEADER;
sbd->priv = shdr;
sbd->bd->physaddr = htole32(physaddr);
sbd->bd->len = htole32(sizeof (struct ipw_hdr));
sbd->bd->nfrag = 1 + nsegs;
sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_802_3 |
IPW_BD_FLAG_TX_NOT_LAST_FRAGMENT;
DPRINTFN(5, ("sending tx hdr (%u, %u, %u, %u, %6D, %6D)\n",
shdr->hdr.type, shdr->hdr.subtype, shdr->hdr.encrypted,
shdr->hdr.encrypt, shdr->hdr.src_addr, ":", shdr->hdr.dst_addr,
":"));
sc->txfree--;
sc->txcur = (sc->txcur + 1) % IPW_NTBD;
sbuf->m = m0;
sbuf->ni = ni;
for (i = 0; i < nsegs; i++) {
sbd = &sc->stbd_list[sc->txcur];
sbd->bd->physaddr = htole32(segs[i].ds_addr);
sbd->bd->len = htole32(segs[i].ds_len);
sbd->bd->nfrag = 0;
sbd->bd->flags = IPW_BD_FLAG_TX_FRAME_802_3;
if (i == nsegs - 1) {
sbd->type = IPW_SBD_TYPE_DATA;
sbd->priv = sbuf;
sbd->bd->flags |= IPW_BD_FLAG_TX_LAST_FRAGMENT;
} else {
sbd->type = IPW_SBD_TYPE_NOASSOC;
sbd->bd->flags |= IPW_BD_FLAG_TX_NOT_LAST_FRAGMENT;
}
DPRINTFN(5, ("sending fragment (%d)\n", i));
sc->txfree--;
sc->txcur = (sc->txcur + 1) % IPW_NTBD;
}
bus_dmamap_sync(sc->hdr_dmat, shdr->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->txbuf_dmat, sbuf->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->tbd_dmat, sc->tbd_map, BUS_DMASYNC_PREWRITE);
/* kick firmware */
CSR_WRITE_4(sc, IPW_CSR_TX_WRITE, sc->txcur);
return 0;
}
static int
ipw_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
/* no support; just discard */
m_freem(m);
ieee80211_free_node(ni);
return 0;
}
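/*
 * net80211 ic_transmit method: enqueue the frame on the driver-private
 * send queue and start transmission if the hardware is running.
 */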
-static void
-ipw_start(struct ifnet *ifp)
+static int
+ipw_transmit(struct ieee80211com *ic, struct mbuf *m)
{
- struct ipw_softc *sc = ifp->if_softc;
+ struct ipw_softc *sc = ic->ic_softc;
+ int error;
IPW_LOCK(sc);
- ipw_start_locked(ifp);
+ if ((sc->flags & IPW_FLAG_RUNNING) == 0) {
+ IPW_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ IPW_UNLOCK(sc);
+ return (error);
+ }
+ ipw_start(sc);
IPW_UNLOCK(sc);
+ return (0);
}
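/*
 * Drain the send queue while enough tx descriptors are available,
 * handing each frame to ipw_tx_start().
 */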
static void
-ipw_start_locked(struct ifnet *ifp)
+ipw_start(struct ipw_softc *sc)
{
- struct ipw_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct mbuf *m;
IPW_LOCK_ASSERT(sc);
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
- if (sc->txfree < 1 + IPW_MAX_NSEG) {
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- break;
- }
+ while (sc->txfree >= 1 + IPW_MAX_NSEG &&
+ (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
- if (ipw_tx_start(ifp, m, ni) != 0) {
+ if (ipw_tx_start(sc, m, ni) != 0) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
break;
}
/* start watchdog timer */
sc->sc_tx_timer = 5;
}
}
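/*
 * Per-second watchdog: reinitialize the device on tx timeout and end
 * scans the firmware failed to complete.
 */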
static void
ipw_watchdog(void *arg)
{
struct ipw_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
IPW_LOCK_ASSERT(sc);
if (sc->sc_tx_timer > 0) {
if (--sc->sc_tx_timer == 0) {
- if_printf(ifp, "device timeout\n");
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ device_printf(sc->sc_dev, "device timeout\n");
+ counter_u64_add(ic->ic_oerrors, 1);
taskqueue_enqueue(taskqueue_swi, &sc->sc_init_task);
}
}
if (sc->sc_scan_timer > 0) {
if (--sc->sc_scan_timer == 0) {
DPRINTFN(3, ("Scan timeout\n"));
/* End the scan */
if (sc->flags & IPW_FLAG_SCANNING) {
IPW_UNLOCK(sc);
ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
IPW_LOCK(sc);
sc->flags &= ~IPW_FLAG_SCANNING;
}
}
}
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->flags & IPW_FLAG_RUNNING)
callout_reset(&sc->sc_wdtimer, hz, ipw_watchdog, sc);
}
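/*
 * net80211 ic_parent method: bring the hardware up when the first vap
 * starts running and shut it down when the last one stops.
 */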
-static int
-ipw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+ipw_parent(struct ieee80211com *ic)
{
- struct ipw_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *) data;
- int error = 0, startall = 0;
+ struct ipw_softc *sc = ic->ic_softc;
+ int startall = 0;
- switch (cmd) {
- case SIOCSIFFLAGS:
- IPW_LOCK(sc);
- if (ifp->if_flags & IFF_UP) {
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- ipw_init_locked(sc);
- startall = 1;
- }
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ipw_stop_locked(sc);
+ IPW_LOCK(sc);
+ if (ic->ic_nrunning > 0) {
+ if (!(sc->flags & IPW_FLAG_RUNNING)) {
+ ipw_init_locked(sc);
+ startall = 1;
}
- IPW_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
- }
- return error;
+ } else if (sc->flags & IPW_FLAG_RUNNING)
+ ipw_stop_locked(sc);
+ IPW_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
}
static void
ipw_stop_master(struct ipw_softc *sc)
{
uint32_t tmp;
int ntries;
/* disable interrupts */
CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, 0);
CSR_WRITE_4(sc, IPW_CSR_RST, IPW_RST_STOP_MASTER);
for (ntries = 0; ntries < 50; ntries++) {
if (CSR_READ_4(sc, IPW_CSR_RST) & IPW_RST_MASTER_DISABLED)
break;
DELAY(10);
}
if (ntries == 50)
device_printf(sc->sc_dev, "timeout waiting for master\n");
tmp = CSR_READ_4(sc, IPW_CSR_RST);
CSR_WRITE_4(sc, IPW_CSR_RST, tmp | IPW_RST_PRINCETON_RESET);
/* Clear all flags except the following */
sc->flags &= IPW_FLAG_HAS_RADIO_SWITCH;
}
static int
ipw_reset(struct ipw_softc *sc)
{
uint32_t tmp;
int ntries;
ipw_stop_master(sc);
/* move adapter to D0 state */
tmp = CSR_READ_4(sc, IPW_CSR_CTL);
CSR_WRITE_4(sc, IPW_CSR_CTL, tmp | IPW_CTL_INIT);
/* wait for clock stabilization */
for (ntries = 0; ntries < 1000; ntries++) {
if (CSR_READ_4(sc, IPW_CSR_CTL) & IPW_CTL_CLOCK_READY)
break;
DELAY(200);
}
if (ntries == 1000)
return EIO;
tmp = CSR_READ_4(sc, IPW_CSR_RST);
CSR_WRITE_4(sc, IPW_CSR_RST, tmp | IPW_RST_SW_RESET);
DELAY(10);
tmp = CSR_READ_4(sc, IPW_CSR_CTL);
CSR_WRITE_4(sc, IPW_CSR_CTL, tmp | IPW_CTL_INIT);
return 0;
}
static int
ipw_waitfordisable(struct ipw_softc *sc, int waitfor)
{
int ms = hz < 1000 ? 1 : hz/10;
int i, error;
for (i = 0; i < 100; i++) {
if (ipw_read_table1(sc, IPW_INFO_CARD_DISABLED) == waitfor)
return 0;
error = msleep(sc, &sc->sc_mtx, PCATCH, __func__, ms);
if (error == 0 || error != EWOULDBLOCK)
return 0;
}
DPRINTF(("%s: timeout waiting for %s\n",
__func__, waitfor ? "disable" : "enable"));
return ETIMEDOUT;
}
static int
ipw_enable(struct ipw_softc *sc)
{
int error;
if ((sc->flags & IPW_FLAG_ENABLED) == 0) {
DPRINTF(("Enable adapter\n"));
error = ipw_cmd(sc, IPW_CMD_ENABLE, NULL, 0);
if (error != 0)
return error;
error = ipw_waitfordisable(sc, 0);
if (error != 0)
return error;
sc->flags |= IPW_FLAG_ENABLED;
}
return 0;
}
static int
ipw_disable(struct ipw_softc *sc)
{
int error;
if (sc->flags & IPW_FLAG_ENABLED) {
DPRINTF(("Disable adapter\n"));
error = ipw_cmd(sc, IPW_CMD_DISABLE, NULL, 0);
if (error != 0)
return error;
error = ipw_waitfordisable(sc, 1);
if (error != 0)
return error;
sc->flags &= ~IPW_FLAG_ENABLED;
}
return 0;
}
/*
* Upload the microcode to the device.
*/
static int
ipw_load_ucode(struct ipw_softc *sc, const char *uc, int size)
{
int ntries;
MEM_WRITE_4(sc, 0x3000e0, 0x80000000);
CSR_WRITE_4(sc, IPW_CSR_RST, 0);
MEM_WRITE_2(sc, 0x220000, 0x0703);
MEM_WRITE_2(sc, 0x220000, 0x0707);
MEM_WRITE_1(sc, 0x210014, 0x72);
MEM_WRITE_1(sc, 0x210014, 0x72);
MEM_WRITE_1(sc, 0x210000, 0x40);
MEM_WRITE_1(sc, 0x210000, 0x00);
MEM_WRITE_1(sc, 0x210000, 0x40);
MEM_WRITE_MULTI_1(sc, 0x210010, uc, size);
MEM_WRITE_1(sc, 0x210000, 0x00);
MEM_WRITE_1(sc, 0x210000, 0x00);
MEM_WRITE_1(sc, 0x210000, 0x80);
MEM_WRITE_2(sc, 0x220000, 0x0703);
MEM_WRITE_2(sc, 0x220000, 0x0707);
MEM_WRITE_1(sc, 0x210014, 0x72);
MEM_WRITE_1(sc, 0x210014, 0x72);
MEM_WRITE_1(sc, 0x210000, 0x00);
MEM_WRITE_1(sc, 0x210000, 0x80);
for (ntries = 0; ntries < 10; ntries++) {
if (MEM_READ_1(sc, 0x210000) & 1)
break;
DELAY(10);
}
if (ntries == 10) {
device_printf(sc->sc_dev,
"timeout waiting for ucode to initialize\n");
return EIO;
}
MEM_WRITE_4(sc, 0x3000e0, 0);
return 0;
}
/* set of macros to handle unaligned little endian data in firmware image */
#define GETLE32(p) ((p)[0] | (p)[1] << 8 | (p)[2] << 16 | (p)[3] << 24)
#define GETLE16(p) ((p)[0] | (p)[1] << 8)
static int
ipw_load_firmware(struct ipw_softc *sc, const char *fw, int size)
{
const uint8_t *p, *end;
uint32_t tmp, dst;
uint16_t len;
int error;
p = fw;
end = fw + size;
while (p < end) {
dst = GETLE32(p); p += 4;
len = GETLE16(p); p += 2;
ipw_write_mem_1(sc, dst, p, len);
p += len;
}
CSR_WRITE_4(sc, IPW_CSR_IO, IPW_IO_GPIO1_ENABLE | IPW_IO_GPIO3_MASK |
IPW_IO_LED_OFF);
/* enable interrupts */
CSR_WRITE_4(sc, IPW_CSR_INTR_MASK, IPW_INTR_MASK);
/* kick the firmware */
CSR_WRITE_4(sc, IPW_CSR_RST, 0);
tmp = CSR_READ_4(sc, IPW_CSR_CTL);
CSR_WRITE_4(sc, IPW_CSR_CTL, tmp | IPW_CTL_ALLOW_STANDBY);
/* wait at most one second for firmware initialization to complete */
if ((error = msleep(sc, &sc->sc_mtx, 0, "ipwinit", hz)) != 0) {
device_printf(sc->sc_dev, "timeout waiting for firmware "
"initialization to complete\n");
return error;
}
tmp = CSR_READ_4(sc, IPW_CSR_IO);
CSR_WRITE_4(sc, IPW_CSR_IO, tmp | IPW_IO_GPIO1_MASK |
IPW_IO_GPIO3_MASK);
return 0;
}
static int
ipw_setwepkeys(struct ipw_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ipw_wep_key wepkey;
struct ieee80211_key *wk;
int error, i;
for (i = 0; i < IEEE80211_WEP_NKID; i++) {
wk = &vap->iv_nw_keys[i];
if (wk->wk_cipher == NULL ||
wk->wk_cipher->ic_cipher != IEEE80211_CIPHER_WEP)
continue;
wepkey.idx = i;
wepkey.len = wk->wk_keylen;
memset(wepkey.key, 0, sizeof wepkey.key);
memcpy(wepkey.key, wk->wk_key, wk->wk_keylen);
DPRINTF(("Setting wep key index %u len %u\n", wepkey.idx,
wepkey.len));
error = ipw_cmd(sc, IPW_CMD_SET_WEP_KEY, &wepkey,
sizeof wepkey);
if (error != 0)
return error;
}
return 0;
}
static int
ipw_setwpaie(struct ipw_softc *sc, const void *ie, int ielen)
{
struct ipw_wpa_ie wpaie;
memset(&wpaie, 0, sizeof(wpaie));
wpaie.len = htole32(ielen);
/* XXX verify length */
memcpy(&wpaie.ie, ie, ielen);
DPRINTF(("Setting WPA IE\n"));
return ipw_cmd(sc, IPW_CMD_SET_WPA_IE, &wpaie, sizeof(wpaie));
}
static int
ipw_setbssid(struct ipw_softc *sc, uint8_t *bssid)
{
static const uint8_t zerobssid[IEEE80211_ADDR_LEN];
if (bssid == NULL || bcmp(bssid, zerobssid, IEEE80211_ADDR_LEN) == 0) {
DPRINTF(("Setting mandatory BSSID to null\n"));
return ipw_cmd(sc, IPW_CMD_SET_MANDATORY_BSSID, NULL, 0);
} else {
DPRINTF(("Setting mandatory BSSID to %6D\n", bssid, ":"));
return ipw_cmd(sc, IPW_CMD_SET_MANDATORY_BSSID,
bssid, IEEE80211_ADDR_LEN);
}
}
static int
ipw_setssid(struct ipw_softc *sc, void *ssid, size_t ssidlen)
{
if (ssidlen == 0) {
/*
* A bug in the firmware breaks the ``don't associate''
* bit in the scan options command. To compensate for
* this install a bogus ssid when no ssid is specified
* so the firmware won't try to associate.
*/
DPRINTF(("Setting bogus ESSID to WAR firmware bug\n"));
return ipw_cmd(sc, IPW_CMD_SET_ESSID,
"\x18\x19\x20\x21\x22\x23\x24\x25\x26\x27"
"\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f\x30\x31"
"\x32\x33\x34\x35\x36\x37\x38\x39\x3a\x3b"
"\x3c\x3d", IEEE80211_NWID_LEN);
} else {
#ifdef IPW_DEBUG
if (ipw_debug > 0) {
printf("Setting ESSID to ");
ieee80211_print_essid(ssid, ssidlen);
printf("\n");
}
#endif
return ipw_cmd(sc, IPW_CMD_SET_ESSID, ssid, ssidlen);
}
}
static int
ipw_setscanopts(struct ipw_softc *sc, uint32_t chanmask, uint32_t flags)
{
struct ipw_scan_options opts;
DPRINTF(("Scan options: mask 0x%x flags 0x%x\n", chanmask, flags));
opts.channels = htole32(chanmask);
opts.flags = htole32(flags);
return ipw_cmd(sc, IPW_CMD_SET_SCAN_OPTIONS, &opts, sizeof(opts));
}
static int
ipw_scan(struct ipw_softc *sc)
{
uint32_t params;
int error;
DPRINTF(("%s: flags 0x%x\n", __func__, sc->flags));
if (sc->flags & IPW_FLAG_SCANNING)
return (EBUSY);
sc->flags |= IPW_FLAG_SCANNING | IPW_FLAG_HACK;
/* NB: IPW_SCAN_DO_NOT_ASSOCIATE does not work (we set it anyway) */
error = ipw_setscanopts(sc, 0x3fff, IPW_SCAN_DO_NOT_ASSOCIATE);
if (error != 0)
goto done;
/*
* Setup null/bogus ssid so firmware doesn't use any previous
* ssid to try and associate. This is because the ``don't
* associate'' option bit is broken (sigh).
*/
error = ipw_setssid(sc, NULL, 0);
if (error != 0)
goto done;
/*
* NB: the adapter may be disabled on association lost;
* if so just re-enable it to kick off scanning.
*/
DPRINTF(("Starting scan\n"));
sc->sc_scan_timer = 3;
if (sc->flags & IPW_FLAG_ENABLED) {
params = 0; /* XXX? */
error = ipw_cmd(sc, IPW_CMD_BROADCAST_SCAN,
&params, sizeof(params));
} else
error = ipw_enable(sc);
done:
if (error != 0) {
DPRINTF(("Scan failed\n"));
sc->flags &= ~(IPW_FLAG_SCANNING | IPW_FLAG_HACK);
}
return (error);
}
static int
ipw_setchannel(struct ipw_softc *sc, struct ieee80211_channel *chan)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t data;
int error;
data = htole32(ieee80211_chan2ieee(ic, chan));
DPRINTF(("Setting channel to %u\n", le32toh(data)));
error = ipw_cmd(sc, IPW_CMD_SET_CHANNEL, &data, sizeof data);
if (error == 0)
ipw_setcurchan(sc, chan);
return error;
}
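/*
 * Push the vap's security settings, thresholds, WEP keys, ssid, bssid
 * and optional WPA IE to the firmware, then enable the adapter so it
 * (re)associates.
 */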
static void
ipw_assoc(struct ieee80211com *ic, struct ieee80211vap *vap)
{
struct ipw_softc *sc = ic->ic_softc;
struct ieee80211_node *ni = vap->iv_bss;
struct ipw_security security;
uint32_t data;
int error;
IPW_LOCK(sc);
error = ipw_disable(sc);
if (error != 0)
goto done;
memset(&security, 0, sizeof security);
security.authmode = (ni->ni_authmode == IEEE80211_AUTH_SHARED) ?
IPW_AUTH_SHARED : IPW_AUTH_OPEN;
security.ciphers = htole32(IPW_CIPHER_NONE);
DPRINTF(("Setting authmode to %u\n", security.authmode));
error = ipw_cmd(sc, IPW_CMD_SET_SECURITY_INFO, &security,
sizeof security);
if (error != 0)
goto done;
data = htole32(vap->iv_rtsthreshold);
DPRINTF(("Setting RTS threshold to %u\n", le32toh(data)));
error = ipw_cmd(sc, IPW_CMD_SET_RTS_THRESHOLD, &data, sizeof data);
if (error != 0)
goto done;
data = htole32(vap->iv_fragthreshold);
DPRINTF(("Setting frag threshold to %u\n", le32toh(data)));
error = ipw_cmd(sc, IPW_CMD_SET_FRAG_THRESHOLD, &data, sizeof data);
if (error != 0)
goto done;
if (vap->iv_flags & IEEE80211_F_PRIVACY) {
error = ipw_setwepkeys(sc);
if (error != 0)
goto done;
if (vap->iv_def_txkey != IEEE80211_KEYIX_NONE) {
data = htole32(vap->iv_def_txkey);
DPRINTF(("Setting wep tx key index to %u\n",
le32toh(data)));
error = ipw_cmd(sc, IPW_CMD_SET_WEP_KEY_INDEX, &data,
sizeof data);
if (error != 0)
goto done;
}
}
data = htole32((vap->iv_flags & IEEE80211_F_PRIVACY) ? IPW_WEPON : 0);
DPRINTF(("Setting wep flags to 0x%x\n", le32toh(data)));
error = ipw_cmd(sc, IPW_CMD_SET_WEP_FLAGS, &data, sizeof data);
if (error != 0)
goto done;
error = ipw_setssid(sc, ni->ni_essid, ni->ni_esslen);
if (error != 0)
goto done;
error = ipw_setbssid(sc, ni->ni_bssid);
if (error != 0)
goto done;
if (vap->iv_appie_wpa != NULL) {
struct ieee80211_appie *ie = vap->iv_appie_wpa;
error = ipw_setwpaie(sc, ie->ie_data, ie->ie_len);
if (error != 0)
goto done;
}
if (ic->ic_opmode == IEEE80211_M_IBSS) {
error = ipw_setchannel(sc, ni->ni_chan);
if (error != 0)
goto done;
}
/* lock scan to ap's channel and enable associate */
error = ipw_setscanopts(sc,
1<<(ieee80211_chan2ieee(ic, ni->ni_chan)-1), 0);
if (error != 0)
goto done;
error = ipw_enable(sc); /* finally, enable adapter */
if (error == 0)
sc->flags |= IPW_FLAG_ASSOCIATING;
done:
IPW_UNLOCK(sc);
}
static void
ipw_disassoc(struct ieee80211com *ic, struct ieee80211vap *vap)
{
struct ieee80211_node *ni = vap->iv_bss;
struct ipw_softc *sc = ic->ic_softc;
IPW_LOCK(sc);
DPRINTF(("Disassociate from %6D\n", ni->ni_bssid, ":"));
/*
* NB: don't try to do this if ipw_stop_master has
* shutdown the firmware and disabled interrupts.
*/
if (sc->flags & IPW_FLAG_FW_INITED) {
sc->flags &= ~IPW_FLAG_ASSOCIATED;
/*
* NB: firmware currently ignores bssid parameter, but
* supply it in case this changes (follow linux driver).
*/
(void) ipw_cmd(sc, IPW_CMD_DISASSOCIATE,
ni->ni_bssid, IEEE80211_ADDR_LEN);
}
IPW_UNLOCK(sc);
}
/*
* Handler for sc_init_task. This is a simple wrapper around ipw_init().
* It is called on firmware panics or on watchdog timeouts.
*/
static void
ipw_init_task(void *context, int pending)
{
ipw_init(context);
}
static void
ipw_init(void *priv)
{
struct ipw_softc *sc = priv;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
IPW_LOCK(sc);
ipw_init_locked(sc);
IPW_UNLOCK(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->flags & IPW_FLAG_RUNNING)
ieee80211_start_all(ic); /* start all vap's */
}
static void
ipw_init_locked(struct ipw_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
const struct firmware *fp;
const struct ipw_firmware_hdr *hdr;
const char *fw;
IPW_LOCK_ASSERT(sc);
DPRINTF(("%s: state %s flags 0x%x\n", __func__,
ieee80211_state_name[vap->iv_state], sc->flags));
/*
* Avoid re-entrant calls. We need to release the mutex in ipw_init()
* when loading the firmware and we don't want to be called during this
* operation.
*/
if (sc->flags & IPW_FLAG_INIT_LOCKED)
return;
sc->flags |= IPW_FLAG_INIT_LOCKED;
ipw_stop_locked(sc);
if (ipw_reset(sc) != 0) {
device_printf(sc->sc_dev, "could not reset adapter\n");
goto fail;
}
if (sc->sc_firmware == NULL) {
device_printf(sc->sc_dev, "no firmware\n");
goto fail;
}
/* NB: consistency already checked on load */
fp = sc->sc_firmware;
hdr = (const struct ipw_firmware_hdr *)fp->data;
DPRINTF(("Loading firmware image '%s'\n", fp->name));
fw = (const char *)fp->data + sizeof *hdr + le32toh(hdr->mainsz);
if (ipw_load_ucode(sc, fw, le32toh(hdr->ucodesz)) != 0) {
device_printf(sc->sc_dev, "could not load microcode\n");
goto fail;
}
ipw_stop_master(sc);
/*
* Setup tx, rx and status rings.
*/
sc->txold = IPW_NTBD - 1;
sc->txcur = 0;
sc->txfree = IPW_NTBD - 2;
sc->rxcur = IPW_NRBD - 1;
CSR_WRITE_4(sc, IPW_CSR_TX_BASE, sc->tbd_phys);
CSR_WRITE_4(sc, IPW_CSR_TX_SIZE, IPW_NTBD);
CSR_WRITE_4(sc, IPW_CSR_TX_READ, 0);
CSR_WRITE_4(sc, IPW_CSR_TX_WRITE, sc->txcur);
CSR_WRITE_4(sc, IPW_CSR_RX_BASE, sc->rbd_phys);
CSR_WRITE_4(sc, IPW_CSR_RX_SIZE, IPW_NRBD);
CSR_WRITE_4(sc, IPW_CSR_RX_READ, 0);
CSR_WRITE_4(sc, IPW_CSR_RX_WRITE, sc->rxcur);
CSR_WRITE_4(sc, IPW_CSR_STATUS_BASE, sc->status_phys);
fw = (const char *)fp->data + sizeof *hdr;
if (ipw_load_firmware(sc, fw, le32toh(hdr->mainsz)) != 0) {
device_printf(sc->sc_dev, "could not load firmware\n");
goto fail;
}
sc->flags |= IPW_FLAG_FW_INITED;
/* retrieve information tables base addresses */
sc->table1_base = CSR_READ_4(sc, IPW_CSR_TABLE1_BASE);
sc->table2_base = CSR_READ_4(sc, IPW_CSR_TABLE2_BASE);
ipw_write_table1(sc, IPW_INFO_LOCK, 0);
if (ipw_config(sc) != 0) {
device_printf(sc->sc_dev, "device configuration failed\n");
goto fail;
}
callout_reset(&sc->sc_wdtimer, hz, ipw_watchdog, sc);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
-
- sc->flags &=~ IPW_FLAG_INIT_LOCKED;
+ sc->flags |= IPW_FLAG_RUNNING;
+ sc->flags &= ~IPW_FLAG_INIT_LOCKED;
return;
fail:
ipw_stop_locked(sc);
- sc->flags &=~ IPW_FLAG_INIT_LOCKED;
+ sc->flags &= ~IPW_FLAG_INIT_LOCKED;
}
static int
ipw_config(struct ipw_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ipw_configuration config;
uint32_t data;
int error;
error = ipw_disable(sc);
if (error != 0)
return error;
switch (ic->ic_opmode) {
case IEEE80211_M_STA:
case IEEE80211_M_HOSTAP:
case IEEE80211_M_WDS: /* XXX */
data = htole32(IPW_MODE_BSS);
break;
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
data = htole32(IPW_MODE_IBSS);
break;
case IEEE80211_M_MONITOR:
data = htole32(IPW_MODE_MONITOR);
break;
default:
device_printf(sc->sc_dev, "unknown opmode %d\n", ic->ic_opmode);
return EINVAL;
}
DPRINTF(("Setting mode to %u\n", le32toh(data)));
error = ipw_cmd(sc, IPW_CMD_SET_MODE, &data, sizeof data);
if (error != 0)
return error;
if (ic->ic_opmode == IEEE80211_M_IBSS ||
ic->ic_opmode == IEEE80211_M_MONITOR) {
error = ipw_setchannel(sc, ic->ic_curchan);
if (error != 0)
return error;
}
if (ic->ic_opmode == IEEE80211_M_MONITOR)
return ipw_enable(sc);
config.flags = htole32(IPW_CFG_BSS_MASK | IPW_CFG_IBSS_MASK |
IPW_CFG_PREAMBLE_AUTO | IPW_CFG_802_1x_ENABLE);
if (ic->ic_opmode == IEEE80211_M_IBSS)
config.flags |= htole32(IPW_CFG_IBSS_AUTO_START);
- if (ifp->if_flags & IFF_PROMISC)
+ if (ic->ic_promisc > 0)
config.flags |= htole32(IPW_CFG_PROMISCUOUS);
config.bss_chan = htole32(0x3fff); /* channels 1-14 */
config.ibss_chan = htole32(0x7ff); /* channels 1-11 */
DPRINTF(("Setting configuration to 0x%x\n", le32toh(config.flags)));
error = ipw_cmd(sc, IPW_CMD_SET_CONFIGURATION, &config, sizeof config);
if (error != 0)
return error;
data = htole32(0xf); /* 1, 2, 5.5, 11 */
DPRINTF(("Setting basic tx rates to 0x%x\n", le32toh(data)));
error = ipw_cmd(sc, IPW_CMD_SET_BASIC_TX_RATES, &data, sizeof data);
if (error != 0)
return error;
/* Use the same rate set */
DPRINTF(("Setting msdu tx rates to 0x%x\n", le32toh(data)));
error = ipw_cmd(sc, IPW_CMD_SET_MSDU_TX_RATES, &data, sizeof data);
if (error != 0)
return error;
/* Use the same rate set */
DPRINTF(("Setting tx rates to 0x%x\n", le32toh(data)));
error = ipw_cmd(sc, IPW_CMD_SET_TX_RATES, &data, sizeof data);
if (error != 0)
return error;
data = htole32(IPW_POWER_MODE_CAM);
DPRINTF(("Setting power mode to %u\n", le32toh(data)));
error = ipw_cmd(sc, IPW_CMD_SET_POWER_MODE, &data, sizeof data);
if (error != 0)
return error;
if (ic->ic_opmode == IEEE80211_M_IBSS) {
data = htole32(32); /* default value */
DPRINTF(("Setting tx power index to %u\n", le32toh(data)));
error = ipw_cmd(sc, IPW_CMD_SET_TX_POWER_INDEX, &data,
sizeof data);
if (error != 0)
return error;
}
return 0;
}
static void
ipw_stop(void *priv)
{
struct ipw_softc *sc = priv;
IPW_LOCK(sc);
ipw_stop_locked(sc);
IPW_UNLOCK(sc);
}
static void
ipw_stop_locked(struct ipw_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
int i;
IPW_LOCK_ASSERT(sc);
callout_stop(&sc->sc_wdtimer);
ipw_stop_master(sc);
CSR_WRITE_4(sc, IPW_CSR_RST, IPW_RST_SW_RESET);
/*
* Release tx buffers.
*/
for (i = 0; i < IPW_NTBD; i++)
ipw_release_sbd(sc, &sc->stbd_list[i]);
sc->sc_tx_timer = 0;
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->flags &= ~IPW_FLAG_RUNNING;
}
static int
ipw_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
struct ipw_softc *sc = arg1;
uint32_t i, size, buf[256];
memset(buf, 0, sizeof buf);
if (!(sc->flags & IPW_FLAG_FW_INITED))
return SYSCTL_OUT(req, buf, sizeof buf);
CSR_WRITE_4(sc, IPW_CSR_AUTOINC_ADDR, sc->table1_base);
size = min(CSR_READ_4(sc, IPW_CSR_AUTOINC_DATA), 256);
for (i = 1; i < size; i++)
buf[i] = MEM_READ_4(sc, CSR_READ_4(sc, IPW_CSR_AUTOINC_DATA));
return SYSCTL_OUT(req, buf, size);
}
static int
ipw_sysctl_radio(SYSCTL_HANDLER_ARGS)
{
struct ipw_softc *sc = arg1;
int val;
val = !((sc->flags & IPW_FLAG_HAS_RADIO_SWITCH) &&
(CSR_READ_4(sc, IPW_CSR_IO) & IPW_IO_RADIO_DISABLED));
return SYSCTL_OUT(req, &val, sizeof val);
}
static uint32_t
ipw_read_table1(struct ipw_softc *sc, uint32_t off)
{
return MEM_READ_4(sc, MEM_READ_4(sc, sc->table1_base + off));
}
static void
ipw_write_table1(struct ipw_softc *sc, uint32_t off, uint32_t info)
{
MEM_WRITE_4(sc, MEM_READ_4(sc, sc->table1_base + off), info);
}
#if 0
static int
ipw_read_table2(struct ipw_softc *sc, uint32_t off, void *buf, uint32_t *len)
{
uint32_t addr, info;
uint16_t count, size;
uint32_t total;
/* addr[4] + count[2] + size[2] */
addr = MEM_READ_4(sc, sc->table2_base + off);
info = MEM_READ_4(sc, sc->table2_base + off + 4);
count = info >> 16;
size = info & 0xffff;
total = count * size;
if (total > *len) {
*len = total;
return EINVAL;
}
*len = total;
ipw_read_mem_1(sc, addr, buf, total);
return 0;
}
static void
ipw_read_mem_1(struct ipw_softc *sc, bus_size_t offset, uint8_t *datap,
bus_size_t count)
{
for (; count > 0; offset++, datap++, count--) {
CSR_WRITE_4(sc, IPW_CSR_INDIRECT_ADDR, offset & ~3);
*datap = CSR_READ_1(sc, IPW_CSR_INDIRECT_DATA + (offset & 3));
}
}
#endif
static void
ipw_write_mem_1(struct ipw_softc *sc, bus_size_t offset, const uint8_t *datap,
bus_size_t count)
{
for (; count > 0; offset++, datap++, count--) {
CSR_WRITE_4(sc, IPW_CSR_INDIRECT_ADDR, offset & ~3);
CSR_WRITE_1(sc, IPW_CSR_INDIRECT_DATA + (offset & 3), *datap);
}
}
static void
ipw_scan_start(struct ieee80211com *ic)
{
struct ipw_softc *sc = ic->ic_softc;
IPW_LOCK(sc);
ipw_scan(sc);
IPW_UNLOCK(sc);
}
static void
ipw_set_channel(struct ieee80211com *ic)
{
struct ipw_softc *sc = ic->ic_softc;
IPW_LOCK(sc);
if (ic->ic_opmode == IEEE80211_M_MONITOR) {
ipw_disable(sc);
ipw_setchannel(sc, ic->ic_curchan);
ipw_enable(sc);
}
IPW_UNLOCK(sc);
}
static void
ipw_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
/* NB: all channels are scanned at once */
}
static void
ipw_scan_mindwell(struct ieee80211_scan_state *ss)
{
/* NB: don't try to abort scan; wait for firmware to finish */
}
static void
ipw_scan_end(struct ieee80211com *ic)
{
struct ipw_softc *sc = ic->ic_softc;
IPW_LOCK(sc);
sc->flags &= ~IPW_FLAG_SCANNING;
IPW_UNLOCK(sc);
}
Index: head/sys/dev/ipw/if_ipwvar.h
===================================================================
--- head/sys/dev/ipw/if_ipwvar.h (revision 287196)
+++ head/sys/dev/ipw/if_ipwvar.h (revision 287197)
@@ -1,168 +1,170 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2004-2006
* Damien Bergamini <damien.bergamini@free.fr>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#define IPW_MAX_NSEG 1
struct ipw_soft_bd {
struct ipw_bd *bd;
int type;
#define IPW_SBD_TYPE_NOASSOC 0
#define IPW_SBD_TYPE_COMMAND 1
#define IPW_SBD_TYPE_HEADER 2
#define IPW_SBD_TYPE_DATA 3
void *priv;
};
struct ipw_soft_hdr {
struct ipw_hdr hdr;
bus_dmamap_t map;
SLIST_ENTRY(ipw_soft_hdr) next;
};
struct ipw_soft_buf {
struct mbuf *m;
struct ieee80211_node *ni;
bus_dmamap_t map;
SLIST_ENTRY(ipw_soft_buf) next;
};
struct ipw_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint8_t wr_flags;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_antsignal;
int8_t wr_antnoise;
};
#define IPW_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DB_ANTNOISE))
struct ipw_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
};
#define IPW_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_CHANNEL))
struct ipw_vap {
struct ieee80211vap vap;
int (*newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define IPW_VAP(vap) ((struct ipw_vap *)(vap))
struct ipw_softc {
- struct ifnet *sc_ifp;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
device_t sc_dev;
struct mtx sc_mtx;
struct task sc_init_task;
struct callout sc_wdtimer; /* watchdog timer */
uint32_t flags;
#define IPW_FLAG_FW_INITED 0x0001
#define IPW_FLAG_INIT_LOCKED 0x0002
#define IPW_FLAG_HAS_RADIO_SWITCH 0x0004
#define IPW_FLAG_HACK 0x0008
#define IPW_FLAG_SCANNING 0x0010
#define IPW_FLAG_ENABLED 0x0020
#define IPW_FLAG_BUSY 0x0040
#define IPW_FLAG_ASSOCIATING 0x0080
#define IPW_FLAG_ASSOCIATED 0x0100
+#define IPW_FLAG_RUNNING 0x0200
struct resource *irq;
struct resource *mem;
bus_space_tag_t sc_st;
bus_space_handle_t sc_sh;
void *sc_ih;
const struct firmware *sc_firmware;
int sc_tx_timer;
int sc_scan_timer;
bus_dma_tag_t parent_dmat;
bus_dma_tag_t tbd_dmat;
bus_dma_tag_t rbd_dmat;
bus_dma_tag_t status_dmat;
bus_dma_tag_t cmd_dmat;
bus_dma_tag_t hdr_dmat;
bus_dma_tag_t txbuf_dmat;
bus_dma_tag_t rxbuf_dmat;
bus_dmamap_t tbd_map;
bus_dmamap_t rbd_map;
bus_dmamap_t status_map;
bus_dmamap_t cmd_map;
bus_addr_t tbd_phys;
bus_addr_t rbd_phys;
bus_addr_t status_phys;
struct ipw_bd *tbd_list;
struct ipw_bd *rbd_list;
struct ipw_status *status_list;
struct ipw_cmd cmd;
struct ipw_soft_bd stbd_list[IPW_NTBD];
struct ipw_soft_buf tx_sbuf_list[IPW_NDATA];
struct ipw_soft_hdr shdr_list[IPW_NDATA];
struct ipw_soft_bd srbd_list[IPW_NRBD];
struct ipw_soft_buf rx_sbuf_list[IPW_NRBD];
SLIST_HEAD(, ipw_soft_hdr) free_shdr;
SLIST_HEAD(, ipw_soft_buf) free_sbuf;
uint32_t table1_base;
uint32_t table2_base;
uint32_t txcur;
uint32_t txold;
uint32_t rxcur;
int txfree;
struct ipw_rx_radiotap_header sc_rxtap;
struct ipw_tx_radiotap_header sc_txtap;
};
/*
* NB.: This models the only instance of async locking in ipw_init_locked
* and must be kept in sync.
*/
#define IPW_LOCK(sc) mtx_lock(&sc->sc_mtx);
#define IPW_UNLOCK(sc) mtx_unlock(&sc->sc_mtx);
#define IPW_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
Index: head/sys/dev/iwi/if_iwi.c
===================================================================
--- head/sys/dev/iwi/if_iwi.c (revision 287196)
+++ head/sys/dev/iwi/if_iwi.c (revision 287197)
@@ -1,3636 +1,3582 @@
/*-
* Copyright (c) 2004, 2005
* Damien Bergamini <damien.bergamini@free.fr>. All rights reserved.
* Copyright (c) 2005-2006 Sam Leffler, Errno Consulting
* Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
* Intel(R) PRO/Wireless 2200BG/2225BG/2915ABG driver
* http://www.intel.com/network/connectivity/products/wireless/prowireless_mobile.htm
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/taskqueue.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_input.h>
#include <net80211/ieee80211_regdomain.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <dev/iwi/if_iwireg.h>
#include <dev/iwi/if_iwivar.h>
#include <dev/iwi/if_iwi_ioctl.h>
#define IWI_DEBUG
#ifdef IWI_DEBUG
#define DPRINTF(x) do { if (iwi_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x) do { if (iwi_debug >= (n)) printf x; } while (0)
int iwi_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, iwi, CTLFLAG_RW, &iwi_debug, 0, "iwi debug level");
static const char *iwi_fw_states[] = {
"IDLE", /* IWI_FW_IDLE */
"LOADING", /* IWI_FW_LOADING */
"ASSOCIATING", /* IWI_FW_ASSOCIATING */
"DISASSOCIATING", /* IWI_FW_DISASSOCIATING */
"SCANNING", /* IWI_FW_SCANNING */
};
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif
MODULE_DEPEND(iwi, pci, 1, 1, 1);
MODULE_DEPEND(iwi, wlan, 1, 1, 1);
MODULE_DEPEND(iwi, firmware, 1, 1, 1);
enum {
IWI_LED_TX,
IWI_LED_RX,
IWI_LED_POLL,
};
struct iwi_ident {
uint16_t vendor;
uint16_t device;
const char *name;
};
static const struct iwi_ident iwi_ident_table[] = {
{ 0x8086, 0x4220, "Intel(R) PRO/Wireless 2200BG" },
{ 0x8086, 0x4221, "Intel(R) PRO/Wireless 2225BG" },
{ 0x8086, 0x4223, "Intel(R) PRO/Wireless 2915ABG" },
{ 0x8086, 0x4224, "Intel(R) PRO/Wireless 2915ABG" },
{ 0, 0, NULL }
};
static struct ieee80211vap *iwi_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void iwi_vap_delete(struct ieee80211vap *);
static void iwi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int iwi_alloc_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *,
int);
static void iwi_reset_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *);
static void iwi_free_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *);
static int iwi_alloc_tx_ring(struct iwi_softc *, struct iwi_tx_ring *,
int, bus_addr_t, bus_addr_t);
static void iwi_reset_tx_ring(struct iwi_softc *, struct iwi_tx_ring *);
static void iwi_free_tx_ring(struct iwi_softc *, struct iwi_tx_ring *);
static int iwi_alloc_rx_ring(struct iwi_softc *, struct iwi_rx_ring *,
int);
static void iwi_reset_rx_ring(struct iwi_softc *, struct iwi_rx_ring *);
static void iwi_free_rx_ring(struct iwi_softc *, struct iwi_rx_ring *);
static struct ieee80211_node *iwi_node_alloc(struct ieee80211vap *,
const uint8_t [IEEE80211_ADDR_LEN]);
static void iwi_node_free(struct ieee80211_node *);
static void iwi_media_status(struct ifnet *, struct ifmediareq *);
static int iwi_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void iwi_wme_init(struct iwi_softc *);
static int iwi_wme_setparams(struct iwi_softc *);
static void iwi_update_wme(void *, int);
static int iwi_wme_update(struct ieee80211com *);
static uint16_t iwi_read_prom_word(struct iwi_softc *, uint8_t);
static void iwi_frame_intr(struct iwi_softc *, struct iwi_rx_data *, int,
struct iwi_frame *);
static void iwi_notification_intr(struct iwi_softc *, struct iwi_notif *);
static void iwi_rx_intr(struct iwi_softc *);
static void iwi_tx_intr(struct iwi_softc *, struct iwi_tx_ring *);
static void iwi_intr(void *);
static int iwi_cmd(struct iwi_softc *, uint8_t, void *, uint8_t);
static void iwi_write_ibssnode(struct iwi_softc *, const u_int8_t [], int);
-static int iwi_tx_start(struct ifnet *, struct mbuf *,
+static int iwi_tx_start(struct iwi_softc *, struct mbuf *,
struct ieee80211_node *, int);
static int iwi_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
-static void iwi_start_locked(struct ifnet *);
-static void iwi_start(struct ifnet *);
+static void iwi_start(struct iwi_softc *);
+static int iwi_transmit(struct ieee80211com *, struct mbuf *);
static void iwi_watchdog(void *);
-static int iwi_ioctl(struct ifnet *, u_long, caddr_t);
+static int iwi_ioctl(struct ieee80211com *, u_long, void *);
+static void iwi_parent(struct ieee80211com *);
static void iwi_stop_master(struct iwi_softc *);
static int iwi_reset(struct iwi_softc *);
static int iwi_load_ucode(struct iwi_softc *, const struct iwi_fw *);
static int iwi_load_firmware(struct iwi_softc *, const struct iwi_fw *);
static void iwi_release_fw_dma(struct iwi_softc *sc);
static int iwi_config(struct iwi_softc *);
static int iwi_get_firmware(struct iwi_softc *, enum ieee80211_opmode);
static void iwi_put_firmware(struct iwi_softc *);
static void iwi_monitor_scan(void *, int);
static int iwi_scanchan(struct iwi_softc *, unsigned long, int);
static void iwi_scan_start(struct ieee80211com *);
static void iwi_scan_end(struct ieee80211com *);
static void iwi_set_channel(struct ieee80211com *);
static void iwi_scan_curchan(struct ieee80211_scan_state *, unsigned long maxdwell);
static void iwi_scan_mindwell(struct ieee80211_scan_state *);
static int iwi_auth_and_assoc(struct iwi_softc *, struct ieee80211vap *);
static void iwi_disassoc(void *, int);
static int iwi_disassociate(struct iwi_softc *, int quiet);
static void iwi_init_locked(struct iwi_softc *);
static void iwi_init(void *);
static int iwi_init_fw_dma(struct iwi_softc *, int);
static void iwi_stop_locked(void *);
static void iwi_stop(struct iwi_softc *);
static void iwi_restart(void *, int);
static int iwi_getrfkill(struct iwi_softc *);
static void iwi_radio_on(void *, int);
static void iwi_radio_off(void *, int);
static void iwi_sysctlattach(struct iwi_softc *);
static void iwi_led_event(struct iwi_softc *, int);
static void iwi_ledattach(struct iwi_softc *);
static int iwi_probe(device_t);
static int iwi_attach(device_t);
static int iwi_detach(device_t);
static int iwi_shutdown(device_t);
static int iwi_suspend(device_t);
static int iwi_resume(device_t);
static device_method_t iwi_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, iwi_probe),
DEVMETHOD(device_attach, iwi_attach),
DEVMETHOD(device_detach, iwi_detach),
DEVMETHOD(device_shutdown, iwi_shutdown),
DEVMETHOD(device_suspend, iwi_suspend),
DEVMETHOD(device_resume, iwi_resume),
DEVMETHOD_END
};
static driver_t iwi_driver = {
"iwi",
iwi_methods,
sizeof (struct iwi_softc)
};
static devclass_t iwi_devclass;
DRIVER_MODULE(iwi, pci, iwi_driver, iwi_devclass, NULL, NULL);
MODULE_VERSION(iwi, 1);
static __inline uint8_t
MEM_READ_1(struct iwi_softc *sc, uint32_t addr)
{
CSR_WRITE_4(sc, IWI_CSR_INDIRECT_ADDR, addr);
return CSR_READ_1(sc, IWI_CSR_INDIRECT_DATA);
}
static __inline uint32_t
MEM_READ_4(struct iwi_softc *sc, uint32_t addr)
{
CSR_WRITE_4(sc, IWI_CSR_INDIRECT_ADDR, addr);
return CSR_READ_4(sc, IWI_CSR_INDIRECT_DATA);
}
static int
iwi_probe(device_t dev)
{
const struct iwi_ident *ident;
for (ident = iwi_ident_table; ident->name != NULL; ident++) {
if (pci_get_vendor(dev) == ident->vendor &&
pci_get_device(dev) == ident->device) {
device_set_desc(dev, ident->name);
return (BUS_PROBE_DEFAULT);
}
}
return ENXIO;
}
static int
iwi_attach(device_t dev)
{
struct iwi_softc *sc = device_get_softc(dev);
- struct ifnet *ifp;
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t val;
int i, error;
uint8_t bands;
- uint8_t macaddr[IEEE80211_ADDR_LEN];
sc->sc_dev = dev;
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- return ENXIO;
- }
- ic = ifp->if_l2com;
-
IWI_LOCK_INIT(sc);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
sc->sc_unr = new_unrhdr(1, IWI_MAX_IBSSNODE-1, &sc->sc_mtx);
TASK_INIT(&sc->sc_radiontask, 0, iwi_radio_on, sc);
TASK_INIT(&sc->sc_radiofftask, 0, iwi_radio_off, sc);
TASK_INIT(&sc->sc_restarttask, 0, iwi_restart, sc);
TASK_INIT(&sc->sc_disassoctask, 0, iwi_disassoc, sc);
TASK_INIT(&sc->sc_wmetask, 0, iwi_update_wme, sc);
TASK_INIT(&sc->sc_monitortask, 0, iwi_monitor_scan, sc);
callout_init_mtx(&sc->sc_wdtimer, &sc->sc_mtx, 0);
callout_init_mtx(&sc->sc_rftimer, &sc->sc_mtx, 0);
pci_write_config(dev, 0x41, 0, 1);
/* enable bus-mastering */
pci_enable_busmaster(dev);
i = PCIR_BAR(0);
sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE);
if (sc->mem == NULL) {
device_printf(dev, "could not allocate memory resource\n");
goto fail;
}
sc->sc_st = rman_get_bustag(sc->mem);
sc->sc_sh = rman_get_bushandle(sc->mem);
i = 0;
sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i,
RF_ACTIVE | RF_SHAREABLE);
if (sc->irq == NULL) {
device_printf(dev, "could not allocate interrupt resource\n");
goto fail;
}
if (iwi_reset(sc) != 0) {
device_printf(dev, "could not reset adapter\n");
goto fail;
}
/*
* Allocate rings.
*/
if (iwi_alloc_cmd_ring(sc, &sc->cmdq, IWI_CMD_RING_COUNT) != 0) {
device_printf(dev, "could not allocate Cmd ring\n");
goto fail;
}
for (i = 0; i < 4; i++) {
error = iwi_alloc_tx_ring(sc, &sc->txq[i], IWI_TX_RING_COUNT,
IWI_CSR_TX1_RIDX + i * 4,
IWI_CSR_TX1_WIDX + i * 4);
if (error != 0) {
device_printf(dev, "could not allocate Tx ring %d\n",
i+1);
goto fail;
}
}
if (iwi_alloc_rx_ring(sc, &sc->rxq, IWI_RX_RING_COUNT) != 0) {
device_printf(dev, "could not allocate Rx ring\n");
goto fail;
}
iwi_wme_init(sc);
- ifp->if_softc = sc;
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = iwi_init;
- ifp->if_ioctl = iwi_ioctl;
- ifp->if_start = iwi_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA /* station mode supported */
| IEEE80211_C_IBSS /* IBSS mode supported */
| IEEE80211_C_MONITOR /* monitor mode supported */
| IEEE80211_C_PMGT /* power save supported */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_WPA /* 802.11i */
| IEEE80211_C_WME /* 802.11e */
#if 0
| IEEE80211_C_BGSCAN /* capable of bg scanning */
#endif
;
/* read MAC address from EEPROM */
val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 0);
- macaddr[0] = val & 0xff;
- macaddr[1] = val >> 8;
+ ic->ic_macaddr[0] = val & 0xff;
+ ic->ic_macaddr[1] = val >> 8;
val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 1);
- macaddr[2] = val & 0xff;
- macaddr[3] = val >> 8;
+ ic->ic_macaddr[2] = val & 0xff;
+ ic->ic_macaddr[3] = val >> 8;
val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 2);
- macaddr[4] = val & 0xff;
- macaddr[5] = val >> 8;
+ ic->ic_macaddr[4] = val & 0xff;
+ ic->ic_macaddr[5] = val >> 8;
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
if (pci_get_device(dev) >= 0x4223)
setbit(&bands, IEEE80211_MODE_11A);
ieee80211_init_channels(ic, NULL, &bands);
- ieee80211_ifattach(ic, macaddr);
+ ieee80211_ifattach(ic);
/* override default methods */
ic->ic_node_alloc = iwi_node_alloc;
sc->sc_node_free = ic->ic_node_free;
ic->ic_node_free = iwi_node_free;
ic->ic_raw_xmit = iwi_raw_xmit;
ic->ic_scan_start = iwi_scan_start;
ic->ic_scan_end = iwi_scan_end;
ic->ic_set_channel = iwi_set_channel;
ic->ic_scan_curchan = iwi_scan_curchan;
ic->ic_scan_mindwell = iwi_scan_mindwell;
ic->ic_wme.wme_update = iwi_wme_update;
ic->ic_vap_create = iwi_vap_create;
ic->ic_vap_delete = iwi_vap_delete;
+ ic->ic_ioctl = iwi_ioctl;
+ ic->ic_transmit = iwi_transmit;
+ ic->ic_parent = iwi_parent;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
IWI_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
IWI_RX_RADIOTAP_PRESENT);
iwi_sysctlattach(sc);
iwi_ledattach(sc);
/*
* Hook our interrupt after all initialization is complete.
*/
error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
NULL, iwi_intr, sc, &sc->sc_ih);
if (error != 0) {
device_printf(dev, "could not set up interrupt\n");
goto fail;
}
if (bootverbose)
ieee80211_announce(ic);
return 0;
fail:
/* XXX fix */
iwi_detach(dev);
return ENXIO;
}
static int
iwi_detach(device_t dev)
{
struct iwi_softc *sc = device_get_softc(dev);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
bus_teardown_intr(dev, sc->irq, sc->sc_ih);
/* NB: do early to drain any pending tasks */
ieee80211_draintask(ic, &sc->sc_radiontask);
ieee80211_draintask(ic, &sc->sc_radiofftask);
ieee80211_draintask(ic, &sc->sc_restarttask);
ieee80211_draintask(ic, &sc->sc_disassoctask);
ieee80211_draintask(ic, &sc->sc_monitortask);
iwi_stop(sc);
ieee80211_ifdetach(ic);
iwi_put_firmware(sc);
iwi_release_fw_dma(sc);
iwi_free_cmd_ring(sc, &sc->cmdq);
iwi_free_tx_ring(sc, &sc->txq[0]);
iwi_free_tx_ring(sc, &sc->txq[1]);
iwi_free_tx_ring(sc, &sc->txq[2]);
iwi_free_tx_ring(sc, &sc->txq[3]);
iwi_free_rx_ring(sc, &sc->rxq);
bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq);
bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem),
sc->mem);
delete_unrhdr(sc->sc_unr);
+ mbufq_drain(&sc->sc_snd);
IWI_LOCK_DESTROY(sc);
- if_free(ifp);
-
return 0;
}
static struct ieee80211vap *
iwi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
- struct ifnet *ifp = ic->ic_ifp;
- struct iwi_softc *sc = ifp->if_softc;
+ struct iwi_softc *sc = ic->ic_softc;
struct iwi_vap *ivp;
struct ieee80211vap *vap;
int i;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return NULL;
/*
* Get firmware image (and possibly dma memory) on mode change.
*/
if (iwi_get_firmware(sc, opmode))
return NULL;
/* allocate DMA memory for mapping firmware image */
i = sc->fw_fw.size;
if (sc->fw_boot.size > i)
i = sc->fw_boot.size;
/* XXX do we dma the ucode as well ? */
if (sc->fw_uc.size > i)
i = sc->fw_uc.size;
if (iwi_init_fw_dma(sc, i))
return NULL;
- ivp = (struct iwi_vap *) malloc(sizeof(struct iwi_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (ivp == NULL)
- return NULL;
+ ivp = malloc(sizeof(struct iwi_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &ivp->iwi_vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
/* override the default, the setting comes from the linux driver */
vap->iv_bmissthreshold = 24;
/* override with driver methods */
ivp->iwi_newstate = vap->iv_newstate;
vap->iv_newstate = iwi_newstate;
/* complete setup */
- ieee80211_vap_attach(vap, ieee80211_media_change, iwi_media_status);
+ ieee80211_vap_attach(vap, ieee80211_media_change, iwi_media_status,
+ mac);
ic->ic_opmode = opmode;
return vap;
}
static void
iwi_vap_delete(struct ieee80211vap *vap)
{
struct iwi_vap *ivp = IWI_VAP(vap);
ieee80211_vap_detach(vap);
free(ivp, M_80211_VAP);
}
static void
iwi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
if (error != 0)
return;
KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
*(bus_addr_t *)arg = segs[0].ds_addr;
}
static int
iwi_alloc_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring, int count)
{
int error;
ring->count = count;
ring->queued = 0;
ring->cur = ring->next = 0;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
count * IWI_CMD_DESC_SIZE, 1, count * IWI_CMD_DESC_SIZE, 0,
NULL, NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc,
count * IWI_CMD_DESC_SIZE, iwi_dma_map_addr, &ring->physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
return 0;
fail: iwi_free_cmd_ring(sc, ring);
return error;
}
static void
iwi_reset_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring)
{
ring->queued = 0;
ring->cur = ring->next = 0;
}
static void
iwi_free_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring)
{
if (ring->desc != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
}
static int
iwi_alloc_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring, int count,
bus_addr_t csr_ridx, bus_addr_t csr_widx)
{
int i, error;
ring->count = count;
ring->queued = 0;
ring->cur = ring->next = 0;
ring->csr_ridx = csr_ridx;
ring->csr_widx = csr_widx;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
count * IWI_TX_DESC_SIZE, 1, count * IWI_TX_DESC_SIZE, 0, NULL,
NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc,
count * IWI_TX_DESC_SIZE, iwi_dma_map_addr, &ring->physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
ring->data = malloc(count * sizeof (struct iwi_tx_data), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (ring->data == NULL) {
device_printf(sc->sc_dev, "could not allocate soft data\n");
error = ENOMEM;
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
IWI_MAX_NSEG, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create data DMA tag\n");
goto fail;
}
for (i = 0; i < count; i++) {
error = bus_dmamap_create(ring->data_dmat, 0,
&ring->data[i].map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
}
return 0;
fail: iwi_free_tx_ring(sc, ring);
return error;
}
static void
iwi_reset_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring)
{
struct iwi_tx_data *data;
int i;
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
if (data->ni != NULL) {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
}
ring->queued = 0;
ring->cur = ring->next = 0;
}
static void
iwi_free_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring)
{
struct iwi_tx_data *data;
int i;
if (ring->desc != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
if (ring->data != NULL) {
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->ni != NULL)
ieee80211_free_node(data->ni);
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
free(ring->data, M_DEVBUF);
}
if (ring->data_dmat != NULL)
bus_dma_tag_destroy(ring->data_dmat);
}
static int
iwi_alloc_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring, int count)
{
struct iwi_rx_data *data;
int i, error;
ring->count = count;
ring->cur = 0;
ring->data = malloc(count * sizeof (struct iwi_rx_data), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (ring->data == NULL) {
device_printf(sc->sc_dev, "could not allocate soft data\n");
error = ENOMEM;
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create data DMA tag\n");
goto fail;
}
for (i = 0; i < count; i++) {
data = &ring->data[i];
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (data->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
error = ENOMEM;
goto fail;
}
error = bus_dmamap_load(ring->data_dmat, data->map,
mtod(data->m, void *), MCLBYTES, iwi_dma_map_addr,
&data->physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not load rx buf DMA map");
goto fail;
}
data->reg = IWI_CSR_RX_BASE + i * 4;
}
return 0;
fail: iwi_free_rx_ring(sc, ring);
return error;
}
static void
iwi_reset_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring)
{
ring->cur = 0;
}
static void
iwi_free_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring)
{
struct iwi_rx_data *data;
int i;
if (ring->data != NULL) {
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
free(ring->data, M_DEVBUF);
}
if (ring->data_dmat != NULL)
bus_dma_tag_destroy(ring->data_dmat);
}
static int
iwi_shutdown(device_t dev)
{
struct iwi_softc *sc = device_get_softc(dev);
iwi_stop(sc);
iwi_put_firmware(sc); /* ??? XXX */
return 0;
}
static int
iwi_suspend(device_t dev)
{
struct iwi_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
ieee80211_suspend_all(ic);
return 0;
}
static int
iwi_resume(device_t dev)
{
struct iwi_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
pci_write_config(dev, 0x41, 0, 1);
ieee80211_resume_all(ic);
return 0;
}
static struct ieee80211_node *
iwi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct iwi_node *in;
in = malloc(sizeof (struct iwi_node), M_80211_NODE, M_NOWAIT | M_ZERO);
if (in == NULL)
return NULL;
/* XXX assign sta table entry for adhoc */
in->in_station = -1;
return &in->in_node;
}
static void
iwi_node_free(struct ieee80211_node *ni)
{
struct ieee80211com *ic = ni->ni_ic;
- struct iwi_softc *sc = ic->ic_ifp->if_softc;
+ struct iwi_softc *sc = ic->ic_softc;
struct iwi_node *in = (struct iwi_node *)ni;
if (in->in_station != -1) {
DPRINTF(("%s mac %6D station %u\n", __func__,
ni->ni_macaddr, ":", in->in_station));
free_unr(sc->sc_unr, in->in_station);
}
sc->sc_node_free(ni);
}
/*
* Convert h/w rate code to IEEE rate code.
*/
static int
iwi_cvtrate(int iwirate)
{
switch (iwirate) {
case IWI_RATE_DS1: return 2;
case IWI_RATE_DS2: return 4;
case IWI_RATE_DS5: return 11;
case IWI_RATE_DS11: return 22;
case IWI_RATE_OFDM6: return 12;
case IWI_RATE_OFDM9: return 18;
case IWI_RATE_OFDM12: return 24;
case IWI_RATE_OFDM18: return 36;
case IWI_RATE_OFDM24: return 48;
case IWI_RATE_OFDM36: return 72;
case IWI_RATE_OFDM48: return 96;
case IWI_RATE_OFDM54: return 108;
}
return 0;
}
/*
* The firmware automatically adapts the transmit speed. We report its current
* value here.
*/
static void
iwi_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
struct ieee80211vap *vap = ifp->if_softc;
struct ieee80211com *ic = vap->iv_ic;
- struct iwi_softc *sc = ic->ic_ifp->if_softc;
+ struct iwi_softc *sc = ic->ic_softc;
struct ieee80211_node *ni;
/* read current transmission rate from adapter */
ni = ieee80211_ref_node(vap->iv_bss);
ni->ni_txrate =
iwi_cvtrate(CSR_READ_4(sc, IWI_CSR_CURRENT_TX_RATE));
ieee80211_free_node(ni);
ieee80211_media_status(ifp, imr);
}
static int
iwi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct iwi_vap *ivp = IWI_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
- struct ifnet *ifp = ic->ic_ifp;
- struct iwi_softc *sc = ifp->if_softc;
+ struct iwi_softc *sc = ic->ic_softc;
IWI_LOCK_DECL;
DPRINTF(("%s: %s -> %s flags 0x%x\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate], sc->flags));
IEEE80211_UNLOCK(ic);
IWI_LOCK(sc);
switch (nstate) {
case IEEE80211_S_INIT:
/*
* NB: don't try to do this if iwi_stop_master has
* shutdown the firmware and disabled interrupts.
*/
if (vap->iv_state == IEEE80211_S_RUN &&
(sc->flags & IWI_FLAG_FW_INITED))
iwi_disassociate(sc, 0);
break;
case IEEE80211_S_AUTH:
iwi_auth_and_assoc(sc, vap);
break;
case IEEE80211_S_RUN:
if (vap->iv_opmode == IEEE80211_M_IBSS &&
vap->iv_state == IEEE80211_S_SCAN) {
/*
* XXX when joining an ibss network we are called
* with a SCAN -> RUN transition on scan complete.
* Use that to call iwi_auth_and_assoc. On completing
* the join we are then called again with an
* AUTH -> RUN transition and we want to do nothing.
* This is all totally bogus and needs to be redone.
*/
iwi_auth_and_assoc(sc, vap);
} else if (vap->iv_opmode == IEEE80211_M_MONITOR)
ieee80211_runtask(ic, &sc->sc_monitortask);
break;
case IEEE80211_S_ASSOC:
/*
* If we are transitioning from AUTH then just wait
* for the ASSOC status to come back from the firmware.
* Otherwise we need to issue the association request.
*/
if (vap->iv_state == IEEE80211_S_AUTH)
break;
iwi_auth_and_assoc(sc, vap);
break;
default:
break;
}
IWI_UNLOCK(sc);
IEEE80211_LOCK(ic);
return ivp->iwi_newstate(vap, nstate, arg);
}
/*
* WME parameters coming from IEEE 802.11e specification. These values are
* already declared in ieee80211_proto.c, but they are static so they can't
* be reused here.
*/
static const struct wmeParams iwi_wme_cck_params[WME_NUM_AC] = {
{ 0, 3, 5, 7, 0 }, /* WME_AC_BE */
{ 0, 3, 5, 10, 0 }, /* WME_AC_BK */
{ 0, 2, 4, 5, 188 }, /* WME_AC_VI */
{ 0, 2, 3, 4, 102 } /* WME_AC_VO */
};
static const struct wmeParams iwi_wme_ofdm_params[WME_NUM_AC] = {
{ 0, 3, 4, 6, 0 }, /* WME_AC_BE */
{ 0, 3, 4, 10, 0 }, /* WME_AC_BK */
{ 0, 2, 3, 4, 94 }, /* WME_AC_VI */
{ 0, 2, 2, 3, 47 } /* WME_AC_VO */
};
#define IWI_EXP2(v) htole16((1 << (v)) - 1)
#define IWI_USEC(v) htole16(IEEE80211_TXOP_TO_US(v))
static void
iwi_wme_init(struct iwi_softc *sc)
{
const struct wmeParams *wmep;
int ac;
memset(sc->wme, 0, sizeof sc->wme);
for (ac = 0; ac < WME_NUM_AC; ac++) {
/* set WME values for CCK modulation */
wmep = &iwi_wme_cck_params[ac];
sc->wme[1].aifsn[ac] = wmep->wmep_aifsn;
sc->wme[1].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin);
sc->wme[1].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax);
sc->wme[1].burst[ac] = IWI_USEC(wmep->wmep_txopLimit);
sc->wme[1].acm[ac] = wmep->wmep_acm;
/* set WME values for OFDM modulation */
wmep = &iwi_wme_ofdm_params[ac];
sc->wme[2].aifsn[ac] = wmep->wmep_aifsn;
sc->wme[2].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin);
sc->wme[2].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax);
sc->wme[2].burst[ac] = IWI_USEC(wmep->wmep_txopLimit);
sc->wme[2].acm[ac] = wmep->wmep_acm;
}
}
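/*
 * Worked example (not part of this commit): IWI_EXP2 turns net80211's log2
 * contention-window values into the actual windows the firmware wants, and
 * IWI_USEC converts TXOP limits, which net80211 keeps in 32 us units, into
 * microseconds.  For instance IWI_EXP2(4) = (1 << 4) - 1 = 15 slots,
 * IWI_EXP2(5) = 31 slots, and IWI_USEC(188) = 188 * 32 = 6016 us; all three
 * are stored little-endian by the htole16() inside the macros.
 */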
static int
iwi_wme_setparams(struct iwi_softc *sc)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
const struct wmeParams *wmep;
int ac;
for (ac = 0; ac < WME_NUM_AC; ac++) {
/* set WME values for current operating mode */
wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
sc->wme[0].aifsn[ac] = wmep->wmep_aifsn;
sc->wme[0].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin);
sc->wme[0].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax);
sc->wme[0].burst[ac] = IWI_USEC(wmep->wmep_txopLimit);
sc->wme[0].acm[ac] = wmep->wmep_acm;
}
DPRINTF(("Setting WME parameters\n"));
return iwi_cmd(sc, IWI_CMD_SET_WME_PARAMS, sc->wme, sizeof sc->wme);
}
#undef IWI_USEC
#undef IWI_EXP2
static void
iwi_update_wme(void *arg, int npending)
{
struct iwi_softc *sc = arg;
IWI_LOCK_DECL;
IWI_LOCK(sc);
(void) iwi_wme_setparams(sc);
IWI_UNLOCK(sc);
}
static int
iwi_wme_update(struct ieee80211com *ic)
{
- struct iwi_softc *sc = ic->ic_ifp->if_softc;
+ struct iwi_softc *sc = ic->ic_softc;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
/*
* We may be called to update the WME parameters in
* the adapter at various places. If we're already
* associated then initiate the request immediately;
* otherwise we assume the params will get sent down
* to the adapter as part of the work iwi_auth_and_assoc
* does.
*/
if (vap->iv_state == IEEE80211_S_RUN)
ieee80211_runtask(ic, &sc->sc_wmetask);
return (0);
}
static int
iwi_wme_setie(struct iwi_softc *sc)
{
struct ieee80211_wme_info wme;
memset(&wme, 0, sizeof wme);
wme.wme_id = IEEE80211_ELEMID_VENDOR;
wme.wme_len = sizeof (struct ieee80211_wme_info) - 2;
wme.wme_oui[0] = 0x00;
wme.wme_oui[1] = 0x50;
wme.wme_oui[2] = 0xf2;
wme.wme_type = WME_OUI_TYPE;
wme.wme_subtype = WME_INFO_OUI_SUBTYPE;
wme.wme_version = WME_VERSION;
wme.wme_info = 0;
DPRINTF(("Setting WME IE (len=%u)\n", wme.wme_len));
return iwi_cmd(sc, IWI_CMD_SET_WMEIE, &wme, sizeof wme);
}
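/*
 * Worked example (not part of this commit): assuming the usual net80211
 * values WME_OUI_TYPE = 2, WME_INFO_OUI_SUBTYPE = 0 and WME_VERSION = 1,
 * the structure built above is the standard 9-byte WME information element,
 * i.e. dd 07 00 50 f2 02 00 01 00 on the wire (vendor IE, length 7,
 * Microsoft OUI, WME info subtype, QoS info field 0).
 */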
/*
* Read 16 bits at address 'addr' from the serial EEPROM.
*/
static uint16_t
iwi_read_prom_word(struct iwi_softc *sc, uint8_t addr)
{
uint32_t tmp;
uint16_t val;
int n;
/* clock C once before the first command */
IWI_EEPROM_CTL(sc, 0);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S);
/* write start bit (1) */
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D | IWI_EEPROM_C);
/* write READ opcode (10) */
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D | IWI_EEPROM_C);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C);
/* write address A7-A0 */
for (n = 7; n >= 0; n--) {
IWI_EEPROM_CTL(sc, IWI_EEPROM_S |
(((addr >> n) & 1) << IWI_EEPROM_SHIFT_D));
IWI_EEPROM_CTL(sc, IWI_EEPROM_S |
(((addr >> n) & 1) << IWI_EEPROM_SHIFT_D) | IWI_EEPROM_C);
}
IWI_EEPROM_CTL(sc, IWI_EEPROM_S);
/* read data Q15-Q0 */
val = 0;
for (n = 15; n >= 0; n--) {
IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C);
IWI_EEPROM_CTL(sc, IWI_EEPROM_S);
tmp = MEM_READ_4(sc, IWI_MEM_EEPROM_CTL);
val |= ((tmp & IWI_EEPROM_Q) >> IWI_EEPROM_SHIFT_Q) << n;
}
IWI_EEPROM_CTL(sc, 0);
/* clear Chip Select and clock C */
IWI_EEPROM_CTL(sc, IWI_EEPROM_S);
IWI_EEPROM_CTL(sc, 0);
IWI_EEPROM_CTL(sc, IWI_EEPROM_C);
return val;
}
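/*
 * Usage note with a worked example (not part of this commit): the routine
 * above bit-bangs a Microwire-style READ (start bit, opcode 10, 8 address
 * bits, 16 data bits) and returns one 16-bit EEPROM word.  iwi_attach()
 * reads IWI_EEPROM_MAC + 0..2 and splits each word low byte first
 * ("val & 0xff", then "val >> 8"), so a hypothetical word value of 0x1600
 * at IWI_EEPROM_MAC + 0 would yield macaddr[0] = 0x00 and macaddr[1] = 0x16.
 */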
static void
iwi_setcurchan(struct iwi_softc *sc, int chan)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
sc->curchan = chan;
ieee80211_radiotap_chan_change(ic);
}
static void
iwi_frame_intr(struct iwi_softc *sc, struct iwi_rx_data *data, int i,
struct iwi_frame *frame)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct mbuf *mnew, *m;
struct ieee80211_node *ni;
int type, error, framelen;
int8_t rssi, nf;
IWI_LOCK_DECL;
framelen = le16toh(frame->len);
if (framelen < IEEE80211_MIN_LEN || framelen > MCLBYTES) {
/*
* XXX >MCLBYTES is bogus as it means the h/w dma'd
* out of bounds; need to figure out how to limit
* frame size in the firmware
*/
/* XXX stat */
DPRINTFN(1,
("drop rx frame len=%u chan=%u rssi=%u rssi_dbm=%u\n",
le16toh(frame->len), frame->chan, frame->rssi,
frame->rssi_dbm));
return;
}
DPRINTFN(5, ("received frame len=%u chan=%u rssi=%u rssi_dbm=%u\n",
le16toh(frame->len), frame->chan, frame->rssi, frame->rssi_dbm));
if (frame->chan != sc->curchan)
iwi_setcurchan(sc, frame->chan);
/*
* Try to allocate a new mbuf for this ring element and load it before
* processing the current mbuf. If the ring element cannot be loaded,
* drop the received packet and reuse the old mbuf. In the unlikely
* case that the old mbuf can't be reloaded either, explicitly panic.
*/
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return;
}
bus_dmamap_unload(sc->rxq.data_dmat, data->map);
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(mnew, void *), MCLBYTES, iwi_dma_map_addr, &data->physaddr,
0);
if (error != 0) {
m_freem(mnew);
/* try to reload the old mbuf */
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(data->m, void *), MCLBYTES, iwi_dma_map_addr,
&data->physaddr, 0);
if (error != 0) {
/* very unlikely that it will fail... */
panic("%s: could not load old rx mbuf",
device_get_name(sc->sc_dev));
}
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return;
}
/*
* New mbuf successfully loaded, update Rx ring and continue
* processing.
*/
m = data->m;
data->m = mnew;
CSR_WRITE_4(sc, data->reg, data->physaddr);
/* finalize mbuf */
- m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = sizeof (struct iwi_hdr) +
sizeof (struct iwi_frame) + framelen;
m_adj(m, sizeof (struct iwi_hdr) + sizeof (struct iwi_frame));
rssi = frame->rssi_dbm;
nf = -95;
if (ieee80211_radiotap_active(ic)) {
struct iwi_rx_radiotap_header *tap = &sc->sc_rxtap;
tap->wr_flags = 0;
tap->wr_antsignal = rssi;
tap->wr_antnoise = nf;
tap->wr_rate = iwi_cvtrate(frame->rate);
tap->wr_antenna = frame->antenna;
}
IWI_UNLOCK(sc);
ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *));
if (ni != NULL) {
type = ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else
type = ieee80211_input_all(ic, m, rssi, nf);
IWI_LOCK(sc);
if (sc->sc_softled) {
/*
* Blink for any data frame. Otherwise do a
* heartbeat-style blink when idle. The latter
* is mainly for station mode where we depend on
* periodic beacon frames to trigger the poll event.
*/
if (type == IEEE80211_FC0_TYPE_DATA) {
sc->sc_rxrate = frame->rate;
iwi_led_event(sc, IWI_LED_RX);
} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
iwi_led_event(sc, IWI_LED_POLL);
}
}
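/*
 * Condensed sketch of the rx refill strategy described above (illustrative
 * only, not part of this commit): the replacement cluster is allocated and
 * DMA-loaded before the received one is handed to net80211, so the ring
 * slot always owns a mapped buffer even when allocation or the load fails.
 */
#if 0
	mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (mnew == NULL)
		return;		/* keep the old mbuf, count an input error */
	bus_dmamap_unload(sc->rxq.data_dmat, data->map);
	if (bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *),
	    MCLBYTES, iwi_dma_map_addr, &data->physaddr, 0) != 0) {
		m_freem(mnew);	/* reload the old mbuf instead */
	} else {
		m = data->m;	/* old mbuf goes up the stack */
		data->m = mnew;	/* slot now owns the new cluster */
		CSR_WRITE_4(sc, data->reg, data->physaddr);
	}
#endif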
/*
* Check for an association response frame to see if QoS
* has been negotiated. We parse just enough to figure
* out if we're supposed to use QoS. The proper solution
* is to pass the frame up so ieee80211_input can do the
* work but that's made hard by how things currently are
* done in the driver.
*/
static void
iwi_checkforqos(struct ieee80211vap *vap,
const struct ieee80211_frame *wh, int len)
{
#define SUBTYPE(wh) ((wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK)
const uint8_t *frm, *efrm, *wme;
struct ieee80211_node *ni;
uint16_t capinfo, status, associd;
/* NB: +8 for capinfo, status, associd, and first ie */
if (!(sizeof(*wh)+8 < len && len < IEEE80211_MAX_LEN) ||
SUBTYPE(wh) != IEEE80211_FC0_SUBTYPE_ASSOC_RESP)
return;
/*
* asresp frame format
* [2] capability information
* [2] status
* [2] association ID
* [tlv] supported rates
* [tlv] extended supported rates
* [tlv] WME
*/
frm = (const uint8_t *)&wh[1];
efrm = ((const uint8_t *) wh) + len;
capinfo = le16toh(*(const uint16_t *)frm);
frm += 2;
status = le16toh(*(const uint16_t *)frm);
frm += 2;
associd = le16toh(*(const uint16_t *)frm);
frm += 2;
wme = NULL;
while (efrm - frm > 1) {
IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return);
switch (*frm) {
case IEEE80211_ELEMID_VENDOR:
if (iswmeoui(frm))
wme = frm;
break;
}
frm += frm[1] + 2;
}
ni = ieee80211_ref_node(vap->iv_bss);
ni->ni_capinfo = capinfo;
ni->ni_associd = associd & 0x3fff;
if (wme != NULL)
ni->ni_flags |= IEEE80211_NODE_QOS;
else
ni->ni_flags &= ~IEEE80211_NODE_QOS;
ieee80211_free_node(ni);
#undef SUBTYPE
}
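/*
 * Worked note (not part of this commit): in the parse above frm starts at
 * &wh[1], i.e. right after the 802.11 header, and capinfo/status/associd
 * occupy the first three little-endian 16-bit words before the TLVs.  The
 * "& 0x3fff" strips the two most-significant bits of the AID field, which
 * are always set on the air, so a hypothetical field value of 0xc001 yields
 * association ID 1.
 */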
static void
iwi_notif_link_quality(struct iwi_softc *sc, struct iwi_notif *notif)
{
struct iwi_notif_link_quality *lq;
int len;
len = le16toh(notif->len);
DPRINTFN(5, ("Notification (%u) - len=%d, sizeof=%zu\n",
notif->type,
len,
sizeof(struct iwi_notif_link_quality)
));
/* enforce length */
if (len != sizeof(struct iwi_notif_link_quality)) {
DPRINTFN(5, ("Notification: (%u) too short (%d)\n",
notif->type,
len));
return;
}
lq = (struct iwi_notif_link_quality *)(notif + 1);
memcpy(&sc->sc_linkqual, lq, sizeof(sc->sc_linkqual));
sc->sc_linkqual_valid = 1;
}
/*
* Task queue callbacks for iwi_notification_intr used to avoid LORs.
*/
static void
iwi_notification_intr(struct iwi_softc *sc, struct iwi_notif *notif)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct iwi_notif_scan_channel *chan;
struct iwi_notif_scan_complete *scan;
struct iwi_notif_authentication *auth;
struct iwi_notif_association *assoc;
struct iwi_notif_beacon_state *beacon;
switch (notif->type) {
case IWI_NOTIF_TYPE_SCAN_CHANNEL:
chan = (struct iwi_notif_scan_channel *)(notif + 1);
DPRINTFN(3, ("Scan of channel %u complete (%u)\n",
ieee80211_ieee2mhz(chan->nchan, 0), chan->nchan));
/* Reset the timer, the scan is still going */
sc->sc_state_timer = 3;
break;
case IWI_NOTIF_TYPE_SCAN_COMPLETE:
scan = (struct iwi_notif_scan_complete *)(notif + 1);
DPRINTFN(2, ("Scan completed (%u, %u)\n", scan->nchan,
scan->status));
IWI_STATE_END(sc, IWI_FW_SCANNING);
/*
* Monitor mode works by doing a passive scan to set
* the channel and enable rx. Because we don't want
* to abort a scan lest the firmware crash we scan
* for a short period of time and automatically restart
* the scan when notified the sweep has completed.
*/
if (vap->iv_opmode == IEEE80211_M_MONITOR) {
ieee80211_runtask(ic, &sc->sc_monitortask);
break;
}
if (scan->status == IWI_SCAN_COMPLETED) {
/* NB: don't need to defer, net80211 does it for us */
ieee80211_scan_next(vap);
}
break;
case IWI_NOTIF_TYPE_AUTHENTICATION:
auth = (struct iwi_notif_authentication *)(notif + 1);
switch (auth->state) {
case IWI_AUTH_SUCCESS:
DPRINTFN(2, ("Authentication succeeeded\n"));
ieee80211_new_state(vap, IEEE80211_S_ASSOC, -1);
break;
case IWI_AUTH_FAIL:
/*
* These are delivered as an unsolicited deauth
* (e.g. due to inactivity) or in response to an
* associate request.
*/
sc->flags &= ~IWI_FLAG_ASSOCIATED;
if (vap->iv_state != IEEE80211_S_RUN) {
DPRINTFN(2, ("Authentication failed\n"));
vap->iv_stats.is_rx_auth_fail++;
IWI_STATE_END(sc, IWI_FW_ASSOCIATING);
} else {
DPRINTFN(2, ("Deauthenticated\n"));
vap->iv_stats.is_rx_deauth++;
}
ieee80211_new_state(vap, IEEE80211_S_SCAN, -1);
break;
case IWI_AUTH_SENT_1:
case IWI_AUTH_RECV_2:
case IWI_AUTH_SEQ1_PASS:
break;
case IWI_AUTH_SEQ1_FAIL:
DPRINTFN(2, ("Initial authentication handshake failed; "
"you probably need shared key\n"));
vap->iv_stats.is_rx_auth_fail++;
IWI_STATE_END(sc, IWI_FW_ASSOCIATING);
/* XXX retry shared key when in auto */
break;
default:
device_printf(sc->sc_dev,
"unknown authentication state %u\n", auth->state);
break;
}
break;
case IWI_NOTIF_TYPE_ASSOCIATION:
assoc = (struct iwi_notif_association *)(notif + 1);
switch (assoc->state) {
case IWI_AUTH_SUCCESS:
/* re-association, do nothing */
break;
case IWI_ASSOC_SUCCESS:
DPRINTFN(2, ("Association succeeded\n"));
sc->flags |= IWI_FLAG_ASSOCIATED;
IWI_STATE_END(sc, IWI_FW_ASSOCIATING);
iwi_checkforqos(vap,
(const struct ieee80211_frame *)(assoc+1),
le16toh(notif->len) - sizeof(*assoc) - 1);
ieee80211_new_state(vap, IEEE80211_S_RUN, -1);
break;
case IWI_ASSOC_INIT:
sc->flags &= ~IWI_FLAG_ASSOCIATED;
switch (sc->fw_state) {
case IWI_FW_ASSOCIATING:
DPRINTFN(2, ("Association failed\n"));
IWI_STATE_END(sc, IWI_FW_ASSOCIATING);
ieee80211_new_state(vap, IEEE80211_S_SCAN, -1);
break;
case IWI_FW_DISASSOCIATING:
DPRINTFN(2, ("Dissassociated\n"));
IWI_STATE_END(sc, IWI_FW_DISASSOCIATING);
vap->iv_stats.is_rx_disassoc++;
ieee80211_new_state(vap, IEEE80211_S_SCAN, -1);
break;
}
break;
default:
device_printf(sc->sc_dev,
"unknown association state %u\n", assoc->state);
break;
}
break;
case IWI_NOTIF_TYPE_BEACON:
/* XXX check struct length */
beacon = (struct iwi_notif_beacon_state *)(notif + 1);
DPRINTFN(5, ("Beacon state (%u, %u)\n",
beacon->state, le32toh(beacon->number)));
if (beacon->state == IWI_BEACON_MISS) {
/*
* The firmware notifies us of every beacon miss
* so we need to track the count against the
* configured threshold before notifying the
* 802.11 layer.
* XXX try to roam, drop assoc only on much higher count
*/
if (le32toh(beacon->number) >= vap->iv_bmissthreshold) {
DPRINTF(("Beacon miss: %u >= %u\n",
le32toh(beacon->number),
vap->iv_bmissthreshold));
vap->iv_stats.is_beacon_miss++;
/*
* It's pointless to notify the 802.11 layer
* as it'll try to send a probe request (which
* we'll discard) and then timeout and drop us
* into scan state. Instead tell the firmware
* to disassociate and then on completion we'll
* kick the state machine to scan.
*/
ieee80211_runtask(ic, &sc->sc_disassoctask);
}
}
break;
case IWI_NOTIF_TYPE_CALIBRATION:
case IWI_NOTIF_TYPE_NOISE:
/* XXX handle? */
DPRINTFN(5, ("Notification (%u)\n", notif->type));
break;
case IWI_NOTIF_TYPE_LINK_QUALITY:
iwi_notif_link_quality(sc, notif);
break;
default:
DPRINTF(("unknown notification type %u flags 0x%x len %u\n",
notif->type, notif->flags, le16toh(notif->len)));
break;
}
}
static void
iwi_rx_intr(struct iwi_softc *sc)
{
struct iwi_rx_data *data;
struct iwi_hdr *hdr;
uint32_t hw;
hw = CSR_READ_4(sc, IWI_CSR_RX_RIDX);
for (; sc->rxq.cur != hw;) {
data = &sc->rxq.data[sc->rxq.cur];
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
hdr = mtod(data->m, struct iwi_hdr *);
switch (hdr->type) {
case IWI_HDR_TYPE_FRAME:
iwi_frame_intr(sc, data, sc->rxq.cur,
(struct iwi_frame *)(hdr + 1));
break;
case IWI_HDR_TYPE_NOTIF:
iwi_notification_intr(sc,
(struct iwi_notif *)(hdr + 1));
break;
default:
device_printf(sc->sc_dev, "unknown hdr type %u\n",
hdr->type);
}
DPRINTFN(15, ("rx done idx=%u\n", sc->rxq.cur));
sc->rxq.cur = (sc->rxq.cur + 1) % IWI_RX_RING_COUNT;
}
/* tell the firmware what we have processed */
hw = (hw == 0) ? IWI_RX_RING_COUNT - 1 : hw - 1;
CSR_WRITE_4(sc, IWI_CSR_RX_WIDX, hw);
}
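/*
 * Worked example (not part of this commit): if IWI_CSR_RX_RIDX reads back
 * as 5, the loop above consumes slots sc->rxq.cur .. 4 and the driver then
 * writes 4 (hw - 1) to IWI_CSR_RX_WIDX; when hw is 0 the wrap gives
 * IWI_RX_RING_COUNT - 1.  The write index handed back to the firmware thus
 * always trails its read index by one slot.
 */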
static void
iwi_tx_intr(struct iwi_softc *sc, struct iwi_tx_ring *txq)
{
- struct ifnet *ifp = sc->sc_ifp;
struct iwi_tx_data *data;
uint32_t hw;
hw = CSR_READ_4(sc, txq->csr_ridx);
- for (; txq->next != hw;) {
+ while (txq->next != hw) {
data = &txq->data[txq->next];
-
+ DPRINTFN(15, ("tx done idx=%u\n", txq->next));
bus_dmamap_sync(txq->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txq->data_dmat, data->map);
- if (data->m->m_flags & M_TXCB)
- ieee80211_process_callback(data->ni, data->m, 0/*XXX*/);
- m_freem(data->m);
- data->m = NULL;
- ieee80211_free_node(data->ni);
+ ieee80211_tx_complete(data->ni, data->m, 0);
data->ni = NULL;
-
- DPRINTFN(15, ("tx done idx=%u\n", txq->next));
-
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
-
+ data->m = NULL;
txq->queued--;
txq->next = (txq->next + 1) % IWI_TX_RING_COUNT;
}
-
sc->sc_tx_timer = 0;
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
if (sc->sc_softled)
iwi_led_event(sc, IWI_LED_TX);
-
- iwi_start_locked(ifp);
+ iwi_start(sc);
}
static void
iwi_fatal_error_intr(struct iwi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
device_printf(sc->sc_dev, "firmware error\n");
if (vap != NULL)
ieee80211_cancel_scan(vap);
ieee80211_runtask(ic, &sc->sc_restarttask);
sc->flags &= ~IWI_FLAG_BUSY;
sc->sc_busy_timer = 0;
wakeup(sc);
}
static void
iwi_radio_off_intr(struct iwi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- ieee80211_runtask(ic, &sc->sc_radiofftask);
+ ieee80211_runtask(&sc->sc_ic, &sc->sc_radiofftask);
}
static void
iwi_intr(void *arg)
{
struct iwi_softc *sc = arg;
uint32_t r;
IWI_LOCK_DECL;
IWI_LOCK(sc);
if ((r = CSR_READ_4(sc, IWI_CSR_INTR)) == 0 || r == 0xffffffff) {
IWI_UNLOCK(sc);
return;
}
/* acknowledge interrupts */
CSR_WRITE_4(sc, IWI_CSR_INTR, r);
if (r & IWI_INTR_FATAL_ERROR) {
iwi_fatal_error_intr(sc);
goto done;
}
if (r & IWI_INTR_FW_INITED) {
if (!(r & (IWI_INTR_FATAL_ERROR | IWI_INTR_PARITY_ERROR)))
wakeup(sc);
}
if (r & IWI_INTR_RADIO_OFF)
iwi_radio_off_intr(sc);
if (r & IWI_INTR_CMD_DONE) {
sc->flags &= ~IWI_FLAG_BUSY;
sc->sc_busy_timer = 0;
wakeup(sc);
}
if (r & IWI_INTR_TX1_DONE)
iwi_tx_intr(sc, &sc->txq[0]);
if (r & IWI_INTR_TX2_DONE)
iwi_tx_intr(sc, &sc->txq[1]);
if (r & IWI_INTR_TX3_DONE)
iwi_tx_intr(sc, &sc->txq[2]);
if (r & IWI_INTR_TX4_DONE)
iwi_tx_intr(sc, &sc->txq[3]);
if (r & IWI_INTR_RX_DONE)
iwi_rx_intr(sc);
if (r & IWI_INTR_PARITY_ERROR) {
/* XXX rate-limit */
device_printf(sc->sc_dev, "parity error\n");
}
done:
IWI_UNLOCK(sc);
}
static int
iwi_cmd(struct iwi_softc *sc, uint8_t type, void *data, uint8_t len)
{
struct iwi_cmd_desc *desc;
IWI_LOCK_ASSERT(sc);
if (sc->flags & IWI_FLAG_BUSY) {
device_printf(sc->sc_dev, "%s: cmd %d not sent, busy\n",
__func__, type);
return EAGAIN;
}
sc->flags |= IWI_FLAG_BUSY;
sc->sc_busy_timer = 2;
desc = &sc->cmdq.desc[sc->cmdq.cur];
desc->hdr.type = IWI_HDR_TYPE_COMMAND;
desc->hdr.flags = IWI_HDR_FLAG_IRQ;
desc->type = type;
desc->len = len;
memcpy(desc->data, data, len);
bus_dmamap_sync(sc->cmdq.desc_dmat, sc->cmdq.desc_map,
BUS_DMASYNC_PREWRITE);
DPRINTFN(2, ("sending command idx=%u type=%u len=%u\n", sc->cmdq.cur,
type, len));
sc->cmdq.cur = (sc->cmdq.cur + 1) % IWI_CMD_RING_COUNT;
CSR_WRITE_4(sc, IWI_CSR_CMD_WIDX, sc->cmdq.cur);
return msleep(sc, &sc->sc_mtx, 0, "iwicmd", hz);
}
static void
iwi_write_ibssnode(struct iwi_softc *sc,
const u_int8_t addr[IEEE80211_ADDR_LEN], int entry)
{
struct iwi_ibssnode node;
/* write node information into NIC memory */
memset(&node, 0, sizeof node);
IEEE80211_ADDR_COPY(node.bssid, addr);
DPRINTF(("%s mac %6D station %u\n", __func__, node.bssid, ":", entry));
CSR_WRITE_REGION_1(sc,
IWI_CSR_NODE_BASE + entry * sizeof node,
(uint8_t *)&node, sizeof node);
}
static int
-iwi_tx_start(struct ifnet *ifp, struct mbuf *m0, struct ieee80211_node *ni,
+iwi_tx_start(struct iwi_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
int ac)
{
- struct iwi_softc *sc = ifp->if_softc;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct iwi_node *in = (struct iwi_node *)ni;
const struct ieee80211_frame *wh;
struct ieee80211_key *k;
const struct chanAccParams *cap;
struct iwi_tx_ring *txq = &sc->txq[ac];
struct iwi_tx_data *data;
struct iwi_tx_desc *desc;
struct mbuf *mnew;
bus_dma_segment_t segs[IWI_MAX_NSEG];
int error, nsegs, hdrlen, i;
int ismcast, flags, xflags, staid;
IWI_LOCK_ASSERT(sc);
wh = mtod(m0, const struct ieee80211_frame *);
/* NB: only data frames use this path */
hdrlen = ieee80211_hdrsize(wh);
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
flags = xflags = 0;
if (!ismcast)
flags |= IWI_DATA_FLAG_NEED_ACK;
if (vap->iv_flags & IEEE80211_F_SHPREAMBLE)
flags |= IWI_DATA_FLAG_SHPREAMBLE;
if (IEEE80211_QOS_HAS_SEQ(wh)) {
xflags |= IWI_DATA_XFLAG_QOS;
cap = &ic->ic_wme.wme_chanParams;
if (!cap->cap_wmeParams[ac].wmep_noackPolicy)
flags &= ~IWI_DATA_FLAG_NEED_ACK;
}
/*
* This is only used in IBSS mode where the firmware expects an index
* in a h/w table instead of a destination address.
*/
if (vap->iv_opmode == IEEE80211_M_IBSS) {
if (!ismcast) {
if (in->in_station == -1) {
in->in_station = alloc_unr(sc->sc_unr);
if (in->in_station == -1) {
/* h/w table is full */
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
m_freem(m0);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return 0;
}
iwi_write_ibssnode(sc,
ni->ni_macaddr, in->in_station);
}
staid = in->in_station;
} else {
/*
* Multicast addresses have no associated node
* so there will be no station entry. We reserve
* entry 0 for one mcast address and use that.
* If there are many being used this will be
* expensive and we'll need to do a better job
* but for now this handles the broadcast case.
*/
if (!IEEE80211_ADDR_EQ(wh->i_addr1, sc->sc_mcast)) {
IEEE80211_ADDR_COPY(sc->sc_mcast, wh->i_addr1);
iwi_write_ibssnode(sc, sc->sc_mcast, 0);
}
staid = 0;
}
} else
staid = 0;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
if (ieee80211_radiotap_active_vap(vap)) {
struct iwi_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
ieee80211_radiotap_tx(vap, m0);
}
data = &txq->data[txq->cur];
desc = &txq->desc[txq->cur];
/* save and trim IEEE802.11 header */
m_copydata(m0, 0, hdrlen, (caddr_t)&desc->wh);
m_adj(m0, hdrlen);
error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs,
&nsegs, 0);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (error != 0) {
mnew = m_defrag(m0, M_NOWAIT);
if (mnew == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");
m_freem(m0);
return ENOBUFS;
}
m0 = mnew;
error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map,
m0, segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not map mbuf (error %d)\n", error);
m_freem(m0);
return error;
}
}
data->m = m0;
data->ni = ni;
desc->hdr.type = IWI_HDR_TYPE_DATA;
desc->hdr.flags = IWI_HDR_FLAG_IRQ;
desc->station = staid;
desc->cmd = IWI_DATA_CMD_TX;
desc->len = htole16(m0->m_pkthdr.len);
desc->flags = flags;
desc->xflags = xflags;
#if 0
if (vap->iv_flags & IEEE80211_F_PRIVACY)
desc->wep_txkey = vap->iv_def_txkey;
else
#endif
desc->flags |= IWI_DATA_FLAG_NO_WEP;
desc->nseg = htole32(nsegs);
for (i = 0; i < nsegs; i++) {
desc->seg_addr[i] = htole32(segs[i].ds_addr);
desc->seg_len[i] = htole16(segs[i].ds_len);
}
bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE);
DPRINTFN(5, ("sending data frame txq=%u idx=%u len=%u nseg=%u\n",
ac, txq->cur, le16toh(desc->len), nsegs));
txq->queued++;
txq->cur = (txq->cur + 1) % IWI_TX_RING_COUNT;
CSR_WRITE_4(sc, txq->csr_widx, txq->cur);
return 0;
}
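/*
 * Sketch of the mapping fallback used above (illustrative only, not part of
 * this commit): an EFBIG from the first bus_dmamap_load_mbuf_sg() means the
 * chain needs more than IWI_MAX_NSEG segments, so the mbuf is defragmented
 * into a contiguous chain and the load retried once; any other failure
 * drops the frame.
 */
#if 0
	error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs,
	    &nsegs, 0);
	if (error == EFBIG) {
		mnew = m_defrag(m0, M_NOWAIT);
		if (mnew == NULL) {
			m_freem(m0);
			return ENOBUFS;
		}
		m0 = mnew;
		error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map,
		    m0, segs, &nsegs, 0);
	}
	if (error != 0) {
		m_freem(m0);
		return error;
	}
#endif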
static int
iwi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
/* no support; just discard */
m_freem(m);
ieee80211_free_node(ni);
return 0;
}
+static int
+iwi_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct iwi_softc *sc = ic->ic_softc;
+ int error;
+ IWI_LOCK_DECL;
+
+ IWI_LOCK(sc);
+ if (!sc->sc_running) {
+ IWI_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ IWI_UNLOCK(sc);
+ return (error);
+ }
+ iwi_start(sc);
+ IWI_UNLOCK(sc);
+ return (0);
+}
+
static void
-iwi_start_locked(struct ifnet *ifp)
+iwi_start(struct iwi_softc *sc)
{
- struct iwi_softc *sc = ifp->if_softc;
struct mbuf *m;
struct ieee80211_node *ni;
int ac;
IWI_LOCK_ASSERT(sc);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
-
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
+ while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ac = M_WME_GETAC(m);
if (sc->txq[ac].queued > IWI_TX_RING_COUNT - 8) {
/* there is no place left in this ring */
/* XXX tail drop */
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ mbufq_prepend(&sc->sc_snd, m);
break;
}
-
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
- if (iwi_tx_start(ifp, m, ni, ac) != 0) {
+ if (iwi_tx_start(sc, m, ni, ac) != 0) {
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
break;
}
-
sc->sc_tx_timer = 5;
}
}
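/*
 * [Illustrative sketch, not part of this commit.]  The change above replaces
 * the legacy ifnet if_snd queue with a driver-private mbufq that
 * iwi_transmit() fills and iwi_start() drains under the driver lock,
 * prepending the packet back when the tx ring is nearly full.  The
 * self-contained userland model below mimics that enqueue/drain/prepend
 * pattern with a plain tail queue; the pkt/pktq names are made up and do
 * not correspond to the real mbuf(9)/mbufq(9) KPI.
 */
#include <stdio.h>

struct pkt {
        int id;
        struct pkt *next;
};

struct pktq {
        struct pkt *head;
        struct pkt **tailp;
};

static void
pktq_init(struct pktq *q)
{
        q->head = NULL;
        q->tailp = &q->head;
}

static void
pktq_enqueue(struct pktq *q, struct pkt *p)
{
        p->next = NULL;
        *q->tailp = p;
        q->tailp = &p->next;
}

static struct pkt *
pktq_dequeue(struct pktq *q)
{
        struct pkt *p = q->head;

        if (p != NULL && (q->head = p->next) == NULL)
                q->tailp = &q->head;
        return (p);
}

static void
pktq_prepend(struct pktq *q, struct pkt *p)
{
        if ((p->next = q->head) == NULL)
                q->tailp = &p->next;
        q->head = p;
}

int
main(void)
{
        struct pkt pkts[4] = {
                { 1, NULL }, { 2, NULL }, { 3, NULL }, { 4, NULL }
        };
        struct pktq q;
        struct pkt *p;
        int i, ring_slots = 2;          /* pretend the tx ring has two free slots */

        pktq_init(&q);
        for (i = 0; i < 4; i++)         /* iwi_transmit() side: enqueue */
                pktq_enqueue(&q, &pkts[i]);
        while ((p = pktq_dequeue(&q)) != NULL) {        /* iwi_start() side */
                if (ring_slots == 0) {
                        pktq_prepend(&q, p);    /* ring nearly full: put it back */
                        break;
                }
                printf("sent packet %d\n", p->id);
                ring_slots--;
        }
        printf("first packet still queued: %d\n",
            q.head != NULL ? q.head->id : -1);
        return (0);
}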
static void
-iwi_start(struct ifnet *ifp)
-{
- struct iwi_softc *sc = ifp->if_softc;
- IWI_LOCK_DECL;
-
- IWI_LOCK(sc);
- iwi_start_locked(ifp);
- IWI_UNLOCK(sc);
-}
-
-static void
iwi_watchdog(void *arg)
{
struct iwi_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
IWI_LOCK_ASSERT(sc);
if (sc->sc_tx_timer > 0) {
if (--sc->sc_tx_timer == 0) {
- if_printf(ifp, "device timeout\n");
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ device_printf(sc->sc_dev, "device timeout\n");
+ counter_u64_add(ic->ic_oerrors, 1);
ieee80211_runtask(ic, &sc->sc_restarttask);
}
}
if (sc->sc_state_timer > 0) {
if (--sc->sc_state_timer == 0) {
- if_printf(ifp, "firmware stuck in state %d, resetting\n",
+ device_printf(sc->sc_dev,
+ "firmware stuck in state %d, resetting\n",
sc->fw_state);
- if (sc->fw_state == IWI_FW_SCANNING) {
- struct ieee80211com *ic = ifp->if_l2com;
+ if (sc->fw_state == IWI_FW_SCANNING)
ieee80211_cancel_scan(TAILQ_FIRST(&ic->ic_vaps));
- }
ieee80211_runtask(ic, &sc->sc_restarttask);
sc->sc_state_timer = 3;
}
}
if (sc->sc_busy_timer > 0) {
if (--sc->sc_busy_timer == 0) {
- if_printf(ifp, "firmware command timeout, resetting\n");
+ device_printf(sc->sc_dev,
+ "firmware command timeout, resetting\n");
ieee80211_runtask(ic, &sc->sc_restarttask);
}
}
callout_reset(&sc->sc_wdtimer, hz, iwi_watchdog, sc);
}
+static void
+iwi_parent(struct ieee80211com *ic)
+{
+ struct iwi_softc *sc = ic->ic_softc;
+ int startall = 0;
+ IWI_LOCK_DECL;
+
+ IWI_LOCK(sc);
+ if (ic->ic_nrunning > 0) {
+ if (!sc->sc_running) {
+ iwi_init_locked(sc);
+ startall = 1;
+ }
+ } else if (sc->sc_running)
+ iwi_stop_locked(sc);
+ IWI_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
+}
+
static int
-iwi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+iwi_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
- struct iwi_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *) data;
- int error = 0, startall = 0;
+ struct ifreq *ifr = data;
+ struct iwi_softc *sc = ic->ic_softc;
+ int error;
IWI_LOCK_DECL;
+ IWI_LOCK(sc);
switch (cmd) {
- case SIOCSIFFLAGS:
- IWI_LOCK(sc);
- if (ifp->if_flags & IFF_UP) {
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- iwi_init_locked(sc);
- startall = 1;
- }
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- iwi_stop_locked(sc);
- }
- IWI_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
case SIOCGIWISTATS:
- IWI_LOCK(sc);
/* XXX validate permissions/memory/etc? */
error = copyout(&sc->sc_linkqual, ifr->ifr_data,
sizeof(struct iwi_notif_link_quality));
- IWI_UNLOCK(sc);
break;
case SIOCZIWISTATS:
- IWI_LOCK(sc);
memset(&sc->sc_linkqual, 0,
sizeof(struct iwi_notif_link_quality));
- IWI_UNLOCK(sc);
error = 0;
break;
default:
- error = EINVAL;
+ error = ENOTTY;
break;
}
- return error;
+ IWI_UNLOCK(sc);
+
+ return (error);
}
static void
iwi_stop_master(struct iwi_softc *sc)
{
uint32_t tmp;
int ntries;
/* disable interrupts */
CSR_WRITE_4(sc, IWI_CSR_INTR_MASK, 0);
CSR_WRITE_4(sc, IWI_CSR_RST, IWI_RST_STOP_MASTER);
for (ntries = 0; ntries < 5; ntries++) {
if (CSR_READ_4(sc, IWI_CSR_RST) & IWI_RST_MASTER_DISABLED)
break;
DELAY(10);
}
if (ntries == 5)
device_printf(sc->sc_dev, "timeout waiting for master\n");
tmp = CSR_READ_4(sc, IWI_CSR_RST);
CSR_WRITE_4(sc, IWI_CSR_RST, tmp | IWI_RST_PRINCETON_RESET);
sc->flags &= ~IWI_FLAG_FW_INITED;
}
static int
iwi_reset(struct iwi_softc *sc)
{
uint32_t tmp;
int i, ntries;
iwi_stop_master(sc);
tmp = CSR_READ_4(sc, IWI_CSR_CTL);
CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_INIT);
CSR_WRITE_4(sc, IWI_CSR_READ_INT, IWI_READ_INT_INIT_HOST);
/* wait for clock stabilization */
for (ntries = 0; ntries < 1000; ntries++) {
if (CSR_READ_4(sc, IWI_CSR_CTL) & IWI_CTL_CLOCK_READY)
break;
DELAY(200);
}
if (ntries == 1000) {
device_printf(sc->sc_dev,
"timeout waiting for clock stabilization\n");
return EIO;
}
tmp = CSR_READ_4(sc, IWI_CSR_RST);
CSR_WRITE_4(sc, IWI_CSR_RST, tmp | IWI_RST_SOFT_RESET);
DELAY(10);
tmp = CSR_READ_4(sc, IWI_CSR_CTL);
CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_INIT);
/* clear NIC memory */
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_ADDR, 0);
for (i = 0; i < 0xc000; i++)
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, 0);
return 0;
}
static const struct iwi_firmware_ohdr *
iwi_setup_ofw(struct iwi_softc *sc, struct iwi_fw *fw)
{
const struct firmware *fp = fw->fp;
const struct iwi_firmware_ohdr *hdr;
if (fp->datasize < sizeof (struct iwi_firmware_ohdr)) {
device_printf(sc->sc_dev, "image '%s' too small\n", fp->name);
return NULL;
}
hdr = (const struct iwi_firmware_ohdr *)fp->data;
if ((IWI_FW_GET_MAJOR(le32toh(hdr->version)) != IWI_FW_REQ_MAJOR) ||
(IWI_FW_GET_MINOR(le32toh(hdr->version)) != IWI_FW_REQ_MINOR)) {
device_printf(sc->sc_dev, "version for '%s' %d.%d != %d.%d\n",
fp->name, IWI_FW_GET_MAJOR(le32toh(hdr->version)),
IWI_FW_GET_MINOR(le32toh(hdr->version)), IWI_FW_REQ_MAJOR,
IWI_FW_REQ_MINOR);
return NULL;
}
fw->data = ((const char *) fp->data) + sizeof(struct iwi_firmware_ohdr);
fw->size = fp->datasize - sizeof(struct iwi_firmware_ohdr);
fw->name = fp->name;
return hdr;
}
static const struct iwi_firmware_ohdr *
iwi_setup_oucode(struct iwi_softc *sc, struct iwi_fw *fw)
{
const struct iwi_firmware_ohdr *hdr;
hdr = iwi_setup_ofw(sc, fw);
if (hdr != NULL && le32toh(hdr->mode) != IWI_FW_MODE_UCODE) {
device_printf(sc->sc_dev, "%s is not a ucode image\n",
fw->name);
hdr = NULL;
}
return hdr;
}
static void
iwi_getfw(struct iwi_fw *fw, const char *fwname,
struct iwi_fw *uc, const char *ucname)
{
if (fw->fp == NULL)
fw->fp = firmware_get(fwname);
/* NB: pre-3.0 ucode is packaged separately */
if (uc->fp == NULL && fw->fp != NULL && fw->fp->version < 300)
uc->fp = firmware_get(ucname);
}
/*
* Get the required firmware images if not already loaded.
* Note that we hold firmware images so long as the device
* is marked up in case we need to reload them on device init.
* This is necessary because we re-init the device sometimes
* from a context where we cannot read from the filesystem
* (e.g. from the taskqueue thread when rfkill is re-enabled).
* XXX return 0 on success, 1 on error.
*
* NB: the order of get'ing and put'ing images here is
* intentional to support handling firmware images bundled
* by operating mode and/or all together in one file with
* the boot firmware as "master".
*/
static int
iwi_get_firmware(struct iwi_softc *sc, enum ieee80211_opmode opmode)
{
const struct iwi_firmware_hdr *hdr;
const struct firmware *fp;
/* invalidate cached firmware on mode change */
if (sc->fw_mode != opmode)
iwi_put_firmware(sc);
switch (opmode) {
case IEEE80211_M_STA:
iwi_getfw(&sc->fw_fw, "iwi_bss", &sc->fw_uc, "iwi_ucode_bss");
break;
case IEEE80211_M_IBSS:
iwi_getfw(&sc->fw_fw, "iwi_ibss", &sc->fw_uc, "iwi_ucode_ibss");
break;
case IEEE80211_M_MONITOR:
iwi_getfw(&sc->fw_fw, "iwi_monitor",
&sc->fw_uc, "iwi_ucode_monitor");
break;
default:
device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
return EINVAL;
}
fp = sc->fw_fw.fp;
if (fp == NULL) {
device_printf(sc->sc_dev, "could not load firmware\n");
goto bad;
}
if (fp->version < 300) {
/*
* Firmware prior to 3.0 was packaged as separate
* boot, firmware, and ucode images. Verify the
* ucode image was read in, retrieve the boot image
* if needed, and check version stamps for consistency.
* The version stamps in the data are also checked
* above; this is a bit paranoid but is a cheap
* safeguard against mis-packaging.
*/
if (sc->fw_uc.fp == NULL) {
device_printf(sc->sc_dev, "could not load ucode\n");
goto bad;
}
if (sc->fw_boot.fp == NULL) {
sc->fw_boot.fp = firmware_get("iwi_boot");
if (sc->fw_boot.fp == NULL) {
device_printf(sc->sc_dev,
"could not load boot firmware\n");
goto bad;
}
}
if (sc->fw_boot.fp->version != sc->fw_fw.fp->version ||
sc->fw_boot.fp->version != sc->fw_uc.fp->version) {
device_printf(sc->sc_dev,
"firmware version mismatch: "
"'%s' is %d, '%s' is %d, '%s' is %d\n",
sc->fw_boot.fp->name, sc->fw_boot.fp->version,
sc->fw_uc.fp->name, sc->fw_uc.fp->version,
sc->fw_fw.fp->name, sc->fw_fw.fp->version
);
goto bad;
}
/*
* Check and setup each image.
*/
if (iwi_setup_oucode(sc, &sc->fw_uc) == NULL ||
iwi_setup_ofw(sc, &sc->fw_boot) == NULL ||
iwi_setup_ofw(sc, &sc->fw_fw) == NULL)
goto bad;
} else {
/*
* Check and setup combined image.
*/
if (fp->datasize < sizeof(struct iwi_firmware_hdr)) {
device_printf(sc->sc_dev, "image '%s' too small\n",
fp->name);
goto bad;
}
hdr = (const struct iwi_firmware_hdr *)fp->data;
if (fp->datasize < sizeof(*hdr) + le32toh(hdr->bsize) + le32toh(hdr->usize)
+ le32toh(hdr->fsize)) {
device_printf(sc->sc_dev, "image '%s' too small (2)\n",
fp->name);
goto bad;
}
sc->fw_boot.data = ((const char *) fp->data) + sizeof(*hdr);
sc->fw_boot.size = le32toh(hdr->bsize);
sc->fw_boot.name = fp->name;
sc->fw_uc.data = sc->fw_boot.data + sc->fw_boot.size;
sc->fw_uc.size = le32toh(hdr->usize);
sc->fw_uc.name = fp->name;
sc->fw_fw.data = sc->fw_uc.data + sc->fw_uc.size;
sc->fw_fw.size = le32toh(hdr->fsize);
sc->fw_fw.name = fp->name;
}
#if 0
device_printf(sc->sc_dev, "boot %d ucode %d fw %d bytes\n",
sc->fw_boot.size, sc->fw_uc.size, sc->fw_fw.size);
#endif
sc->fw_mode = opmode;
return 0;
bad:
iwi_put_firmware(sc);
return 1;
}
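/*
 * [Illustrative sketch, not part of this commit.]  For >= 3.0 images the
 * code above treats the firmware file as one blob: a small header giving
 * the boot, ucode and firmware section sizes, followed by the three
 * sections back-to-back (pre-3.0 firmware ships them as separate files).
 * The userland program below parses a fabricated buffer laid out the same
 * way; struct demo_fw_hdr is a simplified stand-in that only models the
 * three size fields read above, not the full struct iwi_firmware_hdr.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_fw_hdr {
        uint32_t bsize;         /* boot image size */
        uint32_t usize;         /* ucode image size */
        uint32_t fsize;         /* firmware image size */
};

int
main(void)
{
        uint8_t image[sizeof(struct demo_fw_hdr) + 2 + 3 + 4];
        struct demo_fw_hdr hdr = { 2, 3, 4 };   /* host assumed little endian */
        const uint8_t *boot, *ucode, *fw;
        size_t need;

        memcpy(image, &hdr, sizeof(hdr));
        memset(image + sizeof(hdr), 0xab, sizeof(image) - sizeof(hdr));

        /* same bounds check as the combined-image branch above */
        need = sizeof(hdr) + hdr.bsize + hdr.usize + hdr.fsize;
        if (sizeof(image) < need) {
                fprintf(stderr, "image too small\n");
                return (1);
        }
        boot = image + sizeof(hdr);
        ucode = boot + hdr.bsize;
        fw = ucode + hdr.usize;
        printf("boot %u bytes at +%zu, ucode %u at +%zu, fw %u at +%zu\n",
            (unsigned)hdr.bsize, (size_t)(boot - image),
            (unsigned)hdr.usize, (size_t)(ucode - image),
            (unsigned)hdr.fsize, (size_t)(fw - image));
        return (0);
}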
static void
iwi_put_fw(struct iwi_fw *fw)
{
if (fw->fp != NULL) {
firmware_put(fw->fp, FIRMWARE_UNLOAD);
fw->fp = NULL;
}
fw->data = NULL;
fw->size = 0;
fw->name = NULL;
}
/*
* Release any cached firmware images.
*/
static void
iwi_put_firmware(struct iwi_softc *sc)
{
iwi_put_fw(&sc->fw_uc);
iwi_put_fw(&sc->fw_fw);
iwi_put_fw(&sc->fw_boot);
}
static int
iwi_load_ucode(struct iwi_softc *sc, const struct iwi_fw *fw)
{
uint32_t tmp;
const uint16_t *w;
const char *uc = fw->data;
size_t size = fw->size;
int i, ntries, error;
IWI_LOCK_ASSERT(sc);
error = 0;
CSR_WRITE_4(sc, IWI_CSR_RST, CSR_READ_4(sc, IWI_CSR_RST) |
IWI_RST_STOP_MASTER);
for (ntries = 0; ntries < 5; ntries++) {
if (CSR_READ_4(sc, IWI_CSR_RST) & IWI_RST_MASTER_DISABLED)
break;
DELAY(10);
}
if (ntries == 5) {
device_printf(sc->sc_dev, "timeout waiting for master\n");
error = EIO;
goto fail;
}
MEM_WRITE_4(sc, 0x3000e0, 0x80000000);
DELAY(5000);
tmp = CSR_READ_4(sc, IWI_CSR_RST);
tmp &= ~IWI_RST_PRINCETON_RESET;
CSR_WRITE_4(sc, IWI_CSR_RST, tmp);
DELAY(5000);
MEM_WRITE_4(sc, 0x3000e0, 0);
DELAY(1000);
MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, 1);
DELAY(1000);
MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, 0);
DELAY(1000);
MEM_WRITE_1(sc, 0x200000, 0x00);
MEM_WRITE_1(sc, 0x200000, 0x40);
DELAY(1000);
/* write microcode into adapter memory */
for (w = (const uint16_t *)uc; size > 0; w++, size -= 2)
MEM_WRITE_2(sc, 0x200010, htole16(*w));
MEM_WRITE_1(sc, 0x200000, 0x00);
MEM_WRITE_1(sc, 0x200000, 0x80);
/* wait until we get an answer */
for (ntries = 0; ntries < 100; ntries++) {
if (MEM_READ_1(sc, 0x200000) & 1)
break;
DELAY(100);
}
if (ntries == 100) {
device_printf(sc->sc_dev,
"timeout waiting for ucode to initialize\n");
error = EIO;
goto fail;
}
/* read the answer or the firmware will not initialize properly */
for (i = 0; i < 7; i++)
MEM_READ_4(sc, 0x200004);
MEM_WRITE_1(sc, 0x200000, 0x00);
fail:
return error;
}
/* macro to handle unaligned little endian data in firmware image */
#define GETLE32(p) ((p)[0] | (p)[1] << 8 | (p)[2] << 16 | (p)[3] << 24)
static int
iwi_load_firmware(struct iwi_softc *sc, const struct iwi_fw *fw)
{
u_char *p, *end;
uint32_t sentinel, ctl, src, dst, sum, len, mlen, tmp;
int ntries, error;
IWI_LOCK_ASSERT(sc);
/* copy firmware image to DMA memory */
memcpy(sc->fw_virtaddr, fw->data, fw->size);
/* make sure the adapter will get up-to-date values */
bus_dmamap_sync(sc->fw_dmat, sc->fw_map, BUS_DMASYNC_PREWRITE);
/* tell the adapter where the command blocks are stored */
MEM_WRITE_4(sc, 0x3000a0, 0x27000);
/*
* Store command blocks into adapter's internal memory using register
* indirections. The adapter will read the firmware image through DMA
* using information stored in command blocks.
*/
src = sc->fw_physaddr;
p = sc->fw_virtaddr;
end = p + fw->size;
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_ADDR, 0x27000);
while (p < end) {
dst = GETLE32(p); p += 4; src += 4;
len = GETLE32(p); p += 4; src += 4;
p += len;
while (len > 0) {
mlen = min(len, IWI_CB_MAXDATALEN);
ctl = IWI_CB_DEFAULT_CTL | mlen;
sum = ctl ^ src ^ dst;
/* write a command block */
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, ctl);
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, src);
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, dst);
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, sum);
src += mlen;
dst += mlen;
len -= mlen;
}
}
/* write a fictive final command block (sentinel) */
sentinel = CSR_READ_4(sc, IWI_CSR_AUTOINC_ADDR);
CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, 0);
tmp = CSR_READ_4(sc, IWI_CSR_RST);
tmp &= ~(IWI_RST_MASTER_DISABLED | IWI_RST_STOP_MASTER);
CSR_WRITE_4(sc, IWI_CSR_RST, tmp);
/* tell the adapter to start processing command blocks */
MEM_WRITE_4(sc, 0x3000a4, 0x540100);
/* wait until the adapter reaches the sentinel */
for (ntries = 0; ntries < 400; ntries++) {
if (MEM_READ_4(sc, 0x3000d0) >= sentinel)
break;
DELAY(100);
}
/* sync dma, just in case */
bus_dmamap_sync(sc->fw_dmat, sc->fw_map, BUS_DMASYNC_POSTWRITE);
if (ntries == 400) {
device_printf(sc->sc_dev,
"timeout processing command blocks for %s firmware\n",
fw->name);
return EIO;
}
/* we're done with command blocks processing */
MEM_WRITE_4(sc, 0x3000a4, 0x540c00);
/* allow interrupts so we know when the firmware is ready */
CSR_WRITE_4(sc, IWI_CSR_INTR_MASK, IWI_INTR_MASK);
/* tell the adapter to initialize the firmware */
CSR_WRITE_4(sc, IWI_CSR_RST, 0);
tmp = CSR_READ_4(sc, IWI_CSR_CTL);
CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_ALLOW_STANDBY);
/* wait at most one second for firmware initialization to complete */
if ((error = msleep(sc, &sc->sc_mtx, 0, "iwiinit", hz)) != 0) {
device_printf(sc->sc_dev, "timeout waiting for %s firmware "
"initialization to complete\n", fw->name);
}
return error;
}
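/*
 * [Illustrative sketch, not part of this commit.]  iwi_load_firmware()
 * above walks the firmware blob as a sequence of little-endian
 * (dst, len, payload) records and turns each one into one or more command
 * blocks of the form { ctl, src, dst, ctl ^ src ^ dst }, where src is the
 * bus address of the payload inside the DMA buffer.  The program below
 * replays that walk over a fabricated two-record blob and prints the
 * blocks instead of poking registers; DEMO_CB_MAXDATALEN and the base
 * "bus address" are arbitrary demo values, and the real code also ORs
 * IWI_CB_DEFAULT_CTL into ctl.
 */
#include <stdio.h>
#include <string.h>

/* same construction as the driver macro above */
#define GETLE32(p)      ((p)[0] | (p)[1] << 8 | (p)[2] << 16 | (p)[3] << 24)
#define DEMO_CB_MAXDATALEN      8       /* arbitrary; the real limit differs */

static void
putle32(unsigned char *p, unsigned v)
{
        p[0] = v;
        p[1] = v >> 8;
        p[2] = v >> 16;
        p[3] = v >> 24;
}

int
main(void)
{
        unsigned char blob[8 + 12 + 8 + 4];     /* two (dst, len, payload) records */
        const unsigned char *p = blob, *end = blob + sizeof(blob);
        unsigned src = 0x100000;        /* pretend bus address of the blob */
        unsigned dst, len, mlen, ctl;

        putle32(blob + 0, 0x2000);      /* record 1: 12 bytes to 0x2000 */
        putle32(blob + 4, 12);
        memset(blob + 8, 0x11, 12);
        putle32(blob + 20, 0x3000);     /* record 2: 4 bytes to 0x3000 */
        putle32(blob + 24, 4);
        memset(blob + 28, 0x22, 4);

        while (p < end) {
                dst = GETLE32(p); p += 4; src += 4;
                len = GETLE32(p); p += 4; src += 4;
                p += len;
                while (len > 0) {
                        mlen = len < DEMO_CB_MAXDATALEN ?
                            len : DEMO_CB_MAXDATALEN;
                        ctl = mlen;
                        printf("cb: ctl=%#x src=%#x dst=%#x sum=%#x\n",
                            ctl, src, dst, ctl ^ src ^ dst);
                        src += mlen;
                        dst += mlen;
                        len -= mlen;
                }
        }
        return (0);
}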
static int
iwi_setpowermode(struct iwi_softc *sc, struct ieee80211vap *vap)
{
uint32_t data;
if (vap->iv_flags & IEEE80211_F_PMGTON) {
/* XXX set more fine-grained operation */
data = htole32(IWI_POWER_MODE_MAX);
} else
data = htole32(IWI_POWER_MODE_CAM);
DPRINTF(("Setting power mode to %u\n", le32toh(data)));
return iwi_cmd(sc, IWI_CMD_SET_POWER_MODE, &data, sizeof data);
}
static int
iwi_setwepkeys(struct iwi_softc *sc, struct ieee80211vap *vap)
{
struct iwi_wep_key wepkey;
struct ieee80211_key *wk;
int error, i;
for (i = 0; i < IEEE80211_WEP_NKID; i++) {
wk = &vap->iv_nw_keys[i];
wepkey.cmd = IWI_WEP_KEY_CMD_SETKEY;
wepkey.idx = i;
wepkey.len = wk->wk_keylen;
memset(wepkey.key, 0, sizeof wepkey.key);
memcpy(wepkey.key, wk->wk_key, wk->wk_keylen);
DPRINTF(("Setting wep key index %u len %u\n", wepkey.idx,
wepkey.len));
error = iwi_cmd(sc, IWI_CMD_SET_WEP_KEY, &wepkey,
sizeof wepkey);
if (error != 0)
return error;
}
return 0;
}
static int
iwi_config(struct iwi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct iwi_configuration config;
struct iwi_rateset rs;
struct iwi_txpower power;
uint32_t data;
int error, i;
IWI_LOCK_ASSERT(sc);
- DPRINTF(("Setting MAC address to %6D\n", IF_LLADDR(ifp), ":"));
- error = iwi_cmd(sc, IWI_CMD_SET_MAC_ADDRESS, IF_LLADDR(ifp),
+ DPRINTF(("Setting MAC address to %6D\n", ic->ic_macaddr, ":"));
+ error = iwi_cmd(sc, IWI_CMD_SET_MAC_ADDRESS, ic->ic_macaddr,
IEEE80211_ADDR_LEN);
if (error != 0)
return error;
memset(&config, 0, sizeof config);
config.bluetooth_coexistence = sc->bluetooth;
config.silence_threshold = 0x1e;
config.antenna = sc->antenna;
config.multicast_enabled = 1;
config.answer_pbreq = (ic->ic_opmode == IEEE80211_M_IBSS) ? 1 : 0;
config.disable_unicast_decryption = 1;
config.disable_multicast_decryption = 1;
if (ic->ic_opmode == IEEE80211_M_MONITOR) {
config.allow_invalid_frames = 1;
config.allow_beacon_and_probe_resp = 1;
config.allow_mgt = 1;
}
DPRINTF(("Configuring adapter\n"));
error = iwi_cmd(sc, IWI_CMD_SET_CONFIG, &config, sizeof config);
if (error != 0)
return error;
if (ic->ic_opmode == IEEE80211_M_IBSS) {
power.mode = IWI_MODE_11B;
power.nchan = 11;
for (i = 0; i < 11; i++) {
power.chan[i].chan = i + 1;
power.chan[i].power = IWI_TXPOWER_MAX;
}
DPRINTF(("Setting .11b channels tx power\n"));
error = iwi_cmd(sc, IWI_CMD_SET_TX_POWER, &power, sizeof power);
if (error != 0)
return error;
power.mode = IWI_MODE_11G;
DPRINTF(("Setting .11g channels tx power\n"));
error = iwi_cmd(sc, IWI_CMD_SET_TX_POWER, &power, sizeof power);
if (error != 0)
return error;
}
memset(&rs, 0, sizeof rs);
rs.mode = IWI_MODE_11G;
rs.type = IWI_RATESET_TYPE_SUPPORTED;
rs.nrates = ic->ic_sup_rates[IEEE80211_MODE_11G].rs_nrates;
memcpy(rs.rates, ic->ic_sup_rates[IEEE80211_MODE_11G].rs_rates,
rs.nrates);
DPRINTF(("Setting .11bg supported rates (%u)\n", rs.nrates));
error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs);
if (error != 0)
return error;
memset(&rs, 0, sizeof rs);
rs.mode = IWI_MODE_11A;
rs.type = IWI_RATESET_TYPE_SUPPORTED;
rs.nrates = ic->ic_sup_rates[IEEE80211_MODE_11A].rs_nrates;
memcpy(rs.rates, ic->ic_sup_rates[IEEE80211_MODE_11A].rs_rates,
rs.nrates);
DPRINTF(("Setting .11a supported rates (%u)\n", rs.nrates));
error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs);
if (error != 0)
return error;
data = htole32(arc4random());
DPRINTF(("Setting initialization vector to %u\n", le32toh(data)));
error = iwi_cmd(sc, IWI_CMD_SET_IV, &data, sizeof data);
if (error != 0)
return error;
/* enable adapter */
DPRINTF(("Enabling adapter\n"));
return iwi_cmd(sc, IWI_CMD_ENABLE, NULL, 0);
}
static __inline void
set_scan_type(struct iwi_scan_ext *scan, int ix, int scan_type)
{
uint8_t *st = &scan->scan_type[ix / 2];
if (ix % 2)
*st = (*st & 0xf0) | ((scan_type & 0xf) << 0);
else
*st = (*st & 0x0f) | ((scan_type & 0xf) << 4);
}
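/*
 * [Illustrative sketch, not part of this commit.]  set_scan_type() above
 * packs one 4-bit scan type per channel slot, two slots per byte: even
 * indices land in the high nibble, odd ones in the low nibble.  The
 * standalone test below uses the same packing and shows how the debug
 * code later in this file unpacks it ("i & 1 ? type : type >> 4").
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void
demo_set_scan_type(uint8_t *scan_type, int ix, int type)
{
        uint8_t *st = &scan_type[ix / 2];

        if (ix % 2)
                *st = (*st & 0xf0) | ((type & 0xf) << 0);
        else
                *st = (*st & 0x0f) | ((type & 0xf) << 4);
}

int
main(void)
{
        uint8_t scan_type[4] = { 0 };
        int i, unpacked;

        for (i = 0; i < 8; i++)
                demo_set_scan_type(scan_type, i, i & 0x7);
        for (i = 0; i < 8; i++) {
                uint8_t b = scan_type[i / 2];

                unpacked = (i & 1 ? b : b >> 4) & 0xf;
                assert(unpacked == (i & 0x7));
                printf("channel slot %d -> scan type %d\n", i, unpacked);
        }
        return (0);
}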
static int
scan_type(const struct ieee80211_scan_state *ss,
const struct ieee80211_channel *chan)
{
/* We can only set one essid for a directed scan */
if (ss->ss_nssid != 0)
return IWI_SCAN_TYPE_BDIRECTED;
if ((ss->ss_flags & IEEE80211_SCAN_ACTIVE) &&
(chan->ic_flags & IEEE80211_CHAN_PASSIVE) == 0)
return IWI_SCAN_TYPE_BROADCAST;
return IWI_SCAN_TYPE_PASSIVE;
}
static __inline int
scan_band(const struct ieee80211_channel *c)
{
return IEEE80211_IS_CHAN_5GHZ(c) ? IWI_CHAN_5GHZ : IWI_CHAN_2GHZ;
}
static void
iwi_monitor_scan(void *arg, int npending)
{
struct iwi_softc *sc = arg;
IWI_LOCK_DECL;
IWI_LOCK(sc);
(void) iwi_scanchan(sc, 2000, 0);
IWI_UNLOCK(sc);
}
/*
* Start a scan on the current channel or all channels.
*/
static int
iwi_scanchan(struct iwi_softc *sc, unsigned long maxdwell, int allchan)
{
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_channel *chan;
struct ieee80211_scan_state *ss;
struct iwi_scan_ext scan;
int error = 0;
IWI_LOCK_ASSERT(sc);
if (sc->fw_state == IWI_FW_SCANNING) {
/*
* This should not happen as we only trigger scan_next after
* completion
*/
DPRINTF(("%s: called too early - still scanning\n", __func__));
return (EBUSY);
}
IWI_STATE_BEGIN(sc, IWI_FW_SCANNING);
- ic = sc->sc_ifp->if_l2com;
ss = ic->ic_scan;
memset(&scan, 0, sizeof scan);
scan.full_scan_index = htole32(++sc->sc_scangen);
scan.dwell_time[IWI_SCAN_TYPE_PASSIVE] = htole16(maxdwell);
if (ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) {
/*
* Use very short dwell times for when we send probe request
* frames. Without this bg scans hang. Ideally this should
* be handled with early-termination as done by net80211 but
* that's not feasible (aborting a scan is problematic).
*/
scan.dwell_time[IWI_SCAN_TYPE_BROADCAST] = htole16(30);
scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED] = htole16(30);
} else {
scan.dwell_time[IWI_SCAN_TYPE_BROADCAST] = htole16(maxdwell);
scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED] = htole16(maxdwell);
}
/* We can only set one essid for a directed scan */
if (ss->ss_nssid != 0) {
error = iwi_cmd(sc, IWI_CMD_SET_ESSID, ss->ss_ssid[0].ssid,
ss->ss_ssid[0].len);
if (error)
return (error);
}
if (allchan) {
int i, next, band, b, bstart;
/*
* Convert scan list to run-length encoded channel list
* the firmware requires (preserving the order setup by
* net80211). The first entry in each run specifies the
* band and the count of items in the run.
*/
next = 0; /* next open slot */
bstart = 0; /* NB: not needed, silence compiler */
band = -1; /* NB: impossible value */
KASSERT(ss->ss_last > 0, ("no channels"));
for (i = 0; i < ss->ss_last; i++) {
chan = ss->ss_chans[i];
b = scan_band(chan);
if (b != band) {
if (band != -1)
scan.channels[bstart] =
(next - bstart) | band;
/* NB: this allocates a slot for the run-len */
band = b, bstart = next++;
}
if (next >= IWI_SCAN_CHANNELS) {
DPRINTF(("truncating scan list\n"));
break;
}
scan.channels[next] = ieee80211_chan2ieee(ic, chan);
set_scan_type(&scan, next, scan_type(ss, chan));
next++;
}
scan.channels[bstart] = (next - bstart) | band;
} else {
/* Scan the current channel only */
chan = ic->ic_curchan;
scan.channels[0] = 1 | scan_band(chan);
scan.channels[1] = ieee80211_chan2ieee(ic, chan);
set_scan_type(&scan, 1, scan_type(ss, chan));
}
#ifdef IWI_DEBUG
if (iwi_debug > 0) {
static const char *scantype[8] =
{ "PSTOP", "PASV", "DIR", "BCAST", "BDIR", "5", "6", "7" };
int i;
printf("Scan request: index %u dwell %d/%d/%d\n"
, le32toh(scan.full_scan_index)
, le16toh(scan.dwell_time[IWI_SCAN_TYPE_PASSIVE])
, le16toh(scan.dwell_time[IWI_SCAN_TYPE_BROADCAST])
, le16toh(scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED])
);
i = 0;
do {
int run = scan.channels[i];
if (run == 0)
break;
printf("Scan %d %s channels:", run & 0x3f,
run & IWI_CHAN_2GHZ ? "2.4GHz" : "5GHz");
for (run &= 0x3f, i++; run > 0; run--, i++) {
uint8_t type = scan.scan_type[i/2];
printf(" %u/%s", scan.channels[i],
scantype[(i & 1 ? type : type>>4) & 7]);
}
printf("\n");
} while (i < IWI_SCAN_CHANNELS);
}
#endif
return (iwi_cmd(sc, IWI_CMD_SCAN_EXT, &scan, sizeof scan));
}
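/*
 * [Illustrative sketch, not part of this commit.]  The allchan branch of
 * iwi_scanchan() above run-length encodes the net80211 scan list: each
 * band run starts with a header slot holding (next - bstart) | band flag,
 * followed by the channel numbers of that run.  The userland program
 * below performs the same encoding over a made-up channel list; the
 * DEMO_* constants and the simple (ieee, is5ghz) pairs stand in for the
 * real IWI_CHAN_* flags and ieee80211_channel structures.
 */
#include <stdio.h>

#define DEMO_CHAN_2GHZ          (1 << 6)        /* stand-ins for IWI_CHAN_* */
#define DEMO_CHAN_5GHZ          (1 << 7)
#define DEMO_SCAN_CHANNELS      16

struct demo_chan {
        int ieee;
        int is5ghz;
};

int
main(void)
{
        static const struct demo_chan list[] = {
                { 1, 0 }, { 6, 0 }, { 11, 0 },  /* 2.4 GHz run */
                { 36, 1 }, { 40, 1 },           /* 5 GHz run */
        };
        unsigned char channels[DEMO_SCAN_CHANNELS] = { 0 };
        int i, next = 0, bstart = 0, band = -1, b;

        for (i = 0; i < (int)(sizeof(list) / sizeof(list[0])); i++) {
                b = list[i].is5ghz ? DEMO_CHAN_5GHZ : DEMO_CHAN_2GHZ;
                if (b != band) {
                        if (band != -1)
                                channels[bstart] = (next - bstart) | band;
                        band = b, bstart = next++;      /* reserve header slot */
                }
                if (next >= DEMO_SCAN_CHANNELS)
                        break;
                channels[next++] = list[i].ieee;
        }
        channels[bstart] = (next - bstart) | band;

        for (i = 0; i < next; i++)
                printf("slot %2d: 0x%02x\n", i, channels[i]);
        return (0);
}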
static int
iwi_set_sensitivity(struct iwi_softc *sc, int8_t rssi_dbm)
{
struct iwi_sensitivity sens;
DPRINTF(("Setting sensitivity to %d\n", rssi_dbm));
memset(&sens, 0, sizeof sens);
sens.rssi = htole16(rssi_dbm);
return iwi_cmd(sc, IWI_CMD_SET_SENSITIVITY, &sens, sizeof sens);
}
static int
iwi_auth_and_assoc(struct iwi_softc *sc, struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct ifnet *ifp = vap->iv_ifp;
struct ieee80211_node *ni;
struct iwi_configuration config;
struct iwi_associate *assoc = &sc->assoc;
struct iwi_rateset rs;
uint16_t capinfo;
uint32_t data;
int error, mode;
IWI_LOCK_ASSERT(sc);
ni = ieee80211_ref_node(vap->iv_bss);
if (sc->flags & IWI_FLAG_ASSOCIATED) {
DPRINTF(("Already associated\n"));
return (-1);
}
IWI_STATE_BEGIN(sc, IWI_FW_ASSOCIATING);
error = 0;
mode = 0;
if (IEEE80211_IS_CHAN_A(ic->ic_curchan))
mode = IWI_MODE_11A;
else if (IEEE80211_IS_CHAN_G(ic->ic_curchan))
mode = IWI_MODE_11G;
if (IEEE80211_IS_CHAN_B(ic->ic_curchan))
mode = IWI_MODE_11B;
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
memset(&config, 0, sizeof config);
config.bluetooth_coexistence = sc->bluetooth;
config.antenna = sc->antenna;
config.multicast_enabled = 1;
if (mode == IWI_MODE_11G)
config.use_protection = 1;
config.answer_pbreq =
(vap->iv_opmode == IEEE80211_M_IBSS) ? 1 : 0;
config.disable_unicast_decryption = 1;
config.disable_multicast_decryption = 1;
DPRINTF(("Configuring adapter\n"));
error = iwi_cmd(sc, IWI_CMD_SET_CONFIG, &config, sizeof config);
if (error != 0)
goto done;
}
#ifdef IWI_DEBUG
if (iwi_debug > 0) {
printf("Setting ESSID to ");
ieee80211_print_essid(ni->ni_essid, ni->ni_esslen);
printf("\n");
}
#endif
error = iwi_cmd(sc, IWI_CMD_SET_ESSID, ni->ni_essid, ni->ni_esslen);
if (error != 0)
goto done;
error = iwi_setpowermode(sc, vap);
if (error != 0)
goto done;
data = htole32(vap->iv_rtsthreshold);
DPRINTF(("Setting RTS threshold to %u\n", le32toh(data)));
error = iwi_cmd(sc, IWI_CMD_SET_RTS_THRESHOLD, &data, sizeof data);
if (error != 0)
goto done;
data = htole32(vap->iv_fragthreshold);
DPRINTF(("Setting fragmentation threshold to %u\n", le32toh(data)));
error = iwi_cmd(sc, IWI_CMD_SET_FRAG_THRESHOLD, &data, sizeof data);
if (error != 0)
goto done;
/* the rate set has already been "negotiated" */
memset(&rs, 0, sizeof rs);
rs.mode = mode;
rs.type = IWI_RATESET_TYPE_NEGOTIATED;
rs.nrates = ni->ni_rates.rs_nrates;
if (rs.nrates > IWI_RATESET_SIZE) {
DPRINTF(("Truncating negotiated rate set from %u\n",
rs.nrates));
rs.nrates = IWI_RATESET_SIZE;
}
memcpy(rs.rates, ni->ni_rates.rs_rates, rs.nrates);
DPRINTF(("Setting negotiated rates (%u)\n", rs.nrates));
error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs);
if (error != 0)
goto done;
memset(assoc, 0, sizeof *assoc);
if ((vap->iv_flags & IEEE80211_F_WME) && ni->ni_ies.wme_ie != NULL) {
/* NB: don't treat WME setup as failure */
if (iwi_wme_setparams(sc) == 0 && iwi_wme_setie(sc) == 0)
assoc->policy |= htole16(IWI_POLICY_WME);
/* XXX complain on failure? */
}
if (vap->iv_appie_wpa != NULL) {
struct ieee80211_appie *ie = vap->iv_appie_wpa;
DPRINTF(("Setting optional IE (len=%u)\n", ie->ie_len));
error = iwi_cmd(sc, IWI_CMD_SET_OPTIE, ie->ie_data, ie->ie_len);
if (error != 0)
goto done;
}
error = iwi_set_sensitivity(sc, ic->ic_node_getrssi(ni));
if (error != 0)
goto done;
assoc->mode = mode;
assoc->chan = ic->ic_curchan->ic_ieee;
/*
* NB: do not arrange for shared key auth w/o privacy
* (i.e. a wep key); it causes a firmware error.
*/
if ((vap->iv_flags & IEEE80211_F_PRIVACY) &&
ni->ni_authmode == IEEE80211_AUTH_SHARED) {
assoc->auth = IWI_AUTH_SHARED;
/*
* It's possible to have privacy marked but no default
* key setup. This typically is due to a user app bug
* but if we blindly grab the key the firmware will
* barf so avoid it for now.
*/
if (vap->iv_def_txkey != IEEE80211_KEYIX_NONE)
assoc->auth |= vap->iv_def_txkey << 4;
error = iwi_setwepkeys(sc, vap);
if (error != 0)
goto done;
}
if (vap->iv_flags & IEEE80211_F_WPA)
assoc->policy |= htole16(IWI_POLICY_WPA);
if (vap->iv_opmode == IEEE80211_M_IBSS && ni->ni_tstamp.tsf == 0)
assoc->type = IWI_HC_IBSS_START;
else
assoc->type = IWI_HC_ASSOC;
memcpy(assoc->tstamp, ni->ni_tstamp.data, 8);
if (vap->iv_opmode == IEEE80211_M_IBSS)
capinfo = IEEE80211_CAPINFO_IBSS;
else
capinfo = IEEE80211_CAPINFO_ESS;
if (vap->iv_flags & IEEE80211_F_PRIVACY)
capinfo |= IEEE80211_CAPINFO_PRIVACY;
if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
if (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME)
capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
assoc->capinfo = htole16(capinfo);
assoc->lintval = htole16(ic->ic_lintval);
assoc->intval = htole16(ni->ni_intval);
IEEE80211_ADDR_COPY(assoc->bssid, ni->ni_bssid);
if (vap->iv_opmode == IEEE80211_M_IBSS)
IEEE80211_ADDR_COPY(assoc->dst, ifp->if_broadcastaddr);
else
IEEE80211_ADDR_COPY(assoc->dst, ni->ni_bssid);
DPRINTF(("%s bssid %6D dst %6D channel %u policy 0x%x "
"auth %u capinfo 0x%x lintval %u bintval %u\n",
assoc->type == IWI_HC_IBSS_START ? "Start" : "Join",
assoc->bssid, ":", assoc->dst, ":",
assoc->chan, le16toh(assoc->policy), assoc->auth,
le16toh(assoc->capinfo), le16toh(assoc->lintval),
le16toh(assoc->intval)));
error = iwi_cmd(sc, IWI_CMD_ASSOCIATE, assoc, sizeof *assoc);
done:
ieee80211_free_node(ni);
if (error)
IWI_STATE_END(sc, IWI_FW_ASSOCIATING);
return (error);
}
static void
iwi_disassoc(void *arg, int pending)
{
struct iwi_softc *sc = arg;
IWI_LOCK_DECL;
IWI_LOCK(sc);
iwi_disassociate(sc, 0);
IWI_UNLOCK(sc);
}
static int
iwi_disassociate(struct iwi_softc *sc, int quiet)
{
struct iwi_associate *assoc = &sc->assoc;
if ((sc->flags & IWI_FLAG_ASSOCIATED) == 0) {
DPRINTF(("Not associated\n"));
return (-1);
}
IWI_STATE_BEGIN(sc, IWI_FW_DISASSOCIATING);
if (quiet)
assoc->type = IWI_HC_DISASSOC_QUIET;
else
assoc->type = IWI_HC_DISASSOC;
DPRINTF(("Trying to disassociate from %6D channel %u\n",
assoc->bssid, ":", assoc->chan));
return iwi_cmd(sc, IWI_CMD_ASSOCIATE, assoc, sizeof *assoc);
}
/*
* release dma resources for the firmware
*/
static void
iwi_release_fw_dma(struct iwi_softc *sc)
{
if (sc->fw_flags & IWI_FW_HAVE_PHY)
bus_dmamap_unload(sc->fw_dmat, sc->fw_map);
if (sc->fw_flags & IWI_FW_HAVE_MAP)
bus_dmamem_free(sc->fw_dmat, sc->fw_virtaddr, sc->fw_map);
if (sc->fw_flags & IWI_FW_HAVE_DMAT)
bus_dma_tag_destroy(sc->fw_dmat);
sc->fw_flags = 0;
sc->fw_dma_size = 0;
sc->fw_dmat = NULL;
sc->fw_map = NULL;
sc->fw_physaddr = 0;
sc->fw_virtaddr = NULL;
}
/*
* allocate the dma descriptor for the firmware.
* Return 0 on success, 1 on error.
* Must be called unlocked, protected by IWI_FLAG_FW_LOADING.
*/
static int
iwi_init_fw_dma(struct iwi_softc *sc, int size)
{
if (sc->fw_dma_size >= size)
return 0;
if (bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
size, 1, size, 0, NULL, NULL, &sc->fw_dmat) != 0) {
device_printf(sc->sc_dev,
"could not create firmware DMA tag\n");
goto error;
}
sc->fw_flags |= IWI_FW_HAVE_DMAT;
if (bus_dmamem_alloc(sc->fw_dmat, &sc->fw_virtaddr, 0,
&sc->fw_map) != 0) {
device_printf(sc->sc_dev,
"could not allocate firmware DMA memory\n");
goto error;
}
sc->fw_flags |= IWI_FW_HAVE_MAP;
if (bus_dmamap_load(sc->fw_dmat, sc->fw_map, sc->fw_virtaddr,
size, iwi_dma_map_addr, &sc->fw_physaddr, 0) != 0) {
device_printf(sc->sc_dev, "could not load firmware DMA map\n");
goto error;
}
sc->fw_flags |= IWI_FW_HAVE_PHY;
sc->fw_dma_size = size;
return 0;
error:
iwi_release_fw_dma(sc);
return 1;
}
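/*
 * [Illustrative sketch, not part of this commit.]  iwi_init_fw_dma()
 * above only (re)creates the firmware DMA area when the requested size
 * exceeds what is already allocated; the block is otherwise kept until
 * detach to avoid fragmentation on firmware reload.  The userland
 * analogue below applies the same grow-only policy with malloc/free in
 * place of busdma; demo_fw_dma_* names are made up for this example.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_fw_dma {
        void *buf;
        size_t size;
};

static int
demo_fw_dma_init(struct demo_fw_dma *d, size_t size)
{
        void *p;

        if (d->size >= size)            /* current block is big enough */
                return (0);
        if ((p = malloc(size)) == NULL)
                return (1);
        free(d->buf);                   /* release the smaller old block */
        d->buf = p;
        d->size = size;
        return (0);
}

int
main(void)
{
        struct demo_fw_dma d = { NULL, 0 };

        demo_fw_dma_init(&d, 4096);
        printf("allocated %zu bytes\n", d.size);
        demo_fw_dma_init(&d, 1024);     /* smaller request: keep the block */
        printf("still %zu bytes\n", d.size);
        demo_fw_dma_init(&d, 8192);     /* larger request: grow */
        printf("grew to %zu bytes\n", d.size);
        free(d.buf);
        return (0);
}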
static void
iwi_init_locked(struct iwi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
struct iwi_rx_data *data;
int i;
IWI_LOCK_ASSERT(sc);
if (sc->fw_state == IWI_FW_LOADING) {
device_printf(sc->sc_dev, "%s: already loading\n", __func__);
return; /* XXX: condvar? */
}
iwi_stop_locked(sc);
IWI_STATE_BEGIN(sc, IWI_FW_LOADING);
if (iwi_reset(sc) != 0) {
device_printf(sc->sc_dev, "could not reset adapter\n");
goto fail;
}
if (iwi_load_firmware(sc, &sc->fw_boot) != 0) {
device_printf(sc->sc_dev,
"could not load boot firmware %s\n", sc->fw_boot.name);
goto fail;
}
if (iwi_load_ucode(sc, &sc->fw_uc) != 0) {
device_printf(sc->sc_dev,
"could not load microcode %s\n", sc->fw_uc.name);
goto fail;
}
iwi_stop_master(sc);
CSR_WRITE_4(sc, IWI_CSR_CMD_BASE, sc->cmdq.physaddr);
CSR_WRITE_4(sc, IWI_CSR_CMD_SIZE, sc->cmdq.count);
CSR_WRITE_4(sc, IWI_CSR_CMD_WIDX, sc->cmdq.cur);
CSR_WRITE_4(sc, IWI_CSR_TX1_BASE, sc->txq[0].physaddr);
CSR_WRITE_4(sc, IWI_CSR_TX1_SIZE, sc->txq[0].count);
CSR_WRITE_4(sc, IWI_CSR_TX1_WIDX, sc->txq[0].cur);
CSR_WRITE_4(sc, IWI_CSR_TX2_BASE, sc->txq[1].physaddr);
CSR_WRITE_4(sc, IWI_CSR_TX2_SIZE, sc->txq[1].count);
CSR_WRITE_4(sc, IWI_CSR_TX2_WIDX, sc->txq[1].cur);
CSR_WRITE_4(sc, IWI_CSR_TX3_BASE, sc->txq[2].physaddr);
CSR_WRITE_4(sc, IWI_CSR_TX3_SIZE, sc->txq[2].count);
CSR_WRITE_4(sc, IWI_CSR_TX3_WIDX, sc->txq[2].cur);
CSR_WRITE_4(sc, IWI_CSR_TX4_BASE, sc->txq[3].physaddr);
CSR_WRITE_4(sc, IWI_CSR_TX4_SIZE, sc->txq[3].count);
CSR_WRITE_4(sc, IWI_CSR_TX4_WIDX, sc->txq[3].cur);
for (i = 0; i < sc->rxq.count; i++) {
data = &sc->rxq.data[i];
CSR_WRITE_4(sc, data->reg, data->physaddr);
}
CSR_WRITE_4(sc, IWI_CSR_RX_WIDX, sc->rxq.count - 1);
if (iwi_load_firmware(sc, &sc->fw_fw) != 0) {
device_printf(sc->sc_dev,
"could not load main firmware %s\n", sc->fw_fw.name);
goto fail;
}
sc->flags |= IWI_FLAG_FW_INITED;
IWI_STATE_END(sc, IWI_FW_LOADING);
if (iwi_config(sc) != 0) {
device_printf(sc->sc_dev, "unable to enable adapter\n");
goto fail2;
}
callout_reset(&sc->sc_wdtimer, hz, iwi_watchdog, sc);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_running = 1;
return;
fail:
IWI_STATE_END(sc, IWI_FW_LOADING);
fail2:
iwi_stop_locked(sc);
}
static void
iwi_init(void *priv)
{
struct iwi_softc *sc = priv;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
IWI_LOCK_DECL;
IWI_LOCK(sc);
iwi_init_locked(sc);
IWI_UNLOCK(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->sc_running)
ieee80211_start_all(ic);
}
static void
iwi_stop_locked(void *priv)
{
struct iwi_softc *sc = priv;
- struct ifnet *ifp = sc->sc_ifp;
IWI_LOCK_ASSERT(sc);
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->sc_running = 0;
if (sc->sc_softled) {
callout_stop(&sc->sc_ledtimer);
sc->sc_blinking = 0;
}
callout_stop(&sc->sc_wdtimer);
callout_stop(&sc->sc_rftimer);
iwi_stop_master(sc);
CSR_WRITE_4(sc, IWI_CSR_RST, IWI_RST_SOFT_RESET);
/* reset rings */
iwi_reset_cmd_ring(sc, &sc->cmdq);
iwi_reset_tx_ring(sc, &sc->txq[0]);
iwi_reset_tx_ring(sc, &sc->txq[1]);
iwi_reset_tx_ring(sc, &sc->txq[2]);
iwi_reset_tx_ring(sc, &sc->txq[3]);
iwi_reset_rx_ring(sc, &sc->rxq);
sc->sc_tx_timer = 0;
sc->sc_state_timer = 0;
sc->sc_busy_timer = 0;
sc->flags &= ~(IWI_FLAG_BUSY | IWI_FLAG_ASSOCIATED);
sc->fw_state = IWI_FW_IDLE;
wakeup(sc);
}
static void
iwi_stop(struct iwi_softc *sc)
{
IWI_LOCK_DECL;
IWI_LOCK(sc);
iwi_stop_locked(sc);
IWI_UNLOCK(sc);
}
static void
iwi_restart(void *arg, int npending)
{
struct iwi_softc *sc = arg;
iwi_init(sc);
}
/*
* Return whether or not the radio is enabled in hardware
* (i.e. the rfkill switch is "off").
*/
static int
iwi_getrfkill(struct iwi_softc *sc)
{
return (CSR_READ_4(sc, IWI_CSR_IO) & IWI_IO_RADIO_ENABLED) == 0;
}
static void
iwi_radio_on(void *arg, int pending)
{
struct iwi_softc *sc = arg;
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
device_printf(sc->sc_dev, "radio turned on\n");
iwi_init(sc);
ieee80211_notify_radio(ic, 1);
}
static void
iwi_rfkill_poll(void *arg)
{
struct iwi_softc *sc = arg;
IWI_LOCK_ASSERT(sc);
/*
* Check for a change in rfkill state. We get an
* interrupt when a radio is disabled but not when
* it is enabled so we must poll for the latter.
*/
if (!iwi_getrfkill(sc)) {
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
-
- ieee80211_runtask(ic, &sc->sc_radiontask);
+ ieee80211_runtask(&sc->sc_ic, &sc->sc_radiontask);
return;
}
callout_reset(&sc->sc_rftimer, 2*hz, iwi_rfkill_poll, sc);
}
static void
iwi_radio_off(void *arg, int pending)
{
struct iwi_softc *sc = arg;
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
IWI_LOCK_DECL;
device_printf(sc->sc_dev, "radio turned off\n");
ieee80211_notify_radio(ic, 0);
IWI_LOCK(sc);
iwi_stop_locked(sc);
iwi_rfkill_poll(sc);
IWI_UNLOCK(sc);
}
static int
iwi_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
struct iwi_softc *sc = arg1;
uint32_t size, buf[128];
memset(buf, 0, sizeof buf);
if (!(sc->flags & IWI_FLAG_FW_INITED))
return SYSCTL_OUT(req, buf, sizeof buf);
size = min(CSR_READ_4(sc, IWI_CSR_TABLE0_SIZE), 128 - 1);
CSR_READ_REGION_4(sc, IWI_CSR_TABLE0_BASE, &buf[1], size);
return SYSCTL_OUT(req, buf, size);
}
static int
iwi_sysctl_radio(SYSCTL_HANDLER_ARGS)
{
struct iwi_softc *sc = arg1;
int val = !iwi_getrfkill(sc);
return SYSCTL_OUT(req, &val, sizeof val);
}
/*
* Add sysctl knobs.
*/
static void
iwi_sysctlattach(struct iwi_softc *sc)
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "radio",
CTLTYPE_INT | CTLFLAG_RD, sc, 0, iwi_sysctl_radio, "I",
"radio transmitter switch state (0=off, 1=on)");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats",
CTLTYPE_OPAQUE | CTLFLAG_RD, sc, 0, iwi_sysctl_stats, "S",
"statistics");
sc->bluetooth = 0;
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "bluetooth",
CTLFLAG_RW, &sc->bluetooth, 0, "bluetooth coexistence");
sc->antenna = IWI_ANTENNA_AUTO;
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "antenna",
CTLFLAG_RW, &sc->antenna, 0, "antenna (0=auto)");
}
/*
* LED support.
*
* Different cards have different capabilities. Some have three
* LEDs while others have only one. The Linux ipw driver defines
* LEDs for link state (associated or not), band (11a, 11g, 11b),
* and for link activity. We use one LED and vary the blink rate
* according to the tx/rx traffic a la the ath driver.
*/
static __inline uint32_t
iwi_toggle_event(uint32_t r)
{
return r &~ (IWI_RST_STANDBY | IWI_RST_GATE_ODMA |
IWI_RST_GATE_IDMA | IWI_RST_GATE_ADMA);
}
static uint32_t
iwi_read_event(struct iwi_softc *sc)
{
return MEM_READ_4(sc, IWI_MEM_EEPROM_EVENT);
}
static void
iwi_write_event(struct iwi_softc *sc, uint32_t v)
{
MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, v);
}
static void
iwi_led_done(void *arg)
{
struct iwi_softc *sc = arg;
sc->sc_blinking = 0;
}
/*
* Turn the activity LED off: flip the pin and then set a timer so no
* update will happen for the specified duration.
*/
static void
iwi_led_off(void *arg)
{
struct iwi_softc *sc = arg;
uint32_t v;
v = iwi_read_event(sc);
v &= ~sc->sc_ledpin;
iwi_write_event(sc, iwi_toggle_event(v));
callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, iwi_led_done, sc);
}
/*
* Blink the LED according to the specified on/off times.
*/
static void
iwi_led_blink(struct iwi_softc *sc, int on, int off)
{
uint32_t v;
v = iwi_read_event(sc);
v |= sc->sc_ledpin;
iwi_write_event(sc, iwi_toggle_event(v));
sc->sc_blinking = 1;
sc->sc_ledoff = off;
callout_reset(&sc->sc_ledtimer, on, iwi_led_off, sc);
}
static void
iwi_led_event(struct iwi_softc *sc, int event)
{
#define N(a) (sizeof(a)/sizeof(a[0]))
/* NB: on/off times from the Atheros NDIS driver, w/ permission */
static const struct {
u_int rate; /* tx/rx iwi rate */
u_int16_t timeOn; /* LED on time (ms) */
u_int16_t timeOff; /* LED off time (ms) */
} blinkrates[] = {
{ IWI_RATE_OFDM54, 40, 10 },
{ IWI_RATE_OFDM48, 44, 11 },
{ IWI_RATE_OFDM36, 50, 13 },
{ IWI_RATE_OFDM24, 57, 14 },
{ IWI_RATE_OFDM18, 67, 16 },
{ IWI_RATE_OFDM12, 80, 20 },
{ IWI_RATE_DS11, 100, 25 },
{ IWI_RATE_OFDM9, 133, 34 },
{ IWI_RATE_OFDM6, 160, 40 },
{ IWI_RATE_DS5, 200, 50 },
{ 6, 240, 58 }, /* XXX 3Mb/s if it existed */
{ IWI_RATE_DS2, 267, 66 },
{ IWI_RATE_DS1, 400, 100 },
{ 0, 500, 130 }, /* unknown rate/polling */
};
uint32_t txrate;
int j = 0; /* XXX silence compiler */
sc->sc_ledevent = ticks; /* time of last event */
if (sc->sc_blinking) /* don't interrupt active blink */
return;
switch (event) {
case IWI_LED_POLL:
j = N(blinkrates)-1;
break;
case IWI_LED_TX:
/* read current transmission rate from adapter */
txrate = CSR_READ_4(sc, IWI_CSR_CURRENT_TX_RATE);
if (blinkrates[sc->sc_txrix].rate != txrate) {
for (j = 0; j < N(blinkrates)-1; j++)
if (blinkrates[j].rate == txrate)
break;
sc->sc_txrix = j;
} else
j = sc->sc_txrix;
break;
case IWI_LED_RX:
if (blinkrates[sc->sc_rxrix].rate != sc->sc_rxrate) {
for (j = 0; j < N(blinkrates)-1; j++)
if (blinkrates[j].rate == sc->sc_rxrate)
break;
sc->sc_rxrix = j;
} else
j = sc->sc_rxrix;
break;
}
/* XXX beware of overflow */
iwi_led_blink(sc, (blinkrates[j].timeOn * hz) / 1000,
(blinkrates[j].timeOff * hz) / 1000);
#undef N
}
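/*
 * [Illustrative sketch, not part of this commit.]  iwi_led_event() above
 * maps the current tx/rx rate to an (on, off) blink pair and converts the
 * millisecond values to callout ticks with (ms * hz) / 1000, falling back
 * to the last table entry for unknown rates.  The small program below
 * does the same lookup over a trimmed copy of the table; the DEMO_RATE_*
 * codes and DEMO_HZ are arbitrary stand-ins for the IWI_RATE_* values and
 * the kernel hz variable.
 */
#include <stdio.h>

#define DEMO_HZ                 1000
#define DEMO_RATE_OFDM54        54
#define DEMO_RATE_DS11          11
#define DEMO_RATE_DS1           1

static const struct {
        unsigned rate;          /* tx/rx rate code */
        unsigned short on_ms;   /* LED on time */
        unsigned short off_ms;  /* LED off time */
} blinkrates[] = {
        { DEMO_RATE_OFDM54,  40,  10 },
        { DEMO_RATE_DS11,   100,  25 },
        { DEMO_RATE_DS1,    400, 100 },
        { 0,                500, 130 },         /* unknown rate / polling */
};

int
main(void)
{
        unsigned rates[] = { DEMO_RATE_DS11, 7 /* unknown */ };
        size_t i, j, n = sizeof(blinkrates) / sizeof(blinkrates[0]);

        for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
                for (j = 0; j < n - 1; j++)     /* last entry is the fallback */
                        if (blinkrates[j].rate == rates[i])
                                break;
                printf("rate %u -> blink on %d ticks, off %d ticks\n",
                    rates[i], blinkrates[j].on_ms * DEMO_HZ / 1000,
                    blinkrates[j].off_ms * DEMO_HZ / 1000);
        }
        return (0);
}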
static int
iwi_sysctl_softled(SYSCTL_HANDLER_ARGS)
{
struct iwi_softc *sc = arg1;
int softled = sc->sc_softled;
int error;
error = sysctl_handle_int(oidp, &softled, 0, req);
if (error || !req->newptr)
return error;
softled = (softled != 0);
if (softled != sc->sc_softled) {
if (softled) {
uint32_t v = iwi_read_event(sc);
v &= ~sc->sc_ledpin;
iwi_write_event(sc, iwi_toggle_event(v));
}
sc->sc_softled = softled;
}
return 0;
}
static void
iwi_ledattach(struct iwi_softc *sc)
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
sc->sc_blinking = 0;
sc->sc_ledstate = 1;
sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */
callout_init_mtx(&sc->sc_ledtimer, &sc->sc_mtx, 0);
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
iwi_sysctl_softled, "I", "enable/disable software LED support");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"ledpin", CTLFLAG_RW, &sc->sc_ledpin, 0,
"pin setting to turn activity LED on");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0,
"idle time for inactivity LED (ticks)");
/* XXX for debugging */
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"nictype", CTLFLAG_RD, &sc->sc_nictype, 0,
"NIC type from EEPROM");
sc->sc_ledpin = IWI_RST_LED_ACTIVITY;
sc->sc_softled = 1;
sc->sc_nictype = (iwi_read_prom_word(sc, IWI_EEPROM_NIC) >> 8) & 0xff;
if (sc->sc_nictype == 1) {
/*
* NB: LEDs are reversed.
*/
sc->sc_ledpin = IWI_RST_LED_ASSOCIATED;
}
}
static void
iwi_scan_start(struct ieee80211com *ic)
{
/* ignore */
}
static void
iwi_set_channel(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct iwi_softc *sc = ifp->if_softc;
+ struct iwi_softc *sc = ic->ic_softc;
+
if (sc->fw_state == IWI_FW_IDLE)
iwi_setcurchan(sc, ic->ic_curchan->ic_ieee);
}
static void
iwi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
struct ieee80211vap *vap = ss->ss_vap;
- struct ifnet *ifp = vap->iv_ic->ic_ifp;
- struct iwi_softc *sc = ifp->if_softc;
+ struct iwi_softc *sc = vap->iv_ic->ic_softc;
IWI_LOCK_DECL;
IWI_LOCK(sc);
if (iwi_scanchan(sc, maxdwell, 0))
ieee80211_cancel_scan(vap);
IWI_UNLOCK(sc);
}
static void
iwi_scan_mindwell(struct ieee80211_scan_state *ss)
{
/* NB: don't try to abort scan; wait for firmware to finish */
}
static void
iwi_scan_end(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct iwi_softc *sc = ifp->if_softc;
+ struct iwi_softc *sc = ic->ic_softc;
IWI_LOCK_DECL;
IWI_LOCK(sc);
sc->flags &= ~IWI_FLAG_CHANNEL_SCAN;
/* NB: make sure we're still scanning */
if (sc->fw_state == IWI_FW_SCANNING)
iwi_cmd(sc, IWI_CMD_ABORT_SCAN, NULL, 0);
IWI_UNLOCK(sc);
}
Index: head/sys/dev/iwi/if_iwivar.h
===================================================================
--- head/sys/dev/iwi/if_iwivar.h (revision 287196)
+++ head/sys/dev/iwi/if_iwivar.h (revision 287197)
@@ -1,258 +1,261 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2004, 2005
* Damien Bergamini <damien.bergamini@free.fr>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
struct iwi_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_antsignal;
int8_t wr_antnoise;
uint8_t wr_antenna;
};
#define IWI_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | \
(1 << IEEE80211_RADIOTAP_ANTENNA))
struct iwi_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
};
#define IWI_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_CHANNEL))
struct iwi_cmd_ring {
bus_dma_tag_t desc_dmat;
bus_dmamap_t desc_map;
bus_addr_t physaddr;
struct iwi_cmd_desc *desc;
int count;
int queued;
int cur;
int next;
};
struct iwi_tx_data {
bus_dmamap_t map;
struct mbuf *m;
struct ieee80211_node *ni;
};
struct iwi_tx_ring {
bus_dma_tag_t desc_dmat;
bus_dma_tag_t data_dmat;
bus_dmamap_t desc_map;
bus_addr_t physaddr;
bus_addr_t csr_ridx;
bus_addr_t csr_widx;
struct iwi_tx_desc *desc;
struct iwi_tx_data *data;
int count;
int queued;
int cur;
int next;
};
struct iwi_rx_data {
bus_dmamap_t map;
bus_addr_t physaddr;
uint32_t reg;
struct mbuf *m;
};
struct iwi_rx_ring {
bus_dma_tag_t data_dmat;
struct iwi_rx_data *data;
int count;
int cur;
};
struct iwi_node {
struct ieee80211_node in_node;
int in_station;
#define IWI_MAX_IBSSNODE 32
};
struct iwi_fw {
const struct firmware *fp; /* image handle */
const char *data; /* firmware image data */
size_t size; /* firmware image size */
const char *name; /* associated image name */
};
struct iwi_vap {
struct ieee80211vap iwi_vap;
int (*iwi_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define IWI_VAP(vap) ((struct iwi_vap *)(vap))
struct iwi_softc {
- struct ifnet *sc_ifp;
- void (*sc_node_free)(struct ieee80211_node *);
+ struct mtx sc_mtx;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
device_t sc_dev;
- struct mtx sc_mtx;
+ void (*sc_node_free)(struct ieee80211_node *);
+
uint8_t sc_mcast[IEEE80211_ADDR_LEN];
struct unrhdr *sc_unr;
uint32_t flags;
#define IWI_FLAG_FW_INITED (1 << 0)
#define IWI_FLAG_BUSY (1 << 3) /* busy sending a command */
#define IWI_FLAG_ASSOCIATED (1 << 4) /* currently associated */
#define IWI_FLAG_CHANNEL_SCAN (1 << 5)
uint32_t fw_state;
#define IWI_FW_IDLE 0
#define IWI_FW_LOADING 1
#define IWI_FW_ASSOCIATING 2
#define IWI_FW_DISASSOCIATING 3
#define IWI_FW_SCANNING 4
struct iwi_cmd_ring cmdq;
struct iwi_tx_ring txq[WME_NUM_AC];
struct iwi_rx_ring rxq;
struct resource *irq;
struct resource *mem;
bus_space_tag_t sc_st;
bus_space_handle_t sc_sh;
void *sc_ih;
/*
* The card needs external firmware images to work, which is made of a
* bootloader, microcode and firmware proper. In version 3.00 and
* above, all pieces are contained in a single image, preceded by a
* struct iwi_firmware_hdr indicating the size of the 3 pieces.
* Old firmware < 3.0 has separate boot and ucode, so we need to
* load all of them explicitly.
* To avoid issues related to fragmentation, we keep the block of
* DMA-able memory around until detach time, and reallocate it when
* it becomes too small. fw_dma_size is the size currently allocated.
*/
int fw_dma_size;
uint32_t fw_flags; /* allocation status */
#define IWI_FW_HAVE_DMAT 0x01
#define IWI_FW_HAVE_MAP 0x02
#define IWI_FW_HAVE_PHY 0x04
bus_dma_tag_t fw_dmat;
bus_dmamap_t fw_map;
bus_addr_t fw_physaddr;
void *fw_virtaddr;
enum ieee80211_opmode fw_mode; /* mode of current firmware */
struct iwi_fw fw_boot; /* boot firmware */
struct iwi_fw fw_uc; /* microcode */
struct iwi_fw fw_fw; /* operating mode support */
int curchan; /* current h/w channel # */
int antenna;
int bluetooth;
struct iwi_associate assoc;
struct iwi_wme_params wme[3];
u_int sc_scangen;
struct task sc_radiontask; /* radio on processing */
struct task sc_radiofftask; /* radio off processing */
struct task sc_restarttask; /* restart adapter processing */
struct task sc_disassoctask;
struct task sc_wmetask; /* set wme parameters */
struct task sc_monitortask;
- unsigned int sc_softled : 1, /* enable LED gpio status */
+ unsigned int sc_running : 1, /* initialized */
+ sc_softled : 1, /* enable LED gpio status */
sc_ledstate: 1, /* LED on/off state */
sc_blinking: 1; /* LED blink operation active */
u_int sc_nictype; /* NIC type from EEPROM */
u_int sc_ledpin; /* mask for activity LED */
u_int sc_ledidle; /* idle polling interval */
int sc_ledevent; /* time of last LED event */
u_int8_t sc_rxrate; /* current rx rate for LED */
u_int8_t sc_rxrix;
u_int8_t sc_txrate; /* current tx rate for LED */
u_int8_t sc_txrix;
u_int16_t sc_ledoff; /* off time for current blink */
struct callout sc_ledtimer; /* led off timer */
struct callout sc_wdtimer; /* watchdog timer */
struct callout sc_rftimer; /* rfkill timer */
int sc_tx_timer;
int sc_state_timer; /* firmware state timer */
int sc_busy_timer; /* firmware cmd timer */
struct iwi_rx_radiotap_header sc_rxtap;
struct iwi_tx_radiotap_header sc_txtap;
struct iwi_notif_link_quality sc_linkqual;
int sc_linkqual_valid;
};
#define IWI_STATE_BEGIN(_sc, _state) do { \
KASSERT(_sc->fw_state == IWI_FW_IDLE, \
("iwi firmware not idle, state %s", iwi_fw_states[_sc->fw_state]));\
_sc->fw_state = _state; \
_sc->sc_state_timer = 5; \
DPRINTF(("enter %s state\n", iwi_fw_states[_state])); \
} while (0)
#define IWI_STATE_END(_sc, _state) do { \
if (_sc->fw_state == _state) \
DPRINTF(("exit %s state\n", iwi_fw_states[_state])); \
else \
DPRINTF(("expected %s state, got %s\n", \
iwi_fw_states[_state], iwi_fw_states[_sc->fw_state])); \
_sc->fw_state = IWI_FW_IDLE; \
wakeup(_sc); \
_sc->sc_state_timer = 0; \
} while (0)
/*
* NB.: This models the only instance of async locking in iwi_init_locked
* and must be kept in sync.
*/
#define IWI_LOCK_INIT(sc) \
mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->sc_dev), \
MTX_NETWORK_LOCK, MTX_DEF)
#define IWI_LOCK_DESTROY(sc) mtx_destroy(&(sc)->sc_mtx)
#define IWI_LOCK_DECL int __waslocked = 0
#define IWI_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
#define IWI_LOCK(sc) do { \
if (!(__waslocked = mtx_owned(&(sc)->sc_mtx))) \
mtx_lock(&(sc)->sc_mtx); \
} while (0)
#define IWI_UNLOCK(sc) do { \
if (!__waslocked) \
mtx_unlock(&(sc)->sc_mtx); \
} while (0)
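/*
 * [Illustrative sketch, not part of this commit.]  The IWI_LOCK_DECL /
 * IWI_LOCK / IWI_UNLOCK macros above let a function be entered either
 * with or without sc_mtx held: __waslocked records whether the mutex was
 * already owned, so the matching unlock is skipped when the caller owned
 * it.  The single-threaded toy below mimics that control flow with a fake
 * "owned" flag instead of mtx_owned()/mtx_lock(); it only shows the
 * conditional lock/unlock idiom, not real locking.
 */
#include <assert.h>
#include <stdio.h>

struct demo_sc {
        int mtx_owned;
};

#define DEMO_LOCK_DECL  int __waslocked = 0
#define DEMO_LOCK(sc) do {                                              \
        if (!(__waslocked = (sc)->mtx_owned))                           \
                (sc)->mtx_owned = 1;                                    \
} while (0)
#define DEMO_UNLOCK(sc) do {                                            \
        if (!__waslocked)                                               \
                (sc)->mtx_owned = 0;                                    \
} while (0)

static void
demo_helper(struct demo_sc *sc)
{
        DEMO_LOCK_DECL;

        DEMO_LOCK(sc);
        assert(sc->mtx_owned);          /* "locked" either way */
        DEMO_UNLOCK(sc);
}

int
main(void)
{
        struct demo_sc sc = { 0 };

        demo_helper(&sc);               /* caller does not hold the lock */
        assert(sc.mtx_owned == 0);      /* helper released it */

        sc.mtx_owned = 1;               /* caller already holds the lock */
        demo_helper(&sc);
        assert(sc.mtx_owned == 1);      /* helper left it held */

        printf("conditional lock/unlock behaved as expected\n");
        return (0);
}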
Index: head/sys/dev/iwm/if_iwm.c
===================================================================
--- head/sys/dev/iwm/if_iwm.c (revision 287196)
+++ head/sys/dev/iwm/if_iwm.c (revision 287197)
@@ -1,5124 +1,5028 @@
/* $OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $ */
/*
* Copyright (c) 2014 genua mbh <info@genua.de>
* Copyright (c) 2014 Fixup Software Ltd.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*-
* Based on BSD-licensed source modules in the Linux iwlwifi driver,
* which were used as the reference documentation for this implementation.
*
* Driver version we are currently based off of is
* Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
*
***********************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>
#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_binding.h>
#include <dev/iwm/if_iwm_phy_db.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
#include <dev/iwm/if_iwm_time_event.h>
#include <dev/iwm/if_iwm_power.h>
#include <dev/iwm/if_iwm_scan.h>
#include <dev/iwm/if_iwm_pcie_trans.h>
const uint8_t iwm_nvm_channels[] = {
/* 2.4 GHz */
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
/* 5 GHz */
36, 40, 44 , 48, 52, 56, 60, 64,
100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
149, 153, 157, 161, 165
};
#define IWM_NUM_2GHZ_CHANNELS 14
/*
* XXX For now, there's simply a fixed set of rate table entries
* that are populated.
*/
const struct iwm_rate {
uint8_t rate;
uint8_t plcp;
} iwm_rates[] = {
{ 2, IWM_RATE_1M_PLCP },
{ 4, IWM_RATE_2M_PLCP },
{ 11, IWM_RATE_5M_PLCP },
{ 22, IWM_RATE_11M_PLCP },
{ 12, IWM_RATE_6M_PLCP },
{ 18, IWM_RATE_9M_PLCP },
{ 24, IWM_RATE_12M_PLCP },
{ 36, IWM_RATE_18M_PLCP },
{ 48, IWM_RATE_24M_PLCP },
{ 72, IWM_RATE_36M_PLCP },
{ 96, IWM_RATE_48M_PLCP },
{ 108, IWM_RATE_54M_PLCP },
};
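/*
 * For reference: the 'rate' values above are net80211-style rates in units of
 * 500 kbps (2 = 1 Mbps CCK ... 108 = 54 Mbps OFDM), and 'plcp' is the PLCP
 * signal value the firmware expects.  The CCK-first ordering is what the
 * IWM_RIDX_* macros below rely on.
 */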
#define IWM_RIDX_CCK 0
#define IWM_RIDX_OFDM 4
#define IWM_RIDX_MAX (nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int iwm_firmware_store_section(struct iwm_softc *,
enum iwm_ucode_type,
const uint8_t *, size_t);
static int iwm_set_default_calib(struct iwm_softc *, const void *);
static void iwm_fw_info_free(struct iwm_fw_info *);
static int iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
static void iwm_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
bus_size_t, bus_size_t);
static void iwm_dma_contig_free(struct iwm_dma_info *);
static int iwm_alloc_fwmem(struct iwm_softc *);
static void iwm_free_fwmem(struct iwm_softc *);
static int iwm_alloc_sched(struct iwm_softc *);
static void iwm_free_sched(struct iwm_softc *);
static int iwm_alloc_kw(struct iwm_softc *);
static void iwm_free_kw(struct iwm_softc *);
static int iwm_alloc_ict(struct iwm_softc *);
static void iwm_free_ict(struct iwm_softc *);
static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
int);
static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void iwm_enable_interrupts(struct iwm_softc *);
static void iwm_restore_interrupts(struct iwm_softc *);
static void iwm_disable_interrupts(struct iwm_softc *);
static void iwm_ict_reset(struct iwm_softc *);
static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
static void iwm_stop_device(struct iwm_softc *);
static void iwm_mvm_nic_config(struct iwm_softc *);
static int iwm_nic_rx_init(struct iwm_softc *);
static int iwm_nic_tx_init(struct iwm_softc *);
static int iwm_nic_init(struct iwm_softc *);
static void iwm_enable_txq(struct iwm_softc *, int, int);
static int iwm_post_alive(struct iwm_softc *);
static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
uint16_t, uint8_t *, uint16_t *);
static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
uint16_t *);
static void iwm_init_channel_map(struct iwm_softc *,
const uint16_t * const);
static int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
const uint16_t *, const uint16_t *, uint8_t,
uint8_t);
struct iwm_nvm_section;
static int iwm_parse_nvm_sections(struct iwm_softc *,
struct iwm_nvm_section *);
static int iwm_nvm_init(struct iwm_softc *);
static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
const uint8_t *, uint32_t);
static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int iwm_fw_alive(struct iwm_softc *, uint32_t);
static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
enum iwm_ucode_type);
static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int iwm_rx_addbuf(struct iwm_softc *, int, int);
static int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int iwm_mvm_get_signal_strength(struct iwm_softc *,
struct iwm_rx_phy_info *);
static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
struct iwm_rx_packet *,
struct iwm_rx_data *);
static int iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
struct iwm_rx_data *);
static void iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
struct iwm_rx_packet *,
struct iwm_node *);
static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
struct iwm_rx_data *);
static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
#if 0
static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
uint16_t);
#endif
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
struct ieee80211_frame *, struct iwm_tx_cmd *);
static int iwm_tx(struct iwm_softc *, struct mbuf *,
struct ieee80211_node *, int);
static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
struct iwm_mvm_add_sta_cmd_v5 *);
static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
struct iwm_mvm_add_sta_cmd_v6 *,
int *);
static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
int);
static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
static int iwm_mvm_add_int_sta_common(struct iwm_softc *,
struct iwm_int_sta *,
const uint8_t *, uint16_t, uint16_t);
static int iwm_mvm_add_aux_sta(struct iwm_softc *);
static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
static int iwm_assoc(struct ieee80211vap *, struct iwm_softc *);
static int iwm_release(struct iwm_softc *, struct iwm_node *);
static struct ieee80211_node *
iwm_node_alloc(struct ieee80211vap *,
const uint8_t[IEEE80211_ADDR_LEN]);
static void iwm_setrates(struct iwm_softc *, struct iwm_node *);
static int iwm_media_change(struct ifnet *);
static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void iwm_endscan_cb(void *, int);
static int iwm_init_hw(struct iwm_softc *);
-static void iwm_init(void *);
-static void iwm_init_locked(struct iwm_softc *);
-static void iwm_start(struct ifnet *);
-static void iwm_start_locked(struct ifnet *);
-static void iwm_stop(struct ifnet *, int);
-static void iwm_stop_locked(struct ifnet *);
+static void iwm_init(struct iwm_softc *);
+static void iwm_start(struct iwm_softc *);
+static void iwm_stop(struct iwm_softc *);
static void iwm_watchdog(void *);
-static int iwm_ioctl(struct ifnet *, u_long, iwm_caddr_t);
+static void iwm_parent(struct ieee80211com *);
#ifdef IWM_DEBUG
static const char *
iwm_desc_lookup(uint32_t);
static void iwm_nic_error(struct iwm_softc *);
#endif
static void iwm_notif_intr(struct iwm_softc *);
static void iwm_intr(void *);
static int iwm_attach(device_t);
static void iwm_preinit(void *);
static int iwm_detach_local(struct iwm_softc *sc, int);
static void iwm_init_task(void *);
static void iwm_radiotap_attach(struct iwm_softc *);
static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int,
enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void iwm_vap_delete(struct ieee80211vap *);
static void iwm_scan_start(struct ieee80211com *);
static void iwm_scan_end(struct ieee80211com *);
static void iwm_update_mcast(struct ieee80211com *);
static void iwm_set_channel(struct ieee80211com *);
static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void iwm_scan_mindwell(struct ieee80211_scan_state *);
static int iwm_detach(device_t);
/*
* Firmware parser.
*/
static int
iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
{
const struct iwm_fw_cscheme_list *l = (const void *)data;
if (dlen < sizeof(*l) ||
dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
return EINVAL;
/* we don't actually store anything for now, always use s/w crypto */
return 0;
}
static int
iwm_firmware_store_section(struct iwm_softc *sc,
enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
{
struct iwm_fw_sects *fws;
struct iwm_fw_onesect *fwone;
if (type >= IWM_UCODE_TYPE_MAX)
return EINVAL;
if (dlen < sizeof(uint32_t))
return EINVAL;
fws = &sc->sc_fw.fw_sects[type];
if (fws->fw_count >= IWM_UCODE_SECT_MAX)
return EINVAL;
fwone = &fws->fw_sect[fws->fw_count];
/* the first 32 bits are the device load offset */
memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
/* rest is data */
fwone->fws_data = data + sizeof(uint32_t);
fwone->fws_len = dlen - sizeof(uint32_t);
fws->fw_count++;
fws->fw_totlen += fwone->fws_len;
return 0;
}
/* iwlwifi: iwl-drv.c */
struct iwm_tlv_calib_data {
uint32_t ucode_type;
struct iwm_tlv_calib_ctrl calib;
} __packed;
static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
const struct iwm_tlv_calib_data *def_calib = data;
uint32_t ucode_type = le32toh(def_calib->ucode_type);
if (ucode_type >= IWM_UCODE_TYPE_MAX) {
device_printf(sc->sc_dev,
"Wrong ucode_type %u for default "
"calibration.\n", ucode_type);
return EINVAL;
}
sc->sc_default_calib[ucode_type].flow_trigger =
def_calib->calib.flow_trigger;
sc->sc_default_calib[ucode_type].event_trigger =
def_calib->calib.event_trigger;
return 0;
}
static void
iwm_fw_info_free(struct iwm_fw_info *fw)
{
firmware_put(fw->fw_rawdata, FIRMWARE_UNLOAD);
fw->fw_rawdata = NULL;
fw->fw_rawsize = 0;
/* don't touch fw->fw_status */
memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
}
static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
struct iwm_fw_info *fw = &sc->sc_fw;
const struct iwm_tlv_ucode_header *uhdr;
struct iwm_ucode_tlv tlv;
enum iwm_ucode_tlv_type tlv_type;
const struct firmware *fwp;
const uint8_t *data;
int error = 0;
size_t len;
if (fw->fw_status == IWM_FW_STATUS_DONE &&
ucode_type != IWM_UCODE_TYPE_INIT)
return 0;
while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfwp", 0);
fw->fw_status = IWM_FW_STATUS_INPROGRESS;
if (fw->fw_rawdata != NULL)
iwm_fw_info_free(fw);
/*
* Load firmware into driver memory.
* fw_rawdata and fw_rawsize will be set.
*/
IWM_UNLOCK(sc);
fwp = firmware_get(sc->sc_fwname);
if (fwp == NULL) {
device_printf(sc->sc_dev,
"could not read firmware %s (error %d)\n",
sc->sc_fwname, error);
IWM_LOCK(sc);
goto out;
}
IWM_LOCK(sc);
fw->fw_rawdata = fwp->data;
fw->fw_rawsize = fwp->datasize;
/*
* Parse firmware contents
*/
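/*
 * Layout sketch: the image starts with a fixed header (magic, ver, data[]),
 * followed by a stream of { type, length } TLV records whose payloads are
 * padded to a 4-byte boundary - hence the roundup(tlv_len, 4) at the bottom
 * of the parse loop below.
 */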
uhdr = (const void *)fw->fw_rawdata;
if (*(const uint32_t *)fw->fw_rawdata != 0
|| le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
device_printf(sc->sc_dev, "invalid firmware %s\n",
sc->sc_fwname);
error = EINVAL;
goto out;
}
sc->sc_fwver = le32toh(uhdr->ver);
data = uhdr->data;
len = fw->fw_rawsize - sizeof(*uhdr);
while (len >= sizeof(tlv)) {
size_t tlv_len;
const void *tlv_data;
memcpy(&tlv, data, sizeof(tlv));
tlv_len = le32toh(tlv.length);
tlv_type = le32toh(tlv.type);
len -= sizeof(tlv);
data += sizeof(tlv);
tlv_data = data;
if (len < tlv_len) {
device_printf(sc->sc_dev,
"firmware too short: %zu bytes\n",
len);
error = EINVAL;
goto parse_out;
}
switch ((int)tlv_type) {
case IWM_UCODE_TLV_PROBE_MAX_LEN:
if (tlv_len < sizeof(uint32_t)) {
device_printf(sc->sc_dev,
"%s: PROBE_MAX_LEN (%d) < sizeof(uint32_t)\n",
__func__,
(int) tlv_len);
error = EINVAL;
goto parse_out;
}
sc->sc_capa_max_probe_len
= le32toh(*(const uint32_t *)tlv_data);
/* limit it to something sensible */
if (sc->sc_capa_max_probe_len > (1<<16)) {
IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
"%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
"ridiculous\n", __func__);
error = EINVAL;
goto parse_out;
}
break;
case IWM_UCODE_TLV_PAN:
if (tlv_len) {
device_printf(sc->sc_dev,
"%s: IWM_UCODE_TLV_PAN: tlv_len (%d) > 0\n",
__func__,
(int) tlv_len);
error = EINVAL;
goto parse_out;
}
sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
break;
case IWM_UCODE_TLV_FLAGS:
if (tlv_len < sizeof(uint32_t)) {
device_printf(sc->sc_dev,
"%s: IWM_UCODE_TLV_FLAGS: tlv_len (%d) < sizeof(uint32_t)\n",
__func__,
(int) tlv_len);
error = EINVAL;
goto parse_out;
}
/*
* Apparently there can be many flags, but Linux driver
* parses only the first one, and so do we.
*
* XXX: why does this override IWM_UCODE_TLV_PAN?
* Intentional or a bug? Observations from
* current firmware file:
* 1) TLV_PAN is parsed first
* 2) TLV_FLAGS contains TLV_FLAGS_PAN
* ==> this resets TLV_PAN to itself... hnnnk
*/
sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
break;
case IWM_UCODE_TLV_CSCHEME:
if ((error = iwm_store_cscheme(sc,
tlv_data, tlv_len)) != 0) {
device_printf(sc->sc_dev,
"%s: iwm_store_cscheme(): returned %d\n",
__func__,
error);
goto parse_out;
}
break;
case IWM_UCODE_TLV_NUM_OF_CPU:
if (tlv_len != sizeof(uint32_t)) {
device_printf(sc->sc_dev,
"%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
__func__,
(int) tlv_len);
error = EINVAL;
goto parse_out;
}
if (le32toh(*(const uint32_t*)tlv_data) != 1) {
device_printf(sc->sc_dev,
"%s: driver supports "
"only TLV_NUM_OF_CPU == 1",
__func__);
error = EINVAL;
goto parse_out;
}
break;
case IWM_UCODE_TLV_SEC_RT:
if ((error = iwm_firmware_store_section(sc,
IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
device_printf(sc->sc_dev,
"%s: IWM_UCODE_TYPE_REGULAR: iwm_firmware_store_section() failed; %d\n",
__func__,
error);
goto parse_out;
}
break;
case IWM_UCODE_TLV_SEC_INIT:
if ((error = iwm_firmware_store_section(sc,
IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0) {
device_printf(sc->sc_dev,
"%s: IWM_UCODE_TYPE_INIT: iwm_firmware_store_section() failed; %d\n",
__func__,
error);
goto parse_out;
}
break;
case IWM_UCODE_TLV_SEC_WOWLAN:
if ((error = iwm_firmware_store_section(sc,
IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0) {
device_printf(sc->sc_dev,
"%s: IWM_UCODE_TYPE_WOW: iwm_firmware_store_section() failed; %d\n",
__func__,
error);
goto parse_out;
}
break;
case IWM_UCODE_TLV_DEF_CALIB:
if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
device_printf(sc->sc_dev,
"%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%d) < sizeof(iwm_tlv_calib_data) (%d)\n",
__func__,
(int) tlv_len,
(int) sizeof(struct iwm_tlv_calib_data));
error = EINVAL;
goto parse_out;
}
if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
device_printf(sc->sc_dev,
"%s: iwm_set_default_calib() failed: %d\n",
__func__,
error);
goto parse_out;
}
break;
case IWM_UCODE_TLV_PHY_SKU:
if (tlv_len != sizeof(uint32_t)) {
error = EINVAL;
device_printf(sc->sc_dev,
"%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%d) < sizeof(uint32_t)\n",
__func__,
(int) tlv_len);
goto parse_out;
}
sc->sc_fw_phy_config =
le32toh(*(const uint32_t *)tlv_data);
break;
case IWM_UCODE_TLV_API_CHANGES_SET:
case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
/* ignore, not used by current driver */
break;
default:
device_printf(sc->sc_dev,
"%s: unknown firmware section %d, abort\n",
__func__, tlv_type);
error = EINVAL;
goto parse_out;
}
len -= roundup(tlv_len, 4);
data += roundup(tlv_len, 4);
}
KASSERT(error == 0, ("unhandled error"));
parse_out:
if (error) {
device_printf(sc->sc_dev, "firmware parse error %d, "
"section type %d\n", error, tlv_type);
}
if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
device_printf(sc->sc_dev,
"device uses unsupported power ops\n");
error = ENOTSUP;
}
out:
if (error) {
fw->fw_status = IWM_FW_STATUS_NONE;
if (fw->fw_rawdata != NULL)
iwm_fw_info_free(fw);
} else
fw->fw_status = IWM_FW_STATUS_DONE;
wakeup(&sc->sc_fw);
return error;
}
/*
* DMA resource routines
*/
static void
iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
if (error != 0)
return;
KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
*(bus_addr_t *)arg = segs[0].ds_addr;
}
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
bus_size_t size, bus_size_t alignment)
{
int error;
dma->tag = NULL;
dma->size = size;
error = bus_dma_tag_create(tag, alignment,
0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
if (error != 0)
goto fail;
error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
if (error != 0)
goto fail;
error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
iwm_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
if (error != 0)
goto fail;
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
return 0;
fail: iwm_dma_contig_free(dma);
return error;
}
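/*
 * Typical use of the helper above: iwm_alloc_kw() further down allocates the
 * 4 KB keep-warm page with 4 KB alignment through it.
 */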
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
if (dma->map != NULL) {
if (dma->vaddr != NULL) {
bus_dmamap_sync(dma->tag, dma->map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(dma->tag, dma->map);
bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
dma->vaddr = NULL;
}
bus_dmamap_destroy(dma->tag, dma->map);
dma->map = NULL;
}
if (dma->tag != NULL) {
bus_dma_tag_destroy(dma->tag);
dma->tag = NULL;
}
}
/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
/* Must be aligned on a 16-byte boundary. */
return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
sc->sc_fwdmasegsz, 16);
}
static void
iwm_free_fwmem(struct iwm_softc *sc)
{
iwm_dma_contig_free(&sc->fw_dma);
}
/* tx scheduler rings. not used? */
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
int rv;
/* TX scheduler rings must be aligned on a 1KB boundary. */
rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
return rv;
}
static void
iwm_free_sched(struct iwm_softc *sc)
{
iwm_dma_contig_free(&sc->sched_dma);
}
/* keep-warm page is used internally by the card. see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
static void
iwm_free_kw(struct iwm_softc *sc)
{
iwm_dma_contig_free(&sc->kw_dma);
}
/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
static void
iwm_free_ict(struct iwm_softc *sc)
{
iwm_dma_contig_free(&sc->ict_dma);
}
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
bus_size_t size;
int i, error;
ring->cur = 0;
/* Allocate RX descriptors (256-byte aligned). */
size = IWM_RX_RING_COUNT * sizeof(uint32_t);
error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
if (error != 0) {
device_printf(sc->sc_dev,
"could not allocate RX ring DMA memory\n");
goto fail;
}
ring->desc = ring->desc_dma.vaddr;
/* Allocate RX status area (16-byte aligned). */
error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
sizeof(*ring->stat), 16);
if (error != 0) {
device_printf(sc->sc_dev,
"could not allocate RX status DMA memory\n");
goto fail;
}
ring->stat = ring->stat_dma.vaddr;
/* Create RX buffer DMA tag. */
error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
&ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not create RX buf DMA tag, error %d\n",
__func__, error);
goto fail;
}
/*
* Allocate and map RX buffers.
*/
for (i = 0; i < IWM_RX_RING_COUNT; i++) {
if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
goto fail;
}
}
return 0;
fail: iwm_free_rx_ring(sc, ring);
return error;
}
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
/* XXX print out if we can't lock the NIC? */
if (iwm_nic_lock(sc)) {
/* XXX handle if RX stop doesn't finish? */
(void) iwm_pcie_rx_stop(sc);
iwm_nic_unlock(sc);
}
ring->cur = 0;
}
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
int i;
iwm_dma_contig_free(&ring->desc_dma);
iwm_dma_contig_free(&ring->stat_dma);
for (i = 0; i < IWM_RX_RING_COUNT; i++) {
struct iwm_rx_data *data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
if (data->map != NULL) {
bus_dmamap_destroy(ring->data_dmat, data->map);
data->map = NULL;
}
}
if (ring->data_dmat != NULL) {
bus_dma_tag_destroy(ring->data_dmat);
ring->data_dmat = NULL;
}
}
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
bus_addr_t paddr;
bus_size_t size;
int i, error;
ring->qid = qid;
ring->queued = 0;
ring->cur = 0;
/* Allocate TX descriptors (256-byte aligned). */
size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
if (error != 0) {
device_printf(sc->sc_dev,
"could not allocate TX ring DMA memory\n");
goto fail;
}
ring->desc = ring->desc_dma.vaddr;
/*
* We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
* to allocate commands space for other rings.
*/
if (qid > IWM_MVM_CMD_QUEUE)
return 0;
size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
if (error != 0) {
device_printf(sc->sc_dev,
"could not allocate TX cmd DMA memory\n");
goto fail;
}
ring->cmd = ring->cmd_dma.vaddr;
error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
IWM_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
&ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
goto fail;
}
paddr = ring->cmd_dma.paddr;
for (i = 0; i < IWM_TX_RING_COUNT; i++) {
struct iwm_tx_data *data = &ring->data[i];
data->cmd_paddr = paddr;
data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
+ offsetof(struct iwm_tx_cmd, scratch);
paddr += sizeof(struct iwm_device_cmd);
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev,
"could not create TX buf DMA map\n");
goto fail;
}
}
KASSERT(paddr == ring->cmd_dma.paddr + size,
("invalid physical address"));
return 0;
fail: iwm_free_tx_ring(sc, ring);
return error;
}
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
int i;
for (i = 0; i < IWM_TX_RING_COUNT; i++) {
struct iwm_tx_data *data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
}
/* Clear TX descriptors. */
memset(ring->desc, 0, ring->desc_dma.size);
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
sc->qfullmsk &= ~(1 << ring->qid);
ring->queued = 0;
ring->cur = 0;
}
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
int i;
iwm_dma_contig_free(&ring->desc_dma);
iwm_dma_contig_free(&ring->cmd_dma);
for (i = 0; i < IWM_TX_RING_COUNT; i++) {
struct iwm_tx_data *data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
if (data->map != NULL) {
bus_dmamap_destroy(ring->data_dmat, data->map);
data->map = NULL;
}
}
if (ring->data_dmat != NULL) {
bus_dma_tag_destroy(ring->data_dmat);
ring->data_dmat = NULL;
}
}
/*
* High-level hardware frobbing routines
*/
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
sc->sc_intmask = IWM_CSR_INI_SET_MASK;
IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
/* disable interrupts */
IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
/* acknowledge all interrupts */
IWM_WRITE(sc, IWM_CSR_INT, ~0);
IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
}
static void
iwm_ict_reset(struct iwm_softc *sc)
{
iwm_disable_interrupts(sc);
/* Reset ICT table. */
memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
sc->ict_cur = 0;
/* Set physical address of ICT table (4KB aligned). */
IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
IWM_CSR_DRAM_INT_TBL_ENABLE
| IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
| sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
/* Switch to ICT interrupt mode in driver. */
sc->sc_flags |= IWM_FLAG_USE_ICT;
/* Re-enable interrupts. */
IWM_WRITE(sc, IWM_CSR_INT, ~0);
iwm_enable_interrupts(sc);
}
/* iwlwifi pcie/trans.c */
/*
* Since this hard-resets things, it's time to actually
* mark the first vap (if any) as having no mac context.
* It's annoying, but since the driver is potentially being
* stop/start'ed whilst active (thanks openbsd port!) we
* have to correctly track this.
*/
static void
iwm_stop_device(struct iwm_softc *sc)
{
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
int chnl, ntries;
int qid;
/* tell the device to stop sending interrupts */
iwm_disable_interrupts(sc);
/*
* FreeBSD-local: mark the first vap as not-uploaded,
* so the next transition through auth/assoc
* will correctly populate the MAC context.
*/
if (vap) {
struct iwm_vap *iv = IWM_VAP(vap);
iv->is_uploaded = 0;
}
/* device going down, Stop using ICT table */
sc->sc_flags &= ~IWM_FLAG_USE_ICT;
/* stop tx and rx. tx and rx bits, as usual, are from if_iwn */
iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
/* Stop all DMA channels. */
if (iwm_nic_lock(sc)) {
for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
IWM_WRITE(sc,
IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
for (ntries = 0; ntries < 200; ntries++) {
uint32_t r;
r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
chnl))
break;
DELAY(20);
}
}
iwm_nic_unlock(sc);
}
/* Stop RX ring. */
iwm_reset_rx_ring(sc, &sc->rxq);
/* Reset all TX rings. */
for (qid = 0; qid < nitems(sc->txq); qid++)
iwm_reset_tx_ring(sc, &sc->txq[qid]);
/*
* Power-down device's busmaster DMA clocks
*/
iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
DELAY(5);
/* Make sure (redundant) we've released our request to stay awake */
IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwm_apm_stop(sc);
/* Upon stop, the APM issues an interrupt if HW RF kill is set.
* Clear the interrupt again here.
*/
iwm_disable_interrupts(sc);
/* stop and reset the on-board processor */
IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
/*
* Even if we stop the HW, we still want the RF kill
* interrupt
*/
iwm_enable_rfkill_int(sc);
iwm_check_rfkill(sc);
}
/* iwlwifi: mvm/ops.c */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
uint32_t reg_val = 0;
radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
IWM_FW_PHY_CFG_RADIO_TYPE_POS;
radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
IWM_FW_PHY_CFG_RADIO_STEP_POS;
radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
IWM_FW_PHY_CFG_RADIO_DASH_POS;
/* SKU control */
reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
/* radio configuration */
reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
IWM_DPRINTF(sc, IWM_DEBUG_RESET,
"Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
radio_cfg_step, radio_cfg_dash);
/*
* W/A : NIC is stuck in a reset state after Early PCIe power off
* (PCIe power is lost before PERST# is asserted), causing ME FW
* to lose ownership and be unable to obtain it back.
*/
iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
if (!iwm_nic_lock(sc))
return EBUSY;
/*
* Initialize RX ring. This is from the iwn driver.
*/
memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
/* stop DMA */
IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
/* Set physical address of RX ring (256-byte aligned). */
IWM_WRITE(sc,
IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
/* Set physical address of RX status (16-byte aligned). */
IWM_WRITE(sc,
IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
/* Enable RX. */
/*
* Note: Linux driver also sets this:
* (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
*
* It causes weird behavior. YMMV.
*/
IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
/* W/A for interrupt coalescing bug in 7260 and 3160 */
if (sc->host_interrupt_operation_mode)
IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
/*
* Thus sayeth el jefe (iwlwifi) via a comment:
*
* This value should initially be 0 (before preparing any
* RBs), should be 8 after preparing the first 8 RBs (for example)
*/
IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
iwm_nic_unlock(sc);
return 0;
}
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
int qid;
if (!iwm_nic_lock(sc))
return EBUSY;
/* Deactivate TX scheduler. */
iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
/* Set physical address of "keep warm" page (16-byte aligned). */
IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
/* Initialize TX rings. */
for (qid = 0; qid < nitems(sc->txq); qid++) {
struct iwm_tx_ring *txq = &sc->txq[qid];
/* Set physical address of TX ring (256-byte aligned). */
IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
txq->desc_dma.paddr >> 8);
IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
"%s: loading ring %d descriptors (%p) at %lx\n",
__func__,
qid, txq->desc,
(unsigned long) (txq->desc_dma.paddr >> 8));
}
iwm_nic_unlock(sc);
return 0;
}
static int
iwm_nic_init(struct iwm_softc *sc)
{
int error;
iwm_apm_init(sc);
iwm_set_pwr(sc);
iwm_mvm_nic_config(sc);
if ((error = iwm_nic_rx_init(sc)) != 0)
return error;
/*
* Ditto for TX, from iwn
*/
if ((error = iwm_nic_tx_init(sc)) != 0)
return error;
IWM_DPRINTF(sc, IWM_DEBUG_RESET,
"%s: shadow registers enabled\n", __func__);
IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
return 0;
}
enum iwm_mvm_tx_fifo {
IWM_MVM_TX_FIFO_BK = 0,
IWM_MVM_TX_FIFO_BE,
IWM_MVM_TX_FIFO_VI,
IWM_MVM_TX_FIFO_VO,
IWM_MVM_TX_FIFO_MCAST = 5,
};
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
IWM_MVM_TX_FIFO_VO,
IWM_MVM_TX_FIFO_VI,
IWM_MVM_TX_FIFO_BE,
IWM_MVM_TX_FIFO_BK,
};
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
if (!iwm_nic_lock(sc)) {
device_printf(sc->sc_dev,
"%s: cannot enable txq %d\n",
__func__,
qid);
return; /* XXX return EBUSY */
}
/* deactivate before configuration */
iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
(0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
| (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
if (qid != IWM_MVM_CMD_QUEUE) {
iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
}
iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
/* Set scheduler window size and frame limit. */
iwm_write_mem32(sc,
sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
sizeof(uint32_t),
((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
(1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
(fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
(1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
IWM_SCD_QUEUE_STTS_REG_MSK);
iwm_nic_unlock(sc);
IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
"%s: enabled txq %d FIFO %d\n",
__func__, qid, fifo);
}
static int
iwm_post_alive(struct iwm_softc *sc)
{
int nwords;
int error, chnl;
if (!iwm_nic_lock(sc))
return EBUSY;
if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
device_printf(sc->sc_dev,
"%s: sched addr mismatch",
__func__);
error = EINVAL;
goto out;
}
iwm_ict_reset(sc);
/* Clear TX scheduler state in SRAM. */
nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
/ sizeof(uint32_t);
error = iwm_write_mem(sc,
sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
NULL, nwords);
if (error)
goto out;
/* Set physical address of TX scheduler rings (1KB aligned). */
iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
/* enable command channel */
iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);
iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
/* Enable DMA channels. */
for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
}
IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Enable L1-Active */
iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
out:
iwm_nic_unlock(sc);
return error;
}
/*
* NVM read access and content parsing. We do not support
* external NVM or writing NVM.
* iwlwifi/mvm/nvm.c
*/
/* list of NVM sections we are allowed/need to read */
const int nvm_to_read[] = {
IWM_NVM_SECTION_TYPE_HW,
IWM_NVM_SECTION_TYPE_SW,
IWM_NVM_SECTION_TYPE_CALIBRATION,
IWM_NVM_SECTION_TYPE_PRODUCTION,
};
/* Default NVM size to read */
#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWM_MAX_NVM_SECTION_SIZE 7000
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
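/*
 * Note: the offset argument is overridden to 0 below, so every NVM access
 * request (and the copy into the caller's buffer) starts at the beginning
 * of the section.
 */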
offset = 0;
struct iwm_nvm_access_cmd nvm_access_cmd = {
.offset = htole16(offset),
.length = htole16(length),
.type = htole16(section),
.op_code = IWM_NVM_READ_OPCODE,
};
struct iwm_nvm_access_resp *nvm_resp;
struct iwm_rx_packet *pkt;
struct iwm_host_cmd cmd = {
.id = IWM_NVM_ACCESS_CMD,
.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
IWM_CMD_SEND_IN_RFKILL,
.data = { &nvm_access_cmd, },
};
int ret, bytes_read, offset_read;
uint8_t *resp_data;
cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
ret = iwm_send_cmd(sc, &cmd);
if (ret)
return ret;
pkt = cmd.resp_pkt;
if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
device_printf(sc->sc_dev,
"%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
__func__, pkt->hdr.flags);
ret = EIO;
goto exit;
}
/* Extract NVM response */
nvm_resp = (void *)pkt->data;
ret = le16toh(nvm_resp->status);
bytes_read = le16toh(nvm_resp->length);
offset_read = le16toh(nvm_resp->offset);
resp_data = nvm_resp->data;
if (ret) {
device_printf(sc->sc_dev,
"%s: NVM access command failed with status %d\n",
__func__, ret);
ret = EINVAL;
goto exit;
}
if (offset_read != offset) {
device_printf(sc->sc_dev,
"%s: NVM ACCESS response with invalid offset %d\n",
__func__, offset_read);
ret = EINVAL;
goto exit;
}
memcpy(data + offset, resp_data, bytes_read);
*len = bytes_read;
exit:
iwm_free_resp(sc, &cmd);
return ret;
}
/*
* Reads an NVM section completely.
* NICs prior to the 7000 family don't have a real NVM, but just read
* section 0, which is the EEPROM. Because EEPROM reads are not limited
* by the uCode, we need to manually check in this case that we don't
* overflow and try to read more than the EEPROM size.
* For 7000 family NICs, we supply the maximal size we can read, and
* the uCode fills the response with as much data as it can,
* without overflowing, so no check is needed.
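* The read loop below keeps requesting IWM_NVM_DEFAULT_CHUNK_SIZE (2 KB)
* chunks and stops once a chunk comes back short of the requested length,
* which marks the end of the section.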
*/
static int
iwm_nvm_read_section(struct iwm_softc *sc,
uint16_t section, uint8_t *data, uint16_t *len)
{
uint16_t length, seglen;
int error;
/* Set nvm section read length */
length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
*len = 0;
/* Read the NVM until exhausted (reading less than requested) */
while (seglen == length) {
error = iwm_nvm_read_chunk(sc,
section, *len, length, data, &seglen);
if (error) {
device_printf(sc->sc_dev,
"Cannot read NVM from section "
"%d offset %d, length %d\n",
section, *len, length);
return error;
}
*len += seglen;
}
IWM_DPRINTF(sc, IWM_DEBUG_RESET,
"NVM section %d read completed\n", section);
return 0;
}
/*
* BEGIN IWM_NVM_PARSE
*/
/* iwlwifi/iwl-nvm-parse.c */
/* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
IWM_HW_ADDR = 0x15,
/* NVM SW-Section offset (in words) definitions */
IWM_NVM_SW_SECTION = 0x1C0,
IWM_NVM_VERSION = 0,
IWM_RADIO_CFG = 1,
IWM_SKU = 2,
IWM_N_HW_ADDRS = 3,
IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
/* NVM calibration section offset (in words) definitions */
IWM_NVM_CALIB_SECTION = 0x2B8,
IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
};
/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
#define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
#define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
#define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
#define DEFAULT_MAX_TX_POWER 16
/**
* enum iwm_nvm_channel_flags - channel flags in NVM
* @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
* @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
* @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
* @IWM_NVM_CHANNEL_RADAR: radar detection required
* @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
* @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
* @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
* @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
* @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
*/
enum iwm_nvm_channel_flags {
IWM_NVM_CHANNEL_VALID = (1 << 0),
IWM_NVM_CHANNEL_IBSS = (1 << 1),
IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
IWM_NVM_CHANNEL_RADAR = (1 << 4),
IWM_NVM_CHANNEL_DFS = (1 << 7),
IWM_NVM_CHANNEL_WIDE = (1 << 8),
IWM_NVM_CHANNEL_40MHZ = (1 << 9),
IWM_NVM_CHANNEL_80MHZ = (1 << 10),
IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
/*
* Add a channel to the net80211 channel list.
*
* ieee is the ieee channel number
* ch_idx is channel index.
* mode is the channel mode - CHAN_A, CHAN_B, CHAN_G.
* ch_flags is the iwm channel flags.
*
* Return 0 on OK, < 0 on error.
*/
static int
iwm_init_net80211_channel(struct iwm_softc *sc, int ieee, int ch_idx,
int mode, uint16_t ch_flags)
{
/* XXX for now, no overflow checking! */
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
int is_5ghz, flags;
struct ieee80211_channel *channel;
channel = &ic->ic_channels[ic->ic_nchans++];
channel->ic_ieee = ieee;
is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
if (!is_5ghz) {
flags = IEEE80211_CHAN_2GHZ;
channel->ic_flags = mode;
} else {
flags = IEEE80211_CHAN_5GHZ;
channel->ic_flags = mode;
}
channel->ic_freq = ieee80211_ieee2mhz(ieee, flags);
if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
return (0);
}
static void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
{
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct iwm_nvm_data *data = &sc->sc_nvm;
int ch_idx;
uint16_t ch_flags;
int hw_value;
for (ch_idx = 0; ch_idx < nitems(iwm_nvm_channels); ch_idx++) {
ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
!data->sku_cap_band_52GHz_enable)
ch_flags &= ~IWM_NVM_CHANNEL_VALID;
if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
iwm_nvm_channels[ch_idx],
ch_flags,
(ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
"5.2" : "2.4");
continue;
}
hw_value = iwm_nvm_channels[ch_idx];
/* 5GHz? */
if (ch_idx >= IWM_NUM_2GHZ_CHANNELS) {
(void) iwm_init_net80211_channel(sc, hw_value,
ch_idx,
IEEE80211_CHAN_A,
ch_flags);
} else {
(void) iwm_init_net80211_channel(sc, hw_value,
ch_idx,
IEEE80211_CHAN_B,
ch_flags);
/* If it's not channel 13, also add 11g */
if (hw_value != 13)
(void) iwm_init_net80211_channel(sc, hw_value,
ch_idx,
IEEE80211_CHAN_G,
ch_flags);
}
IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
"Ch. %d Flags %x [%sGHz] - Added\n",
iwm_nvm_channels[ch_idx],
ch_flags,
(ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
"5.2" : "2.4");
}
ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
}
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
const uint16_t *nvm_hw, const uint16_t *nvm_sw,
const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
struct iwm_nvm_data *data = &sc->sc_nvm;
uint8_t hw_addr[IEEE80211_ADDR_LEN];
uint16_t radio_cfg, sku;
data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
sku = le16_to_cpup(nvm_sw + IWM_SKU);
data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
data->sku_cap_11n_enable = 0;
if (!data->valid_tx_ant || !data->valid_rx_ant) {
device_printf(sc->sc_dev,
"%s: invalid antennas (0x%x, 0x%x)\n",
__func__, data->valid_tx_ant,
data->valid_rx_ant);
return EINVAL;
}
data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);
/* The byte order is little endian 16 bit, meaning 214365 */
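/* Hypothetical example: NVM bytes 00:11:22:33:44:55 yield MAC 11:00:33:22:55:44 after the per-word swap below. */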
IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
data->hw_addr[0] = hw_addr[1];
data->hw_addr[1] = hw_addr[0];
data->hw_addr[2] = hw_addr[3];
data->hw_addr[3] = hw_addr[2];
data->hw_addr[4] = hw_addr[5];
data->hw_addr[5] = hw_addr[4];
iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
data->calib_version = 255; /* TODO:
this value will prevent some checks from
failing; we need to determine whether this
field is still needed, and if so,
where it lives in the NVM */
return 0;
}
/*
* END NVM PARSE
*/
struct iwm_nvm_section {
uint16_t length;
const uint8_t *data;
};
static int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
const uint16_t *hw, *sw, *calib;
/* Checking for required sections */
if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
!sections[IWM_NVM_SECTION_TYPE_HW].data) {
device_printf(sc->sc_dev,
"%s: Can't parse empty NVM sections\n",
__func__);
return ENOENT;
}
hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
return iwm_parse_nvm_data(sc, hw, sw, calib,
IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
}
static int
iwm_nvm_init(struct iwm_softc *sc)
{
struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
int i, section, error;
uint16_t len;
uint8_t *nvm_buffer, *temp;
/* Read From FW NVM */
IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
"%s: Read NVM\n",
__func__);
/* TODO: find correct NVM max size for a section */
nvm_buffer = malloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_NOWAIT);
if (nvm_buffer == NULL)
return (ENOMEM);
for (i = 0; i < nitems(nvm_to_read); i++) {
section = nvm_to_read[i];
KASSERT(section <= nitems(nvm_sections),
("too many sections"));
error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
if (error)
break;
temp = malloc(len, M_DEVBUF, M_NOWAIT);
if (temp == NULL) {
error = ENOMEM;
break;
}
memcpy(temp, nvm_buffer, len);
nvm_sections[section].data = temp;
nvm_sections[section].length = len;
}
free(nvm_buffer, M_DEVBUF);
if (error)
return error;
return iwm_parse_nvm_sections(sc, nvm_sections);
}
/*
* Firmware loading gunk. This is kind of a weird hybrid between the
* iwn driver and the Linux iwlwifi driver.
*/
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
const uint8_t *section, uint32_t byte_cnt)
{
struct iwm_dma_info *dma = &sc->fw_dma;
int error;
/* Copy firmware section into pre-allocated DMA-safe memory. */
memcpy(dma->vaddr, section, byte_cnt);
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
if (!iwm_nic_lock(sc))
return EBUSY;
sc->sc_fw_chunk_done = 0;
IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
dst_addr);
IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
(iwm_get_dma_hi_addr(dma->paddr)
<< IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
iwm_nic_unlock(sc);
/* wait 1s for this segment to load */
while (!sc->sc_fw_chunk_done)
if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
break;
return error;
}
static int
iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
struct iwm_fw_sects *fws;
int error, i, w;
const void *data;
uint32_t dlen;
uint32_t offset;
sc->sc_uc.uc_intr = 0;
fws = &sc->sc_fw.fw_sects[ucode_type];
for (i = 0; i < fws->fw_count; i++) {
data = fws->fw_sect[i].fws_data;
dlen = fws->fw_sect[i].fws_len;
offset = fws->fw_sect[i].fws_devoff;
IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
"LOAD FIRMWARE type %d offset %u len %d\n",
ucode_type, offset, dlen);
error = iwm_firmware_load_chunk(sc, offset, data, dlen);
if (error) {
device_printf(sc->sc_dev,
"%s: chunk %u of %u returned error %02d\n",
__func__, i, fws->fw_count, error);
return error;
}
}
/* wait for the firmware to load */
IWM_WRITE(sc, IWM_CSR_RESET, 0);
for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
}
return error;
}
/* iwlwifi: pcie/trans.c */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
int error;
IWM_WRITE(sc, IWM_CSR_INT, ~0);
if ((error = iwm_nic_init(sc)) != 0) {
device_printf(sc->sc_dev, "unable to init nic\n");
return error;
}
/* make sure rfkill handshake bits are cleared */
IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
/* clear (again), then enable host interrupts */
IWM_WRITE(sc, IWM_CSR_INT, ~0);
iwm_enable_interrupts(sc);
/* really make sure rfkill handshake bits are cleared */
/* maybe we should write a few times more? just to make sure */
IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */
return iwm_load_firmware(sc, ucode_type);
}
static int
iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
{
return iwm_post_alive(sc);
}
static int
iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
{
struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
.valid = htole32(valid_tx_ant),
};
return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
}
/* iwlwifi: mvm/fw.c */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
struct iwm_phy_cfg_cmd phy_cfg_cmd;
enum iwm_ucode_type ucode_type = sc->sc_uc_current;
/* Set parameters */
phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
phy_cfg_cmd.calib_control.event_trigger =
sc->sc_default_calib[ucode_type].event_trigger;
phy_cfg_cmd.calib_control.flow_trigger =
sc->sc_default_calib[ucode_type].flow_trigger;
IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
"Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
static int
iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
enum iwm_ucode_type ucode_type)
{
enum iwm_ucode_type old_type = sc->sc_uc_current;
int error;
if ((error = iwm_read_firmware(sc, ucode_type)) != 0)
return error;
sc->sc_uc_current = ucode_type;
error = iwm_start_fw(sc, ucode_type);
if (error) {
sc->sc_uc_current = old_type;
return error;
}
return iwm_fw_alive(sc, sc->sched_base);
}
/*
* mvm misc bits
*/
/*
* follows iwlwifi/fw.c
*/
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
int error;
/* do not operate with rfkill switch turned on */
if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
device_printf(sc->sc_dev,
"radio is disabled by hardware switch\n");
return EPERM;
}
sc->sc_init_complete = 0;
if ((error = iwm_mvm_load_ucode_wait_alive(sc,
IWM_UCODE_TYPE_INIT)) != 0)
return error;
if (justnvm) {
if ((error = iwm_nvm_init(sc)) != 0) {
device_printf(sc->sc_dev, "failed to read nvm\n");
return error;
}
- IEEE80211_ADDR_COPY(sc->sc_bssid, &sc->sc_nvm.hw_addr);
+ IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, &sc->sc_nvm.hw_addr);
sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
+ sc->sc_capa_max_probe_len
+ IWM_MAX_NUM_SCAN_CHANNELS
* sizeof(struct iwm_scan_channel);
sc->sc_scan_cmd = malloc(sc->sc_scan_cmd_len, M_DEVBUF,
M_NOWAIT);
if (sc->sc_scan_cmd == NULL)
return (ENOMEM);
return 0;
}
/* Send TX valid antennas before triggering calibrations */
if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
return error;
/*
* Send phy configurations command to init uCode
* to start the 16.0 uCode init image internal calibrations.
*/
if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
device_printf(sc->sc_dev,
"%s: failed to run internal calibration: %d\n",
__func__, error);
return error;
}
/*
* Nothing to do but wait for the init complete notification
* from the firmware
*/
while (!sc->sc_init_complete)
if ((error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
0, "iwminit", 2*hz)) != 0)
break;
return error;
}
/*
* receive side
*/
/* (re)stock rx ring, called at init-time and at runtime */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
struct iwm_rx_ring *ring = &sc->rxq;
struct iwm_rx_data *data = &ring->data[idx];
struct mbuf *m;
int error;
bus_addr_t paddr;
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
if (m == NULL)
return ENOBUFS;
if (data->m != NULL)
bus_dmamap_unload(ring->data_dmat, data->map);
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not create RX buf DMA map, error %d\n",
__func__, error);
goto fail;
}
data->m = m;
error = bus_dmamap_load(ring->data_dmat, data->map,
mtod(data->m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
&paddr, BUS_DMA_NOWAIT);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev,
"%s: can't not map mbuf, error %d\n", __func__,
error);
goto fail;
}
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
/* Update RX descriptor. */
ring->desc[idx] = htole32(paddr >> 8);
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
return 0;
fail:
return error;
}
/* iwlwifi: mvm/rx.c */
#define IWM_RSSI_OFFSET 50
static int
iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
{
int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
uint32_t agc_a, agc_b;
uint32_t val;
val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
/*
* dBm = rssi dB - agc dB - constant.
* Higher AGC (higher radio gain) means lower signal.
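* Hypothetical example: rssi_a = 70 and agc_a = 30 give 70 - 50 - 30 = -10 dBm
* with IWM_RSSI_OFFSET = 50.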
*/
rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
IWM_DPRINTF(sc, IWM_DEBUG_RECV,
"Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
return max_rssi_dbm;
}
/* iwlwifi: mvm/rx.c */
/*
* iwm_mvm_get_signal_strength - use new rx PHY INFO API
* energy values are reported by the fw as positive values - negate them
* to obtain dBm. Account for missing antennas by replacing 0
* values with -256 dBm: practically zero power and a non-feasible 8-bit value.
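* For example (hypothetical readings): energy_a = 45 maps to -45 dBm, while a
* missing antenna reporting 0 maps to -256 dBm and never wins the MAX() below.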
*/
static int
iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
{
int energy_a, energy_b, energy_c, max_energy;
uint32_t val;
val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
IWM_RX_INFO_ENERGY_ANT_A_POS;
energy_a = energy_a ? -energy_a : -256;
energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
IWM_RX_INFO_ENERGY_ANT_B_POS;
energy_b = energy_b ? -energy_b : -256;
energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
IWM_RX_INFO_ENERGY_ANT_C_POS;
energy_c = energy_c ? -energy_c : -256;
max_energy = MAX(energy_a, energy_b);
max_energy = MAX(max_energy, energy_c);
IWM_DPRINTF(sc, IWM_DEBUG_RECV,
"energy In A %d B %d C %d , and max %d\n",
energy_a, energy_b, energy_c, max_energy);
return max_energy;
}
static void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
/*
* Retrieve the average noise (in dBm) among receivers.
*/
static int
iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
{
int i, total, nbant, noise;
total = nbant = noise = 0;
for (i = 0; i < 3; i++) {
noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
if (noise) {
total += noise;
nbant++;
}
}
/* There should be at least one antenna but check anyway. */
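/* e.g. (hypothetical) silence RSSI readings of 30 and 32 on two antennas: (62 / 2) - 107 = -76 dBm. */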
return (nbant == 0) ? -127 : (total / nbant) - 107;
}
/*
* iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
*
* Handles the actual data of the Rx packet from the fw
*/
static void
iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct ieee80211_rx_stats rxs;
struct mbuf *m;
struct iwm_rx_phy_info *phy_info;
struct iwm_rx_mpdu_res_start *rx_res;
uint32_t len;
uint32_t rx_pkt_status;
int rssi;
bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
phy_info = &sc->sc_last_phy_info;
rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
len = le16toh(rx_res->byte_count);
rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
m = data->m;
m->m_data = pkt->data + sizeof(*rx_res);
m->m_pkthdr.len = m->m_len = len;
if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
device_printf(sc->sc_dev,
"dsp size out of range [0,20]: %d\n",
phy_info->cfg_phy_cnt);
return;
}
if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
IWM_DPRINTF(sc, IWM_DEBUG_RECV,
"Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
return; /* drop */
}
if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
rssi = iwm_mvm_get_signal_strength(sc, phy_info);
} else {
rssi = iwm_mvm_calc_rssi(sc, phy_info);
}
rssi = (0 - IWM_MIN_DBM) + rssi; /* normalize */
rssi = MIN(rssi, sc->sc_max_rssi); /* clip to max. 100% */
/* replenish ring for the buffer we're going to feed to the sharks */
if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
__func__);
return;
}
ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
IWM_DPRINTF(sc, IWM_DEBUG_RECV,
"%s: phy_info: channel=%d, flags=0x%08x\n",
__func__,
le16toh(phy_info->channel),
le16toh(phy_info->phy_flags));
/*
* Populate an RX state struct with the provided information.
*/
bzero(&rxs, sizeof(rxs));
rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
rxs.c_ieee = le16toh(phy_info->channel);
if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
} else {
rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
}
rxs.rssi = rssi - sc->sc_noise;
rxs.nf = sc->sc_noise;
if (ieee80211_radiotap_active_vap(vap)) {
struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
tap->wr_flags = 0;
if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
tap->wr_chan_freq = htole16(rxs.c_freq);
/* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
tap->wr_dbm_antsignal = (int8_t)rssi;
tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
tap->wr_tsft = phy_info->system_timestamp;
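/*
 * Map the firmware rate value to net80211's 500 kb/s units for
 * radiotap (e.g. CCK 10 -> 2, i.e. 1 Mb/s).
 */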
switch (phy_info->rate) {
/* CCK rates. */
case 10: tap->wr_rate = 2; break;
case 20: tap->wr_rate = 4; break;
case 55: tap->wr_rate = 11; break;
case 110: tap->wr_rate = 22; break;
/* OFDM rates. */
case 0xd: tap->wr_rate = 12; break;
case 0xf: tap->wr_rate = 18; break;
case 0x5: tap->wr_rate = 24; break;
case 0x7: tap->wr_rate = 36; break;
case 0x9: tap->wr_rate = 48; break;
case 0xb: tap->wr_rate = 72; break;
case 0x1: tap->wr_rate = 96; break;
case 0x3: tap->wr_rate = 108; break;
/* Unknown rate: should not happen. */
default: tap->wr_rate = 0;
}
}
IWM_UNLOCK(sc);
if (ni != NULL) {
IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
ieee80211_input_mimo(ni, m, &rxs);
ieee80211_free_node(ni);
} else {
IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
ieee80211_input_mimo_all(ic, m, &rxs);
}
IWM_LOCK(sc);
}
static void
iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
struct iwm_node *in)
{
- struct ifnet *ifp = sc->sc_ifp;
struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
+ struct ieee80211vap *vap = in->in_ni.ni_vap;
int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
int failack = tx_resp->failure_frame;
KASSERT(tx_resp->frame_count == 1, ("too many frames"));
/* Update rate control statistics. */
if (status != IWM_TX_STATUS_SUCCESS &&
status != IWM_TX_STATUS_DIRECT_DONE) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- ieee80211_ratectl_tx_complete(in->in_ni.ni_vap, &in->in_ni,
+ if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, 1);
+ ieee80211_ratectl_tx_complete(vap, &in->in_ni,
IEEE80211_RATECTL_TX_FAILURE, &failack, NULL);
} else {
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- ieee80211_ratectl_tx_complete(in->in_ni.ni_vap, &in->in_ni,
+ if_inc_counter(vap->iv_ifp, IFCOUNTER_OPACKETS, 1);
+ ieee80211_ratectl_tx_complete(vap, &in->in_ni,
IEEE80211_RATECTL_TX_SUCCESS, &failack, NULL);
}
}
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
- struct ifnet *ifp = sc->sc_ifp;
struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
int idx = cmd_hdr->idx;
int qid = cmd_hdr->qid;
struct iwm_tx_ring *ring = &sc->txq[qid];
struct iwm_tx_data *txd = &ring->data[idx];
struct iwm_node *in = txd->in;
if (txd->done) {
device_printf(sc->sc_dev,
"%s: got tx interrupt that's already been handled!\n",
__func__);
return;
}
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
sc->sc_tx_timer = 0;
iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
/* Unmap and free mbuf. */
bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, txd->map);
m_freem(txd->m);
IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
"free txd %p, in %p\n", txd, txd->in);
KASSERT(txd->done == 0, ("txd not done"));
txd->done = 1;
KASSERT(txd->in, ("txd without node"));
txd->m = NULL;
txd->in = NULL;
ieee80211_free_node((struct ieee80211_node *)in);
if (--ring->queued < IWM_TX_RING_LOMARK) {
sc->qfullmsk &= ~(1 << ring->qid);
- if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_DRV_OACTIVE)) {
- ifp->if_flags &= ~IFF_DRV_OACTIVE;
+ if (sc->qfullmsk == 0) {
/*
* Well, we're in interrupt context, but then again
* I guess net80211 does all sorts of stunts in
* interrupt context, so maybe this is no biggie.
*/
- iwm_start_locked(ifp);
+ iwm_start(sc);
}
}
}
/*
* transmit side
*/
/*
* Process a "command done" firmware notification. This is where we wakeup
* processes waiting for a synchronous command completion.
* from if_iwn
*/
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
struct iwm_tx_data *data;
if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
return; /* Not a command ack. */
}
data = &ring->data[pkt->hdr.idx];
/* If the command was mapped in an mbuf, free it. */
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
wakeup(&ring->desc[pkt->hdr.idx]);
}
#if 0
/*
* necessary only for block ack mode
*/
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
uint16_t len)
{
struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
uint16_t w_val;
scd_bc_tbl = sc->sched_dma.vaddr;
len += 8; /* magic numbers came naturally from paris */
if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
len = roundup(len, 4) / 4;
w_val = htole16(sta_id << 12 | len);
/* Update TX scheduler. */
scd_bc_tbl[qid].tfd_offset[idx] = w_val;
bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
BUS_DMASYNC_PREWRITE);
/* I really wonder what this is ?!? */
if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
BUS_DMASYNC_PREWRITE);
}
}
#endif
/*
* Take an 802.11 (non-n) rate and find the relevant rate
* table entry. Return the index into in_ridx[].
*
* The caller then uses that index back into in_ridx
* to figure out the rate index programmed /into/
* the firmware for this given node.
*/
static int
iwm_tx_rateidx_lookup(struct iwm_softc *sc, struct iwm_node *in,
uint8_t rate)
{
int i;
uint8_t r;
for (i = 0; i < nitems(in->in_ridx); i++) {
r = iwm_rates[in->in_ridx[i]].rate;
if (rate == r)
return (i);
}
/* XXX Return the first */
/* XXX TODO: have it return the /lowest/ */
return (0);
}
/*
* Fill in various bits for management frames, and leave them
* unfilled for data frames (firmware takes care of that).
* Return the selected TX rate.
*/
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni = &in->in_ni;
const struct iwm_rate *rinfo;
int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
int ridx, rate_flags;
tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
/*
* XXX TODO: everything about the rate selection here is terrible!
*/
if (type == IEEE80211_FC0_TYPE_DATA) {
int i;
/* for data frames, use RS table */
(void) ieee80211_ratectl_rate(ni, NULL, 0);
i = iwm_tx_rateidx_lookup(sc, in, ni->ni_txrate);
ridx = in->in_ridx[i];
/* This is the index into the programmed table */
tx->initial_rate_index = i;
tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
"%s: start with i=%d, txrate %d\n",
__func__, i, iwm_rates[ridx].rate);
/* XXX no rate_n_flags? */
return &iwm_rates[ridx];
}
/*
* For non-data, use the lowest supported rate for the given
* operational mode.
*
* Note: there may not be any rate control information available.
* This driver currently assumes that if we're transmitting data
* frames, the rate control table applies. Grr.
*
* XXX TODO: use the configured rate for the traffic type!
*/
if (ic->ic_curmode == IEEE80211_MODE_11A) {
/*
* XXX this assumes the mode is either 11a or not 11a;
* definitely won't work for 11n.
*/
ridx = IWM_RIDX_OFDM;
} else {
ridx = IWM_RIDX_CCK;
}
rinfo = &iwm_rates[ridx];
IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
__func__, ridx,
rinfo->rate,
!! (IWM_RIDX_IS_CCK(ridx))
);
/* XXX TODO: hard-coded TX antenna? */
rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
if (IWM_RIDX_IS_CCK(ridx))
rate_flags |= IWM_RATE_MCS_CCK_MSK;
/* XXX hard-coded tx rate */
tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
return rinfo;
}
#define TB0_SIZE 16
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct iwm_node *in = (struct iwm_node *)ni;
struct iwm_tx_ring *ring;
struct iwm_tx_data *data;
struct iwm_tfd *desc;
struct iwm_device_cmd *cmd;
struct iwm_tx_cmd *tx;
struct ieee80211_frame *wh;
struct ieee80211_key *k = NULL;
struct mbuf *m1;
const struct iwm_rate *rinfo;
uint32_t flags;
u_int hdrlen;
bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
int nsegs;
uint8_t tid, type;
int i, totlen, error, pad;
wh = mtod(m, struct ieee80211_frame *);
hdrlen = ieee80211_anyhdrsize(wh);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
tid = 0;
ring = &sc->txq[ac];
desc = &ring->desc[ring->cur];
memset(desc, 0, sizeof(*desc));
data = &ring->data[ring->cur];
/* Fill out iwm_tx_cmd to send to the firmware */
cmd = &ring->cmd[ring->cur];
cmd->hdr.code = IWM_TX_CMD;
cmd->hdr.flags = 0;
cmd->hdr.qid = ring->qid;
cmd->hdr.idx = ring->cur;
tx = (void *)cmd->data;
memset(tx, 0, sizeof(*tx));
rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
/* Encrypt the frame if need be. */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
/* Retrieve key for TX && do software encryption. */
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
m_freem(m);
return (ENOBUFS);
}
/* 802.11 header may have moved. */
wh = mtod(m, struct ieee80211_frame *);
}
if (ieee80211_radiotap_active_vap(vap)) {
struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
tap->wt_rate = rinfo->rate;
tap->wt_hwqueue = ac;
if (k != NULL)
tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
ieee80211_radiotap_tx(vap, m);
}
totlen = m->m_pkthdr.len;
flags = 0;
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= IWM_TX_CMD_FLG_ACK;
}
if (type != IEEE80211_FC0_TYPE_DATA
&& (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
&& !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
}
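/*
 * Multicast and non-data frames go out via the auxiliary station
 * added at init time; unicast data uses the BSS station entry.
 */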
if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
type != IEEE80211_FC0_TYPE_DATA)
tx->sta_id = sc->sc_aux_sta.sta_id;
else
tx->sta_id = IWM_STATION_ID;
if (type == IEEE80211_FC0_TYPE_MGT) {
uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
tx->pm_frame_timeout = htole16(3);
else
tx->pm_frame_timeout = htole16(2);
} else {
tx->pm_frame_timeout = htole16(0);
}
if (hdrlen & 3) {
/* First segment length must be a multiple of 4. */
flags |= IWM_TX_CMD_FLG_MH_PAD;
pad = 4 - (hdrlen & 3);
} else
pad = 0;
tx->driver_txop = 0;
tx->next_frame_len = 0;
tx->len = htole16(totlen);
tx->tid_tspec = tid;
tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
/* Set physical address of "scratch area". */
tx->dram_lsb_ptr = htole32(data->scratch_paddr);
tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
/* Copy 802.11 header in TX command. */
memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
tx->sec_ctl = 0;
tx->tx_flags |= htole32(flags);
/* Trim 802.11 header. */
m_adj(m, hdrlen);
error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
segs, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
if (error != EFBIG) {
device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
error);
m_freem(m);
return error;
}
/* Too many DMA segments, linearize mbuf. */
MGETHDR(m1, M_NOWAIT, MT_DATA);
if (m1 == NULL) {
m_freem(m);
return ENOBUFS;
}
if (m->m_pkthdr.len > MHLEN) {
MCLGET(m1, M_NOWAIT);
if (!(m1->m_flags & M_EXT)) {
m_freem(m);
m_freem(m1);
return ENOBUFS;
}
}
m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
m_freem(m);
m = m1;
error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
segs, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
error);
m_freem(m);
return error;
}
}
data->m = m;
data->in = in;
data->done = 0;
IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
"sending txd %p, in %p\n", data, data->in);
KASSERT(data->in != NULL, ("node is NULL"));
IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
"sending data: qid=%d idx=%d len=%d nsegs=%d\n",
ring->qid, ring->cur, totlen, nsegs);
/* Fill TX descriptor. */
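/*
 * TB0 covers the first TB0_SIZE bytes of the TX command; TB1 covers
 * the rest of the command header, the TX command and the (padded)
 * 802.11 header. The remaining TBs map the mbuf data segments.
 * hi_n_len packs the high address bits in the low nibble and the
 * length in the upper bits.
 */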
desc->num_tbs = 2 + nsegs;
desc->tbs[0].lo = htole32(data->cmd_paddr);
desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
(TB0_SIZE << 4);
desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
((sizeof(struct iwm_cmd_header) + sizeof(*tx)
+ hdrlen + pad - TB0_SIZE) << 4);
/* Other DMA segments are for data payload. */
for (i = 0; i < nsegs; i++) {
seg = &segs[i];
desc->tbs[i+2].lo = htole32(seg->ds_addr);
desc->tbs[i+2].hi_n_len = \
htole16(iwm_get_dma_hi_addr(seg->ds_addr))
| ((seg->ds_len) << 4);
}
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
#if 0
iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif
/* Kick TX ring. */
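/*
 * Advance the ring cursor and hand the new write pointer to the
 * hardware (queue id in the upper byte).
 */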
ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
/* Mark TX ring as full if we reach a certain threshold. */
if (++ring->queued > IWM_TX_RING_HIMARK) {
sc->qfullmsk |= 1 << ring->qid;
}
return 0;
}
static int
iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct iwm_softc *sc = ic->ic_softc;
int error = 0;
IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
"->%s begin\n", __func__);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
ieee80211_free_node(ni);
m_freem(m);
IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
"<-%s not RUNNING\n", __func__);
return (ENETDOWN);
}
IWM_LOCK(sc);
/* XXX fix this */
if (params == NULL) {
error = iwm_tx(sc, m, ni, 0);
} else {
error = iwm_tx(sc, m, ni, 0);
}
if (error != 0) {
/* NB: m is reclaimed on tx failure */
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
sc->sc_tx_timer = 5;
IWM_UNLOCK(sc);
return (error);
}
/*
* mvm/tx.c
*/
#if 0
/*
* Note that there are transports that buffer frames before they reach
* the firmware. This means that after flush_tx_path is called, the
* queue might not be empty. The race-free way to handle this is to:
* 1) set the station as draining
* 2) flush the Tx path
* 3) wait for the transport queues to be empty
*/
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
struct iwm_tx_path_flush_cmd flush_cmd = {
.queues_ctl = htole32(tfd_msk),
.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
};
int ret;
ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
sizeof(flush_cmd), &flush_cmd);
if (ret)
device_printf(sc->sc_dev,
"Flushing tx queue failed: %d\n", ret);
return ret;
}
#endif
/*
* BEGIN mvm/sta.c
*/
static void
iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
{
memset(cmd_v5, 0, sizeof(*cmd_v5));
cmd_v5->add_modify = cmd_v6->add_modify;
cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
cmd_v5->sta_id = cmd_v6->sta_id;
cmd_v5->modify_mask = cmd_v6->modify_mask;
cmd_v5->station_flags = cmd_v6->station_flags;
cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
cmd_v5->assoc_id = cmd_v6->assoc_id;
cmd_v5->beamform_flags = cmd_v6->beamform_flags;
cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
}
static int
iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
{
struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
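/*
 * Firmware with the STA_KEY_CMD capability takes the v6 layout
 * directly; otherwise convert the command to v5 first.
 */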
if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
sizeof(*cmd), cmd, status);
}
iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
&cmd_v5, status);
}
/* send station add/update command to firmware */
static int
iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
{
struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
int ret;
uint32_t status;
memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
add_sta_cmd.sta_id = IWM_STATION_ID;
add_sta_cmd.mac_id_n_color
= htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
IWM_DEFAULT_COLOR));
if (!update) {
add_sta_cmd.tfd_queue_msk = htole32(0xf);
IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
}
add_sta_cmd.add_modify = update ? 1 : 0;
add_sta_cmd.station_flags_msk
|= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
status = IWM_ADD_STA_SUCCESS;
ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
if (ret)
return ret;
switch (status) {
case IWM_ADD_STA_SUCCESS:
break;
default:
ret = EIO;
device_printf(sc->sc_dev, "IWM_ADD_STA failed\n");
break;
}
return ret;
}
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
int ret;
ret = iwm_mvm_sta_send_to_fw(sc, in, 0);
if (ret)
return ret;
return 0;
}
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
static int
iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
const uint8_t *addr, uint16_t mac_id, uint16_t color)
{
struct iwm_mvm_add_sta_cmd_v6 cmd;
int ret;
uint32_t status;
memset(&cmd, 0, sizeof(cmd));
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
if (addr)
IEEE80211_ADDR_COPY(cmd.addr, addr);
ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
if (ret)
return ret;
switch (status) {
case IWM_ADD_STA_SUCCESS:
IWM_DPRINTF(sc, IWM_DEBUG_RESET,
"%s: Internal station added.\n", __func__);
return 0;
default:
device_printf(sc->sc_dev,
"%s: Add internal station failed, status=0x%x\n",
__func__, status);
ret = EIO;
break;
}
return ret;
}
static int
iwm_mvm_add_aux_sta(struct iwm_softc *sc)
{
int ret;
sc->sc_aux_sta.sta_id = 3;
sc->sc_aux_sta.tfd_queue_msk = 0;
ret = iwm_mvm_add_int_sta_common(sc,
&sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
if (ret)
memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
return ret;
}
/*
* END mvm/sta.c
*/
/*
* BEGIN mvm/quota.c
*/
static int
iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
{
struct iwm_time_quota_cmd cmd;
int i, idx, ret, num_active_macs, quota, quota_rem;
int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
int n_ifs[IWM_MAX_BINDINGS] = {0, };
uint16_t id;
memset(&cmd, 0, sizeof(cmd));
/* currently, PHY ID == binding ID */
if (in) {
id = in->in_phyctxt->id;
KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
colors[id] = in->in_phyctxt->color;
if (1)
n_ifs[id] = 1;
}
/*
* The FW's scheduling session consists of
* IWM_MVM_MAX_QUOTA fragments. Divide these fragments
* equally between all the bindings that require quota.
*/
num_active_macs = 0;
for (i = 0; i < IWM_MAX_BINDINGS; i++) {
cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
num_active_macs += n_ifs[i];
}
quota = 0;
quota_rem = 0;
if (num_active_macs) {
quota = IWM_MVM_MAX_QUOTA / num_active_macs;
quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
}
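/*
 * e.g. with two active bindings each one gets IWM_MVM_MAX_QUOTA / 2
 * fragments; any remainder goes to the first binding below.
 */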
for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
if (colors[i] < 0)
continue;
cmd.quotas[idx].id_and_color =
htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
if (n_ifs[i] <= 0) {
cmd.quotas[idx].quota = htole32(0);
cmd.quotas[idx].max_duration = htole32(0);
} else {
cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
cmd.quotas[idx].max_duration = htole32(0);
}
idx++;
}
/* Give the remainder of the session to the first binding */
cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
device_printf(sc->sc_dev,
"%s: Failed to send quota: %d\n", __func__, ret);
return ret;
}
/*
* END mvm/quota.c
*/
/*
* ieee80211 routines
*/
/*
* Change to AUTH state in 80211 state machine. Roughly matches what
* Linux does in bss_info_changed().
*/
static int
iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
{
struct ieee80211_node *ni;
struct iwm_node *in;
struct iwm_vap *iv = IWM_VAP(vap);
uint32_t duration;
uint32_t min_duration;
int error;
/*
* XXX I have a feeling that the vap node is being
* freed from underneath us. Grr.
*/
ni = ieee80211_ref_node(vap->iv_bss);
in = (struct iwm_node *) ni;
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
"%s: called; vap=%p, bss ni=%p\n",
__func__,
vap,
ni);
in->in_assoc = 0;
error = iwm_allow_mcast(vap, sc);
if (error) {
device_printf(sc->sc_dev,
"%s: failed to set multicast\n", __func__);
goto out;
}
/*
* This is where it deviates from what Linux does.
*
* Linux iwlwifi doesn't reset the nic each time, nor does it
* call ctxt_add() here. Instead, it adds it during vap creation,
* and always does a mac_ctx_changed().
*
* The OpenBSD port doesn't attempt to do that - it resets things
* at odd states and does the add here.
*
* So, until the state handling is fixed (ie, we never reset
* the NIC except for a firmware failure, which should drag
* the NIC back to IDLE, re-setup and re-add all the mac/phy
* contexts that are required), let's do a dirty hack here.
*/
if (iv->is_uploaded) {
if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
device_printf(sc->sc_dev,
"%s: failed to add MAC\n", __func__);
goto out;
}
} else {
if ((error = iwm_mvm_mac_ctxt_add(sc, vap)) != 0) {
device_printf(sc->sc_dev,
"%s: failed to add MAC\n", __func__);
goto out;
}
}
if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
in->in_ni.ni_chan, 1, 1)) != 0) {
device_printf(sc->sc_dev,
"%s: failed add phy ctxt\n", __func__);
goto out;
}
in->in_phyctxt = &sc->sc_phyctxt[0];
if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
device_printf(sc->sc_dev,
"%s: binding cmd\n", __func__);
goto out;
}
if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
device_printf(sc->sc_dev,
"%s: failed to add MAC\n", __func__);
goto out;
}
/* a bit superfluous? */
while (sc->sc_auth_prot)
msleep(&sc->sc_auth_prot, &sc->sc_mtx, 0, "iwmauth", 0);
sc->sc_auth_prot = 1;
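/*
 * sc_auth_prot: 1 = protection requested, 2 = time event started,
 * -1 = denied, 0 = idle; updated by the IWM_TIME_EVENT_NOTIFICATION
 * handler in iwm_notif_intr().
 */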
duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
200 + in->in_ni.ni_intval);
min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
100 + in->in_ni.ni_intval);
iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
IWM_DPRINTF(sc, IWM_DEBUG_RESET,
"%s: waiting for auth_prot\n", __func__);
while (sc->sc_auth_prot != 2) {
/*
* well, meh, but if the kernel is sleeping for half a
* second, we have bigger problems
*/
if (sc->sc_auth_prot == 0) {
device_printf(sc->sc_dev,
"%s: missed auth window!\n", __func__);
error = ETIMEDOUT;
goto out;
} else if (sc->sc_auth_prot == -1) {
device_printf(sc->sc_dev,
"%s: no time event, denied!\n", __func__);
sc->sc_auth_prot = 0;
error = EAUTH;
goto out;
}
msleep(&sc->sc_auth_prot, &sc->sc_mtx, 0, "iwmau2", 0);
}
IWM_DPRINTF(sc, IWM_DEBUG_RESET, "<-%s\n", __func__);
error = 0;
out:
ieee80211_free_node(ni);
return (error);
}
static int
iwm_assoc(struct ieee80211vap *vap, struct iwm_softc *sc)
{
struct iwm_node *in = (struct iwm_node *)vap->iv_bss;
int error;
if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
device_printf(sc->sc_dev,
"%s: failed to update STA\n", __func__);
return error;
}
in->in_assoc = 1;
if ((error = iwm_mvm_mac_ctxt_changed(sc, vap)) != 0) {
device_printf(sc->sc_dev,
"%s: failed to update MAC\n", __func__);
return error;
}
return 0;
}
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
/*
* Ok, so *technically* the proper set of calls for going
* from RUN back to SCAN is:
*
* iwm_mvm_power_mac_disable(sc, in);
* iwm_mvm_mac_ctxt_changed(sc, in);
* iwm_mvm_rm_sta(sc, in);
* iwm_mvm_update_quotas(sc, NULL);
* iwm_mvm_mac_ctxt_changed(sc, in);
* iwm_mvm_binding_remove_vif(sc, in);
* iwm_mvm_mac_ctxt_remove(sc, in);
*
* However, that freezes the device no matter which permutations
* and modifications are attempted. Obviously, this driver is missing
* something since it works in the Linux driver, but figuring out what
* is missing is a little more complicated. Now, since we're going
* back to nothing anyway, we'll just do a complete device reset.
* Up yours, device!
*/
//iwm_mvm_flush_tx_path(sc, 0xf, 1);
iwm_stop_device(sc);
iwm_init_hw(sc);
if (in)
in->in_assoc = 0;
return 0;
#if 0
int error;
iwm_mvm_power_mac_disable(sc, in);
if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
device_printf(sc->sc_dev, "mac ctxt change fail 1 %d\n", error);
return error;
}
if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
device_printf(sc->sc_dev, "sta remove fail %d\n", error);
return error;
}
error = iwm_mvm_rm_sta(sc, in);
in->in_assoc = 0;
iwm_mvm_update_quotas(sc, NULL);
if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
device_printf(sc->sc_dev, "mac ctxt change fail 2 %d\n", error);
return error;
}
iwm_mvm_binding_remove_vif(sc, in);
iwm_mvm_mac_ctxt_remove(sc, in);
return error;
#endif
}
static struct ieee80211_node *
iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
return malloc(sizeof (struct iwm_node), M_80211_NODE,
M_NOWAIT | M_ZERO);
}
static void
iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
{
struct ieee80211_node *ni = &in->in_ni;
struct iwm_lq_cmd *lq = &in->in_lq;
int nrates = ni->ni_rates.rs_nrates;
int i, ridx, tab = 0;
int txant = 0;
if (nrates > nitems(lq->rs_table)) {
device_printf(sc->sc_dev,
"%s: node supports %d rates, driver handles "
"only %zu\n", __func__, nrates, nitems(lq->rs_table));
return;
}
/*
* XXX .. and most of iwm_node is not initialised explicitly;
* it's all just 0x0 passed to the firmware.
*/
/* first figure out which rates we should support */
/* XXX TODO: this isn't 11n aware /at all/ */
memset(&in->in_ridx, -1, sizeof(in->in_ridx));
IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
"%s: nrates=%d\n", __func__, nrates);
for (i = 0; i < nrates; i++) {
int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
/* Map 802.11 rate to HW rate index. */
for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
if (iwm_rates[ridx].rate == rate)
break;
if (ridx > IWM_RIDX_MAX) {
device_printf(sc->sc_dev,
"%s: WARNING: device rate for %d not found!\n",
__func__, rate);
} else {
IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
"%s: rate: i: %d, rate=%d, ridx=%d\n",
__func__,
i,
rate,
ridx);
in->in_ridx[i] = ridx;
}
}
/* then construct a lq_cmd based on those */
memset(lq, 0, sizeof(*lq));
lq->sta_id = IWM_STATION_ID;
/*
* Are these used? (We don't do SISO or MIMO.)
* They need to be set to non-zero, though, or we get an error.
*/
lq->single_stream_ant_msk = 1;
lq->dual_stream_ant_msk = 1;
/*
* Build the actual rate selection table.
* The lowest bits are the rates. Additionally,
* CCK needs bit 9 to be set. The rest of the bits
* we add to the table select the tx antenna.
* Note that we add the rates with the highest rate first
* (the opposite of ni_rates).
*/
/*
* XXX TODO: this should be looping over the min of nrates
* and LQ_MAX_RETRY_NUM. Sigh.
*/
for (i = 0; i < nrates; i++) {
int nextant;
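/*
 * Round-robin over the valid TX antennas: reload the mask when it
 * is exhausted and pick the lowest set bit each time through.
 */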
if (txant == 0)
txant = IWM_FW_VALID_TX_ANT(sc);
nextant = 1<<(ffs(txant)-1);
txant &= ~nextant;
/*
* Map the rate id into a rate index into
* our hardware table containing the
* configuration to use for this rate.
*/
ridx = in->in_ridx[(nrates-1)-i];
tab = iwm_rates[ridx].plcp;
tab |= nextant << IWM_RATE_MCS_ANT_POS;
if (IWM_RIDX_IS_CCK(ridx))
tab |= IWM_RATE_MCS_CCK_MSK;
IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
"station rate i=%d, rate=%d, hw=%x\n",
i, iwm_rates[ridx].rate, tab);
lq->rs_table[i] = htole32(tab);
}
/* then fill the rest with the lowest possible rate */
for (i = nrates; i < nitems(lq->rs_table); i++) {
KASSERT(tab != 0, ("invalid tab"));
lq->rs_table[i] = htole32(tab);
}
}
static int
iwm_media_change(struct ifnet *ifp)
{
- struct iwm_softc *sc = ifp->if_softc;
+ struct ieee80211vap *vap = ifp->if_softc;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct iwm_softc *sc = ic->ic_softc;
int error;
error = ieee80211_media_change(ifp);
if (error != ENETRESET)
return error;
- if ((ifp->if_flags & IFF_UP) &&
- (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- iwm_stop(ifp, 0);
+ IWM_LOCK(sc);
+ if (ic->ic_nrunning > 0) {
+ iwm_stop(sc);
iwm_init(sc);
}
+ IWM_UNLOCK(sc);
return error;
}
static int
iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct iwm_vap *ivp = IWM_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct iwm_softc *sc = ic->ic_softc;
struct iwm_node *in;
int error;
IWM_DPRINTF(sc, IWM_DEBUG_STATE,
"switching state %s -> %s\n",
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate]);
IEEE80211_UNLOCK(ic);
IWM_LOCK(sc);
/* disable beacon filtering if we're hopping out of RUN */
if (vap->iv_state == IEEE80211_S_RUN && nstate != vap->iv_state) {
iwm_mvm_disable_beacon_filter(sc);
if (((in = (void *)vap->iv_bss) != NULL))
in->in_assoc = 0;
iwm_release(sc, NULL);
/*
* It's impossible to directly go RUN->SCAN. If we call iwm_release()
* above, the card will be completely reinitialized,
* so the driver must do everything necessary to bring the card
* from INIT to SCAN.
*
* Additionally, upon receiving deauth frame from AP,
* OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
* state. This will also fail with this driver, so bring the FSM
* from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
*
* XXX TODO: fix this for FreeBSD!
*/
if (nstate == IEEE80211_S_SCAN ||
nstate == IEEE80211_S_AUTH ||
nstate == IEEE80211_S_ASSOC) {
IWM_DPRINTF(sc, IWM_DEBUG_STATE,
"Force transition to INIT; MGT=%d\n", arg);
IWM_UNLOCK(sc);
IEEE80211_LOCK(ic);
vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
IWM_DPRINTF(sc, IWM_DEBUG_STATE,
"Going INIT->SCAN\n");
nstate = IEEE80211_S_SCAN;
IEEE80211_UNLOCK(ic);
IWM_LOCK(sc);
}
}
switch (nstate) {
case IEEE80211_S_INIT:
sc->sc_scanband = 0;
break;
case IEEE80211_S_AUTH:
if ((error = iwm_auth(vap, sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not move to auth state: %d\n",
__func__, error);
break;
}
break;
case IEEE80211_S_ASSOC:
if ((error = iwm_assoc(vap, sc)) != 0) {
device_printf(sc->sc_dev,
"%s: failed to associate: %d\n", __func__,
error);
break;
}
break;
case IEEE80211_S_RUN:
{
struct iwm_host_cmd cmd = {
.id = IWM_LQ_CMD,
.len = { sizeof(in->in_lq), },
.flags = IWM_CMD_SYNC,
};
/* Update the association state, now that we have it all */
/* (e.g. the associd comes in at this point) */
error = iwm_assoc(vap, sc);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: failed to update association state: %d\n",
__func__,
error);
break;
}
in = (struct iwm_node *)vap->iv_bss;
iwm_mvm_power_mac_update_mode(sc, in);
iwm_mvm_enable_beacon_filter(sc, in);
iwm_mvm_update_quotas(sc, in);
iwm_setrates(sc, in);
cmd.data[0] = &in->in_lq;
if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
device_printf(sc->sc_dev,
"%s: IWM_LQ_CMD failed\n", __func__);
}
break;
}
default:
break;
}
IWM_UNLOCK(sc);
IEEE80211_LOCK(ic);
return (ivp->iv_newstate(vap, nstate, arg));
}
void
iwm_endscan_cb(void *arg, int pending)
{
struct iwm_softc *sc = arg;
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
int done;
int error;
IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
"%s: scan ended\n",
__func__);
IWM_LOCK(sc);
if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
sc->sc_nvm.sku_cap_band_52GHz_enable) {
done = 0;
if ((error = iwm_mvm_scan_request(sc,
IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
device_printf(sc->sc_dev, "could not initiate scan\n");
done = 1;
}
} else {
done = 1;
}
if (done) {
IWM_UNLOCK(sc);
ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
IWM_LOCK(sc);
sc->sc_scanband = 0;
}
IWM_UNLOCK(sc);
}
static int
iwm_init_hw(struct iwm_softc *sc)
{
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
int error, i, qid;
if ((error = iwm_start_hw(sc)) != 0)
return error;
if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
return error;
}
/*
* We should stop and restart the HW since the INIT
* image has just been loaded.
*/
iwm_stop_device(sc);
if ((error = iwm_start_hw(sc)) != 0) {
device_printf(sc->sc_dev, "could not initialize hardware\n");
return error;
}
/* restart, this time with the regular firmware */
error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
if (error) {
device_printf(sc->sc_dev, "could not load firmware\n");
goto error;
}
if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
goto error;
/* Send phy db control command and then phy db calibration */
if ((error = iwm_send_phy_db_data(sc)) != 0)
goto error;
if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
goto error;
/* Add auxiliary station for scanning */
if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
goto error;
for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
/*
* The channel used here isn't relevant as it's
* going to be overwritten in the other flows.
* For now use the first channel we have.
*/
if ((error = iwm_mvm_phy_ctxt_add(sc,
&sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
goto error;
}
error = iwm_mvm_power_update_device(sc);
if (error)
goto error;
/* Mark TX rings as active. */
for (qid = 0; qid < 4; qid++) {
iwm_enable_txq(sc, qid, qid);
}
return 0;
error:
iwm_stop_device(sc);
return error;
}
/* Allow multicast from our BSSID. */
static int
iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
{
struct ieee80211_node *ni = vap->iv_bss;
struct iwm_mcast_filter_cmd *cmd;
size_t size;
int error;
size = roundup(sizeof(*cmd), 4);
cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
if (cmd == NULL)
return ENOMEM;
cmd->filter_own = 1;
cmd->port_id = 0;
cmd->count = 0;
cmd->pass_all = 1;
IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
IWM_CMD_SYNC, size, cmd);
free(cmd, M_DEVBUF);
return (error);
}
-/*
- * ifnet interfaces
- */
-
static void
-iwm_init(void *arg)
+iwm_init(struct iwm_softc *sc)
{
- struct iwm_softc *sc = arg;
-
- IWM_LOCK(sc);
- iwm_init_locked(sc);
- IWM_UNLOCK(sc);
-}
-
-static void
-iwm_init_locked(struct iwm_softc *sc)
-{
- struct ifnet *ifp = sc->sc_ifp;
int error;
if (sc->sc_flags & IWM_FLAG_HW_INITED) {
return;
}
sc->sc_generation++;
sc->sc_flags &= ~IWM_FLAG_STOPPED;
if ((error = iwm_init_hw(sc)) != 0) {
- iwm_stop_locked(ifp);
+ iwm_stop(sc);
return;
}
/*
* Ok, firmware loaded and we are jogging
*/
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
sc->sc_flags |= IWM_FLAG_HW_INITED;
callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}
-/*
- * Dequeue packets from sendq and call send.
- * mostly from iwn
- */
-static void
-iwm_start(struct ifnet *ifp)
+static int
+iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
{
- struct iwm_softc *sc = ifp->if_softc;
+ struct iwm_softc *sc;
+ int error;
+ sc = ic->ic_softc;
+
IWM_LOCK(sc);
- iwm_start_locked(ifp);
+ if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
+ IWM_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ IWM_UNLOCK(sc);
+ return (error);
+ }
+ iwm_start(sc);
IWM_UNLOCK(sc);
+ return (0);
}
+/*
+ * Dequeue packets from sendq and call send.
+ */
static void
-iwm_start_locked(struct ifnet *ifp)
+iwm_start(struct iwm_softc *sc)
{
- struct iwm_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct mbuf *m;
int ac = 0;
- if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING)
- return;
-
IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
- for (;;) {
- /* why isn't this done per-queue? */
- if (sc->qfullmsk != 0) {
- ifp->if_flags |= IFF_DRV_OACTIVE;
- break;
- }
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (!m)
- break;
+ while (sc->qfullmsk == 0 &&
+ (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
if (iwm_tx(sc, m, ni, ac) != 0) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
continue;
}
-
- if (ifp->if_flags & IFF_UP) {
- sc->sc_tx_timer = 15;
- }
+ sc->sc_tx_timer = 15;
}
IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
-
- return;
}
static void
-iwm_stop(struct ifnet *ifp, int disable)
+iwm_stop(struct iwm_softc *sc)
{
- struct iwm_softc *sc = ifp->if_softc;
- IWM_LOCK(sc);
- iwm_stop_locked(ifp);
- IWM_UNLOCK(sc);
-}
-
-static void
-iwm_stop_locked(struct ifnet *ifp)
-{
- struct iwm_softc *sc = ifp->if_softc;
-
sc->sc_flags &= ~IWM_FLAG_HW_INITED;
sc->sc_flags |= IWM_FLAG_STOPPED;
sc->sc_generation++;
sc->sc_scanband = 0;
sc->sc_auth_prot = 0;
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
sc->sc_tx_timer = 0;
iwm_stop_device(sc);
}
static void
iwm_watchdog(void *arg)
{
struct iwm_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
if (sc->sc_tx_timer > 0) {
if (--sc->sc_tx_timer == 0) {
device_printf(sc->sc_dev, "device timeout\n");
#ifdef IWM_DEBUG
iwm_nic_error(sc);
#endif
- ifp->if_flags &= ~IFF_UP;
- iwm_stop_locked(ifp);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ iwm_stop(sc);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
return;
}
}
callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
}
-static int
-iwm_ioctl(struct ifnet *ifp, u_long cmd, iwm_caddr_t data)
+static void
+iwm_parent(struct ieee80211com *ic)
{
- struct iwm_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = sc->sc_ic;
- struct ifreq *ifr = (struct ifreq *) data;
- int error = 0, startall = 0;
+ struct iwm_softc *sc = ic->ic_softc;
+ int startall = 0;
- switch (cmd) {
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCSIFFLAGS:
- IWM_LOCK(sc);
- if (ifp->if_flags & IFF_UP) {
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- iwm_init_locked(sc);
- startall = 1;
- }
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- iwm_stop_locked(ifp);
+ IWM_LOCK(sc);
+ if (ic->ic_nrunning > 0) {
+ if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
+ iwm_init(sc);
+ startall = 1;
}
- IWM_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
-
- break;
- default:
- error = EINVAL;
- break;
- }
-
- return error;
+ } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
+ iwm_stop(sc);
+ IWM_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
}
/*
* The interrupt side of things
*/
/*
* error dumping routines are from iwlwifi/mvm/utils.c
*/
/*
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
* read with uint32_t-sized accesses, any members with a different size
* need to be ordered correctly though!
*/
struct iwm_error_event_table {
uint32_t valid; /* (nonzero) valid, (0) log is empty */
uint32_t error_id; /* type of error */
uint32_t pc; /* program counter */
uint32_t blink1; /* branch link */
uint32_t blink2; /* branch link */
uint32_t ilink1; /* interrupt link */
uint32_t ilink2; /* interrupt link */
uint32_t data1; /* error-specific data */
uint32_t data2; /* error-specific data */
uint32_t data3; /* error-specific data */
uint32_t bcon_time; /* beacon timer */
uint32_t tsf_low; /* network timestamp function timer */
uint32_t tsf_hi; /* network timestamp function timer */
uint32_t gp1; /* GP1 timer register */
uint32_t gp2; /* GP2 timer register */
uint32_t gp3; /* GP3 timer register */
uint32_t ucode_ver; /* uCode version */
uint32_t hw_ver; /* HW Silicon version */
uint32_t brd_ver; /* HW board version */
uint32_t log_pc; /* log program counter */
uint32_t frame_ptr; /* frame pointer */
uint32_t stack_ptr; /* stack pointer */
uint32_t hcmd; /* last host command header */
uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
* rxtx_flag */
uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
* host_flag */
uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
* enc_flag */
uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
* time_flag */
uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
* wico interrupt */
uint32_t isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
uint32_t wait_event; /* wait event() caller address */
uint32_t l2p_control; /* L2pControlField */
uint32_t l2p_duration; /* L2pDurationField */
uint32_t l2p_mhvalid; /* L2pMhValidBits */
uint32_t l2p_addr_match; /* L2pAddrMatchStat */
uint32_t lmpm_pmg_sel; /* indicates which clocks are turned on
* (LMPM_PMG_SEL) */
uint32_t u_timestamp; /* indicates the date and time of the
* compilation */
uint32_t flow_handler; /* FH read/write pointers, RX credit */
} __packed;
#define ERROR_START_OFFSET (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
#ifdef IWM_DEBUG
struct {
const char *name;
uint8_t num;
} advanced_lookup[] = {
{ "NMI_INTERRUPT_WDG", 0x34 },
{ "SYSASSERT", 0x35 },
{ "UCODE_VERSION_MISMATCH", 0x37 },
{ "BAD_COMMAND", 0x38 },
{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
{ "FATAL_ERROR", 0x3D },
{ "NMI_TRM_HW_ERR", 0x46 },
{ "NMI_INTERRUPT_TRM", 0x4C },
{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
{ "NMI_INTERRUPT_HOST", 0x66 },
{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
{ "ADVANCED_SYSASSERT", 0 },
};
static const char *
iwm_desc_lookup(uint32_t num)
{
int i;
for (i = 0; i < nitems(advanced_lookup) - 1; i++)
if (advanced_lookup[i].num == num)
return advanced_lookup[i].name;
/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
return advanced_lookup[i].name;
}
/*
* Support for dumping the error log seemed like a good idea ...
* but it's mostly hex junk and the only sensible thing is the
* hw/ucode revision (which we know anyway). Since it's here,
* I'll just leave it in, just in case e.g. the Intel guys want to
* help us decipher some "ADVANCED_SYSASSERT" later.
*/
static void
iwm_nic_error(struct iwm_softc *sc)
{
struct iwm_error_event_table table;
uint32_t base;
device_printf(sc->sc_dev, "dumping device error log\n");
base = sc->sc_uc.uc_error_event_table;
if (base < 0x800000 || base >= 0x80C000) {
device_printf(sc->sc_dev,
"Not valid error log pointer 0x%08x\n", base);
return;
}
if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
device_printf(sc->sc_dev, "reading errlog failed\n");
return;
}
if (!table.valid) {
device_printf(sc->sc_dev, "errlog not found, skipping\n");
return;
}
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
sc->sc_flags, table.valid);
}
device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
iwm_desc_lookup(table.error_id));
device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
}
#endif
#define SYNC_RESP_STRUCT(_var_, _pkt_) \
do { \
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
_var_ = (void *)((_pkt_)+1); \
} while (/*CONSTCOND*/0)
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_) \
do { \
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);\
_ptr_ = (void *)((_pkt_)+1); \
} while (/*CONSTCOND*/0)
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
/*
* Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
* Basic structure from if_iwn
*/
static void
iwm_notif_intr(struct iwm_softc *sc)
{
uint16_t hw;
bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
BUS_DMASYNC_POSTREAD);
hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
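/*
 * 'hw' is the index of the last receive buffer the firmware has
 * closed, taken from the shared status area; process entries until
 * our cursor catches up with it.
 */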
while (sc->rxq.cur != hw) {
struct iwm_rx_ring *ring = &sc->rxq;
struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
struct iwm_rx_packet *pkt;
struct iwm_cmd_response *cresp;
int qid, idx;
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
pkt = mtod(data->m, struct iwm_rx_packet *);
qid = pkt->hdr.qid & ~0x80;
idx = pkt->hdr.idx;
IWM_DPRINTF(sc, IWM_DEBUG_INTR,
"rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
pkt->hdr.code, sc->rxq.cur, hw);
/*
* We randomly get these from the firmware; no idea why. They at
* least seem harmless, so just ignore them for now.
*/
if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
|| pkt->len_n_flags == htole32(0x55550000))) {
ADVANCE_RXQ(sc);
continue;
}
switch (pkt->hdr.code) {
case IWM_REPLY_RX_PHY_CMD:
iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
break;
case IWM_REPLY_RX_MPDU_CMD:
iwm_mvm_rx_rx_mpdu(sc, pkt, data);
break;
case IWM_TX_CMD:
iwm_mvm_rx_tx_cmd(sc, pkt, data);
break;
case IWM_MISSED_BEACONS_NOTIFICATION: {
struct iwm_missed_beacons_notif *resp;
int missed;
/* XXX look at mac_id to determine interface ID */
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
SYNC_RESP_STRUCT(resp, pkt);
missed = le32toh(resp->consec_missed_beacons);
IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
"%s: MISSED_BEACON: mac_id=%d, "
"consec_since_last_rx=%d, consec=%d, num_expect=%d "
"num_rx=%d\n",
__func__,
le32toh(resp->mac_id),
le32toh(resp->consec_missed_beacons_since_last_rx),
le32toh(resp->consec_missed_beacons),
le32toh(resp->num_expected_beacons),
le32toh(resp->num_recvd_beacons));
/* Be paranoid */
if (vap == NULL)
break;
/* XXX no net80211 locking? */
if (vap->iv_state == IEEE80211_S_RUN &&
(ic->ic_flags & IEEE80211_F_SCAN) == 0) {
if (missed > vap->iv_bmissthreshold) {
/* XXX bad locking; turn into task */
IWM_UNLOCK(sc);
ieee80211_beacon_miss(ic);
IWM_LOCK(sc);
}
}
break; }
case IWM_MVM_ALIVE: {
struct iwm_mvm_alive_resp *resp;
SYNC_RESP_STRUCT(resp, pkt);
sc->sc_uc.uc_error_event_table
= le32toh(resp->error_event_table_ptr);
sc->sc_uc.uc_log_event_table
= le32toh(resp->log_event_table_ptr);
sc->sched_base = le32toh(resp->scd_base_ptr);
sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;
sc->sc_uc.uc_intr = 1;
wakeup(&sc->sc_uc);
break; }
case IWM_CALIB_RES_NOTIF_PHY_DB: {
struct iwm_calib_res_notif_phy_db *phy_db_notif;
SYNC_RESP_STRUCT(phy_db_notif, pkt);
iwm_phy_db_set_section(sc, phy_db_notif);
break; }
case IWM_STATISTICS_NOTIFICATION: {
struct iwm_notif_statistics *stats;
SYNC_RESP_STRUCT(stats, pkt);
memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
sc->sc_noise = iwm_get_noise(&stats->rx.general);
break; }
case IWM_NVM_ACCESS_CMD:
if (sc->sc_wantresp == ((qid << 16) | idx)) {
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
memcpy(sc->sc_cmd_resp,
pkt, sizeof(sc->sc_cmd_resp));
}
break;
case IWM_PHY_CONFIGURATION_CMD:
case IWM_TX_ANT_CONFIGURATION_CMD:
case IWM_ADD_STA:
case IWM_MAC_CONTEXT_CMD:
case IWM_REPLY_SF_CFG_CMD:
case IWM_POWER_TABLE_CMD:
case IWM_PHY_CONTEXT_CMD:
case IWM_BINDING_CONTEXT_CMD:
case IWM_TIME_EVENT_CMD:
case IWM_SCAN_REQUEST_CMD:
case IWM_REPLY_BEACON_FILTERING_CMD:
case IWM_MAC_PM_POWER_TABLE:
case IWM_TIME_QUOTA_CMD:
case IWM_REMOVE_STA:
case IWM_TXPATH_FLUSH:
case IWM_LQ_CMD:
SYNC_RESP_STRUCT(cresp, pkt);
if (sc->sc_wantresp == ((qid << 16) | idx)) {
memcpy(sc->sc_cmd_resp,
pkt, sizeof(*pkt)+sizeof(*cresp));
}
break;
/* ignore */
case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
break;
case IWM_INIT_COMPLETE_NOTIF:
sc->sc_init_complete = 1;
wakeup(&sc->sc_init_complete);
break;
case IWM_SCAN_COMPLETE_NOTIFICATION: {
struct iwm_scan_complete_notif *notif;
SYNC_RESP_STRUCT(notif, pkt);
taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
break; }
case IWM_REPLY_ERROR: {
struct iwm_error_resp *resp;
SYNC_RESP_STRUCT(resp, pkt);
device_printf(sc->sc_dev,
"firmware error 0x%x, cmd 0x%x\n",
le32toh(resp->error_type),
resp->cmd_id);
break; }
case IWM_TIME_EVENT_NOTIFICATION: {
struct iwm_time_event_notif *notif;
SYNC_RESP_STRUCT(notif, pkt);
if (notif->status) {
if (le32toh(notif->action) &
IWM_TE_V2_NOTIF_HOST_EVENT_START)
sc->sc_auth_prot = 2;
else
sc->sc_auth_prot = 0;
} else {
sc->sc_auth_prot = -1;
}
IWM_DPRINTF(sc, IWM_DEBUG_INTR,
"%s: time event notification auth_prot=%d\n",
__func__, sc->sc_auth_prot);
wakeup(&sc->sc_auth_prot);
break; }
case IWM_MCAST_FILTER_CMD:
break;
default:
device_printf(sc->sc_dev,
"frame %d/%d %x UNHANDLED (this should "
"not happen)\n", qid, idx,
pkt->len_n_flags);
break;
}
/*
* Why test bit 0x80? The Linux driver:
*
* There is one exception: uCode sets bit 15 when it
* originates the response/notification, i.e. when the
* response/notification is not a direct response to a
* command sent by the driver. For example, uCode issues
* IWM_REPLY_RX when it sends a received frame to the driver;
* it is not a direct response to any driver command.
*
* Ok, so since when is 7 == 15? Well, the Linux driver
* uses a slightly different format for pkt->hdr, and "qid"
* is actually the upper byte of a two-byte field.
*/
if (!(pkt->hdr.qid & (1 << 7))) {
iwm_cmd_done(sc, pkt);
}
ADVANCE_RXQ(sc);
}
IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* Tell the firmware what we have processed.
* Seems like the hardware gets upset unless we align
* the write by 8??
*/
hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
static void
iwm_intr(void *arg)
{
struct iwm_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
int handled = 0;
int r1, r2, rv = 0;
int isperiodic = 0;
IWM_LOCK(sc);
IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
if (sc->sc_flags & IWM_FLAG_USE_ICT) {
uint32_t *ict = sc->ict_dma.vaddr;
int tmp;
tmp = htole32(ict[sc->ict_cur]);
if (!tmp)
goto out_ena;
/*
* OK, there was something. Keep plowing until we have it all.
*/
r1 = r2 = 0;
while (tmp) {
r1 |= tmp;
ict[sc->ict_cur] = 0;
sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
tmp = htole32(ict[sc->ict_cur]);
}
/* this is where the fun begins. don't ask */
if (r1 == 0xffffffff)
r1 = 0;
/* I am not expected to understand this */
if (r1 & 0xc0000)
r1 |= 0x8000;
r1 = (0xff & r1) | ((0xff00 & r1) << 16);
} else {
r1 = IWM_READ(sc, IWM_CSR_INT);
/* "hardware gone" (where, fishing?) */
if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
goto out;
r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
}
if (r1 == 0 && r2 == 0) {
goto out_ena;
}
IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
/* ignored */
handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
int i;
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
iwm_nic_error(sc);
/* Dump driver status (TX and RX rings) while we're here. */
device_printf(sc->sc_dev, "driver status:\n");
for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
struct iwm_tx_ring *ring = &sc->txq[i];
device_printf(sc->sc_dev,
" tx ring %2d: qid=%-2d cur=%-3d "
"queued=%-3d\n",
i, ring->qid, ring->cur, ring->queued);
}
device_printf(sc->sc_dev,
" rx ring: cur=%d\n", sc->rxq.cur);
device_printf(sc->sc_dev,
" 802.11 state %d\n", vap->iv_state);
#endif
device_printf(sc->sc_dev, "fatal firmware error\n");
- ifp->if_flags &= ~IFF_UP;
- iwm_stop_locked(ifp);
+ iwm_stop(sc);
rv = 1;
goto out;
}
if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
handled |= IWM_CSR_INT_BIT_HW_ERR;
device_printf(sc->sc_dev, "hardware error, stopping device\n");
- ifp->if_flags &= ~IFF_UP;
- iwm_stop_locked(ifp);
+ iwm_stop(sc);
rv = 1;
goto out;
}
/* firmware chunk loaded */
if (r1 & IWM_CSR_INT_BIT_FH_TX) {
IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
handled |= IWM_CSR_INT_BIT_FH_TX;
sc->sc_fw_chunk_done = 1;
wakeup(&sc->sc_fw);
}
if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
handled |= IWM_CSR_INT_BIT_RF_KILL;
- if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
+ if (iwm_check_rfkill(sc)) {
device_printf(sc->sc_dev,
"%s: rfkill switch, disabling interface\n",
__func__);
- ifp->if_flags &= ~IFF_UP;
- iwm_stop_locked(ifp);
+ iwm_stop(sc);
}
}
/*
* The Linux driver uses periodic interrupts to avoid races.
* We cargo-cult like it's going out of fashion.
*/
if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
IWM_WRITE_1(sc,
IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
isperiodic = 1;
}
if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
iwm_notif_intr(sc);
/* enable periodic interrupt, see above */
if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
IWM_CSR_INT_PERIODIC_ENA);
}
if (__predict_false(r1 & ~handled))
IWM_DPRINTF(sc, IWM_DEBUG_INTR,
"%s: unhandled interrupts: %x\n", __func__, r1);
rv = 1;
out_ena:
iwm_restore_interrupts(sc);
out:
IWM_UNLOCK(sc);
return;
}
/*
* Autoconf glue-sniffing
*/
#define PCI_VENDOR_INTEL 0x8086
#define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3
#define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4
#define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1
#define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2
#define PCI_PRODUCT_INTEL_WL_7265_1 0x095a
#define PCI_PRODUCT_INTEL_WL_7265_2 0x095b
static const struct iwm_devices {
uint16_t device;
const char *name;
} iwm_devices[] = {
{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
};
static int
iwm_probe(device_t dev)
{
int i;
for (i = 0; i < nitems(iwm_devices); i++)
if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
pci_get_device(dev) == iwm_devices[i].device) {
device_set_desc(dev, iwm_devices[i].name);
return (BUS_PROBE_DEFAULT);
}
return (ENXIO);
}
static int
iwm_dev_check(device_t dev)
{
struct iwm_softc *sc;
sc = device_get_softc(dev);
switch (pci_get_device(dev)) {
case PCI_PRODUCT_INTEL_WL_3160_1:
case PCI_PRODUCT_INTEL_WL_3160_2:
sc->sc_fwname = "iwm3160fw";
sc->host_interrupt_operation_mode = 1;
return (0);
case PCI_PRODUCT_INTEL_WL_7260_1:
case PCI_PRODUCT_INTEL_WL_7260_2:
sc->sc_fwname = "iwm7260fw";
sc->host_interrupt_operation_mode = 1;
return (0);
case PCI_PRODUCT_INTEL_WL_7265_1:
case PCI_PRODUCT_INTEL_WL_7265_2:
sc->sc_fwname = "iwm7265fw";
sc->host_interrupt_operation_mode = 0;
return (0);
default:
device_printf(dev, "unknown adapter type\n");
return ENXIO;
}
}
static int
iwm_pci_attach(device_t dev)
{
struct iwm_softc *sc;
int count, error, rid;
uint16_t reg;
sc = device_get_softc(dev);
/* Clear device-specific "PCI retry timeout" register (41h). */
reg = pci_read_config(dev, 0x40, sizeof(reg));
pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
/* Enable bus-mastering and hardware bug workaround. */
pci_enable_busmaster(dev);
reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
/* if !MSI */
if (reg & PCIM_STATUS_INTxSTATE) {
reg &= ~PCIM_STATUS_INTxSTATE;
}
pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
rid = PCIR_BAR(0);
sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->sc_mem == NULL) {
device_printf(sc->sc_dev, "can't map mem space\n");
return (ENXIO);
}
sc->sc_st = rman_get_bustag(sc->sc_mem);
sc->sc_sh = rman_get_bushandle(sc->sc_mem);
/* Install interrupt handler. */
count = 1;
rid = 0;
if (pci_alloc_msi(dev, &count) == 0)
rid = 1;
sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
(rid != 0 ? 0 : RF_SHAREABLE));
if (sc->sc_irq == NULL) {
device_printf(dev, "can't map interrupt\n");
return (ENXIO);
}
error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
NULL, iwm_intr, sc, &sc->sc_ih);
if (sc->sc_ih == NULL) {
device_printf(dev, "can't establish interrupt");
return (ENXIO);
}
sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
return (0);
}
static void
iwm_pci_detach(device_t dev)
{
struct iwm_softc *sc = device_get_softc(dev);
if (sc->sc_irq != NULL) {
bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bus_release_resource(dev, SYS_RES_IRQ,
rman_get_rid(sc->sc_irq), sc->sc_irq);
pci_release_msi(dev);
}
if (sc->sc_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
rman_get_rid(sc->sc_mem), sc->sc_mem);
}
static int
iwm_attach(device_t dev)
{
- struct iwm_softc *sc;
- struct ieee80211com *ic;
- struct ifnet *ifp;
+ struct iwm_softc *sc = device_get_softc(dev);
+ struct ieee80211com *ic = &sc->sc_ic;
int error;
int txq_i, i;
- sc = device_get_softc(dev);
sc->sc_dev = dev;
mtx_init(&sc->sc_mtx, "iwm_mtx", MTX_DEF, 0);
-
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->sc_tq);
error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
if (error != 0) {
device_printf(dev, "can't start threads, error %d\n",
error);
goto fail;
}
/* PCI attach */
error = iwm_pci_attach(dev);
if (error != 0)
goto fail;
sc->sc_wantresp = -1;
/* Check device type */
error = iwm_dev_check(dev);
if (error != 0)
goto fail;
sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
/*
* We now start fiddling with the hardware
*/
sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
if (iwm_prepare_card_hw(sc) != 0) {
device_printf(dev, "could not initialize hardware\n");
goto fail;
}
/* Allocate DMA memory for firmware transfers. */
if ((error = iwm_alloc_fwmem(sc)) != 0) {
device_printf(dev, "could not allocate memory for firmware\n");
goto fail;
}
/* Allocate "Keep Warm" page. */
if ((error = iwm_alloc_kw(sc)) != 0) {
device_printf(dev, "could not allocate keep warm page\n");
goto fail;
}
/* We use ICT interrupts */
if ((error = iwm_alloc_ict(sc)) != 0) {
device_printf(dev, "could not allocate ICT table\n");
goto fail;
}
/* Allocate TX scheduler "rings". */
if ((error = iwm_alloc_sched(sc)) != 0) {
device_printf(dev, "could not allocate TX scheduler rings\n");
goto fail;
}
/* Allocate TX rings */
for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
if ((error = iwm_alloc_tx_ring(sc,
&sc->txq[txq_i], txq_i)) != 0) {
device_printf(dev,
"could not allocate TX ring %d\n",
txq_i);
goto fail;
}
}
/* Allocate RX ring. */
if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
device_printf(dev, "could not allocate RX ring\n");
goto fail;
}
/* Clear pending interrupts. */
IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
- sc->sc_ifp = ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- goto fail;
- }
- ifp->if_softc = sc;
- if_initname(ifp, "iwm", device_get_unit(dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = iwm_init;
- ifp->if_ioctl = iwm_ioctl;
- ifp->if_start = iwm_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- /*
- * Set it here so we can initialise net80211.
- * But, if we fail before we call net80211_ifattach(),
- * we can't just call iwm_detach() or it'll free
- * net80211 without it having been setup.
- */
- sc->sc_ic = ic = ifp->if_l2com;
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(sc->sc_dev);
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
/* Set device capabilities. */
ic->ic_caps =
IEEE80211_C_STA |
IEEE80211_C_WPA | /* WPA/RSN */
IEEE80211_C_WME |
IEEE80211_C_SHSLOT | /* short slot time supported */
IEEE80211_C_SHPREAMBLE /* short preamble supported */
// IEEE80211_C_BGSCAN /* capable of bg scanning */
;
for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
sc->sc_phyctxt[i].id = i;
sc->sc_phyctxt[i].color = 0;
sc->sc_phyctxt[i].ref = 0;
sc->sc_phyctxt[i].channel = NULL;
}
/* Max RSSI */
sc->sc_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
sc->sc_preinit_hook.ich_func = iwm_preinit;
sc->sc_preinit_hook.ich_arg = sc;
if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
device_printf(dev, "config_intrhook_establish failed\n");
goto fail;
}
#ifdef IWM_DEBUG
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
#endif
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
"<-%s\n", __func__);
return 0;
/* Free allocated memory if something failed during attachment. */
fail:
iwm_detach_local(sc, 0);
return ENXIO;
}
static int
iwm_update_edca(struct ieee80211com *ic)
{
struct iwm_softc *sc = ic->ic_softc;
device_printf(sc->sc_dev, "%s: called\n", __func__);
return (0);
}
static void
iwm_preinit(void *arg)
{
struct iwm_softc *sc = arg;
device_t dev = sc->sc_dev;
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
int error;
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
"->%s\n", __func__);
IWM_LOCK(sc);
if ((error = iwm_start_hw(sc)) != 0) {
device_printf(dev, "could not initialize hardware\n");
IWM_UNLOCK(sc);
goto fail;
}
error = iwm_run_init_mvm_ucode(sc, 1);
iwm_stop_device(sc);
if (error) {
IWM_UNLOCK(sc);
goto fail;
}
device_printf(dev,
"revision: 0x%x, firmware %d.%d (API ver. %d)\n",
sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
IWM_UCODE_MAJOR(sc->sc_fwver),
IWM_UCODE_MINOR(sc->sc_fwver),
IWM_UCODE_API(sc->sc_fwver));
/* not all hardware can do 5GHz band */
if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
IWM_UNLOCK(sc);
/*
* At this point we've committed - if we fail to do setup,
* we now also have to tear down the net80211 state.
*/
- ieee80211_ifattach(ic, sc->sc_bssid);
+ ieee80211_ifattach(ic);
ic->ic_vap_create = iwm_vap_create;
ic->ic_vap_delete = iwm_vap_delete;
ic->ic_raw_xmit = iwm_raw_xmit;
ic->ic_node_alloc = iwm_node_alloc;
ic->ic_scan_start = iwm_scan_start;
ic->ic_scan_end = iwm_scan_end;
ic->ic_update_mcast = iwm_update_mcast;
ic->ic_set_channel = iwm_set_channel;
ic->ic_scan_curchan = iwm_scan_curchan;
ic->ic_scan_mindwell = iwm_scan_mindwell;
ic->ic_wme.wme_update = iwm_update_edca;
+ ic->ic_parent = iwm_parent;
+ ic->ic_transmit = iwm_transmit;
iwm_radiotap_attach(sc);
if (bootverbose)
ieee80211_announce(ic);
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
"<-%s\n", __func__);
config_intrhook_disestablish(&sc->sc_preinit_hook);
return;
fail:
config_intrhook_disestablish(&sc->sc_preinit_hook);
iwm_detach_local(sc, 0);
}
/*
* Attach the interface to 802.11 radiotap.
*/
static void
iwm_radiotap_attach(struct iwm_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
"->%s begin\n", __func__);
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
IWM_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
IWM_RX_RADIOTAP_PRESENT);
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
"->%s end\n", __func__);
}
static struct ieee80211vap *
iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct iwm_vap *ivp;
struct ieee80211vap *vap;
- uint8_t mac1[IEEE80211_ADDR_LEN];
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return NULL;
- IEEE80211_ADDR_COPY(mac1, mac);
- ivp = (struct iwm_vap *) malloc(sizeof(struct iwm_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (ivp == NULL)
- return NULL;
+ ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &ivp->iv_vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1);
- IEEE80211_ADDR_COPY(ivp->macaddr, mac1);
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
vap->iv_bmissthreshold = 10; /* override default */
/* Override with driver methods. */
ivp->iv_newstate = vap->iv_newstate;
vap->iv_newstate = iwm_newstate;
ieee80211_ratectl_init(vap);
/* Complete setup. */
- ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status);
+ ieee80211_vap_attach(vap, iwm_media_change, ieee80211_media_status,
+ mac);
ic->ic_opmode = opmode;
return vap;
}
static void
iwm_vap_delete(struct ieee80211vap *vap)
{
struct iwm_vap *ivp = IWM_VAP(vap);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(ivp, M_80211_VAP);
}
static void
iwm_scan_start(struct ieee80211com *ic)
{
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct iwm_softc *sc = ic->ic_softc;
int error;
if (sc->sc_scanband)
return;
IWM_LOCK(sc);
error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
if (error) {
device_printf(sc->sc_dev, "could not initiate scan\n");
IWM_UNLOCK(sc);
ieee80211_cancel_scan(vap);
} else
IWM_UNLOCK(sc);
}
static void
iwm_scan_end(struct ieee80211com *ic)
{
}
static void
iwm_update_mcast(struct ieee80211com *ic)
{
}
static void
iwm_set_channel(struct ieee80211com *ic)
{
}
static void
iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
}
static void
iwm_scan_mindwell(struct ieee80211_scan_state *ss)
{
return;
}
void
iwm_init_task(void *arg1)
{
struct iwm_softc *sc = arg1;
- struct ifnet *ifp = sc->sc_ifp;
IWM_LOCK(sc);
while (sc->sc_flags & IWM_FLAG_BUSY)
msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
sc->sc_flags |= IWM_FLAG_BUSY;
- iwm_stop_locked(ifp);
- if ((ifp->if_flags & IFF_UP) &&
- (ifp->if_drv_flags & IFF_DRV_RUNNING))
+ iwm_stop(sc);
+ if (sc->sc_ic.ic_nrunning > 0)
iwm_init(sc);
sc->sc_flags &= ~IWM_FLAG_BUSY;
wakeup(&sc->sc_flags);
IWM_UNLOCK(sc);
}
static int
iwm_resume(device_t dev)
{
uint16_t reg;
/* Clear device-specific "PCI retry timeout" register (41h). */
reg = pci_read_config(dev, 0x40, sizeof(reg));
pci_write_config(dev, 0x40, reg & ~0xff00, sizeof(reg));
iwm_init_task(device_get_softc(dev));
return 0;
}
static int
iwm_suspend(device_t dev)
{
struct iwm_softc *sc = device_get_softc(dev);
- struct ifnet *ifp = sc->sc_ifp;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- iwm_stop(ifp, 0);
+ if (sc->sc_ic.ic_nrunning > 0) {
+ IWM_LOCK(sc);
+ iwm_stop(sc);
+ IWM_UNLOCK(sc);
+ }
return (0);
}
static int
iwm_detach_local(struct iwm_softc *sc, int do_net80211)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic;
struct iwm_fw_info *fw = &sc->sc_fw;
device_t dev = sc->sc_dev;
int i;
if (sc->sc_tq) {
taskqueue_drain_all(sc->sc_tq);
taskqueue_free(sc->sc_tq);
}
- if (ifp) {
- callout_drain(&sc->sc_watchdog_to);
- ic = sc->sc_ic;
- iwm_stop_device(sc);
- if (ic && do_net80211)
- ieee80211_ifdetach(ic);
- if_free(ifp);
- }
+ callout_drain(&sc->sc_watchdog_to);
+ iwm_stop_device(sc);
+ if (do_net80211)
+ ieee80211_ifdetach(&sc->sc_ic);
/* Free descriptor rings */
for (i = 0; i < nitems(sc->txq); i++)
iwm_free_tx_ring(sc, &sc->txq[i]);
/* Free firmware */
if (fw->fw_rawdata != NULL)
iwm_fw_info_free(fw);
/* free scheduler */
iwm_free_sched(sc);
if (sc->ict_dma.vaddr != NULL)
iwm_free_ict(sc);
if (sc->kw_dma.vaddr != NULL)
iwm_free_kw(sc);
if (sc->fw_dma.vaddr != NULL)
iwm_free_fwmem(sc);
/* Finished with the hardware - detach things */
iwm_pci_detach(dev);
+ mbufq_drain(&sc->sc_snd);
mtx_destroy(&sc->sc_mtx);
return (0);
}
static int
iwm_detach(device_t dev)
{
struct iwm_softc *sc = device_get_softc(dev);
return (iwm_detach_local(sc, 1));
}
static device_method_t iwm_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, iwm_probe),
DEVMETHOD(device_attach, iwm_attach),
DEVMETHOD(device_detach, iwm_detach),
DEVMETHOD(device_suspend, iwm_suspend),
DEVMETHOD(device_resume, iwm_resume),
DEVMETHOD_END
};
static driver_t iwm_pci_driver = {
"iwm",
iwm_pci_methods,
sizeof (struct iwm_softc)
};
static devclass_t iwm_devclass;
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
MODULE_DEPEND(iwm, firmware, 1, 1, 1);
MODULE_DEPEND(iwm, pci, 1, 1, 1);
MODULE_DEPEND(iwm, wlan, 1, 1, 1);
Index: head/sys/dev/iwm/if_iwm_mac_ctxt.c
===================================================================
--- head/sys/dev/iwm/if_iwm_mac_ctxt.c (revision 287196)
+++ head/sys/dev/iwm/if_iwm_mac_ctxt.c (revision 287197)
@@ -1,533 +1,533 @@
/* $OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $ */
/*
* Copyright (c) 2014 genua mbh <info@genua.de>
* Copyright (c) 2014 Fixup Software Ltd.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*-
* Based on BSD-licensed source modules in the Linux iwlwifi driver,
* which were used as the reference documentation for this implementation.
*
* Driver version we are currently based off of is
* Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
*
***********************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>
#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_mac_ctxt.h>
/*
* BEGIN mvm/mac-ctxt.c
*/
static void
iwm_mvm_ack_rates(struct iwm_softc *sc, int is2ghz,
int *cck_rates, int *ofdm_rates)
{
int lowest_present_ofdm = 100;
int lowest_present_cck = 100;
uint8_t cck = 0;
uint8_t ofdm = 0;
int i;
if (is2ghz) {
for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
cck |= (1 << i);
if (lowest_present_cck > i)
lowest_present_cck = i;
}
}
for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
int adj = i - IWM_FIRST_OFDM_RATE;
ofdm |= (1 << adj);
if (lowest_present_ofdm > i)
lowest_present_ofdm = i;
}
/*
* Now we've got the basic rates as bitmaps in the ofdm and cck
* variables. This isn't sufficient though, as there might not
* be all the right rates in the bitmap. E.g. if the only basic
* rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
* and 6 Mbps because the 802.11-2007 standard says in 9.6:
*
* [...] a STA responding to a received frame shall transmit
* its Control Response frame [...] at the highest rate in the
* BSSBasicRateSet parameter that is less than or equal to the
* rate of the immediately previous frame in the frame exchange
* sequence ([...]) and that is of the same modulation class
* ([...]) as the received frame. If no rate contained in the
* BSSBasicRateSet parameter meets these conditions, then the
* control frame sent in response to a received frame shall be
* transmitted at the highest mandatory rate of the PHY that is
* less than or equal to the rate of the received frame, and
* that is of the same modulation class as the received frame.
*
* As a consequence, we need to add all mandatory rates that are
* lower than all of the basic rates to these bitmaps.
*/
if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
/* 6M already there or needed so always add */
ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
/*
* CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
* Note, however:
* - if no CCK rates are basic, it must be ERP since there must
* be some basic rates at all, so they're OFDM => ERP PHY
* (or we're in 5 GHz, and the cck bitmap will never be used)
* - if 11M is a basic rate, it must be ERP as well, so add 5.5M
* - if 5.5M is basic, 1M and 2M are mandatory
* - if 2M is basic, 1M is mandatory
* - if 1M is basic, that's the only valid ACK rate.
* As a consequence, it's not as complicated as it sounds, just add
* any lower rates to the ACK rate bitmap.
*/
if (IWM_RATE_11M_INDEX < lowest_present_cck)
cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
if (IWM_RATE_5M_INDEX < lowest_present_cck)
cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
if (IWM_RATE_2M_INDEX < lowest_present_cck)
cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
/* 1M already there or needed so always add */
cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
*cck_rates = cck;
*ofdm_rates = ofdm;
}
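The fill-in rule described in the comments above can be exercised in isolation. Below is a minimal, standalone sketch of the "add any lower rates to the ACK rate bitmap" step for the CCK case; the bit indices (1, 2, 5.5 and 11 Mb/s mapped to bits 0..3) and the example basic-rate set are local to the sketch and are not the IWM_RATE_* indices the driver uses.

#include <stdio.h>

int
main(void)
{
	/* Example BSSBasicRateSet: only 5.5 and 11 Mb/s are basic. */
	unsigned cck = (1 << 2) | (1 << 3);	/* bit 0=1M, 1=2M, 2=5.5M, 3=11M */
	int lowest_basic = 2;			/* index of 5.5 Mb/s */

	/*
	 * Per the comment above: a control response must be sent at a
	 * rate less than or equal to the received frame's rate, so every
	 * CCK rate below the lowest basic rate is added to the bitmap.
	 */
	if (3 < lowest_basic)
		cck |= 1 << 3;		/* 11M */
	if (2 < lowest_basic)
		cck |= 1 << 2;		/* 5.5M */
	if (1 < lowest_basic)
		cck |= 1 << 1;		/* 2M */
	cck |= 1 << 0;			/* 1M is always needed */

	printf("cck ack-rate bitmap: 0x%x\n", cck);	/* prints 0xf */
	return (0);
}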
static void
iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
struct iwm_mac_ctx_cmd *cmd, uint32_t action)
{
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ieee80211_node *ni = vap->iv_bss;
int cck_ack_rates, ofdm_ack_rates;
int i;
int is2ghz;
/*
* id is the MAC address ID - something to do with MAC filtering.
* color - not sure.
*
* These are both functions of the vap, not of the node.
* So, for now, hard-code both to 0 (default).
*/
cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
IWM_DEFAULT_COLOR));
cmd->action = htole32(action);
cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
/*
* The TSF ID is one of four TSF tracking resources in the firmware.
* Read the iwlwifi/mvm code for more details.
*
* For now, just hard-code it to TSF tracking ID 0; we only support
* a single STA mode VAP.
*
* It's per-vap, not per-node.
*/
cmd->tsf_id = htole32(IWM_DEFAULT_TSFID);
- IEEE80211_ADDR_COPY(cmd->node_addr, sc->sc_bssid);
+ IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_macaddr);
/*
* XXX should we error out if in_assoc is 1 and ni == NULL?
*/
if (in->in_assoc) {
IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
} else {
/* eth broadcast address */
memset(cmd->bssid_addr, 0xff, sizeof(cmd->bssid_addr));
}
/*
* Default to 2ghz if no node information is given.
*/
if (in) {
is2ghz = !! IEEE80211_IS_CHAN_2GHZ(in->in_ni.ni_chan);
} else {
is2ghz = 1;
}
iwm_mvm_ack_rates(sc, is2ghz, &cck_ack_rates, &ofdm_ack_rates);
cmd->cck_rates = htole32(cck_ack_rates);
cmd->ofdm_rates = htole32(ofdm_ack_rates);
cmd->cck_short_preamble
= htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
cmd->short_slot
= htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
? IWM_MAC_FLG_SHORT_SLOT : 0);
/* XXX TODO: set wme parameters; also handle getting updated wme parameters */
for (i = 0; i < IWM_AC_NUM+1; i++) {
int txf = i;
cmd->ac[txf].cw_min = htole16(0x0f);
cmd->ac[txf].cw_max = htole16(0x3f);
cmd->ac[txf].aifsn = 1;
cmd->ac[txf].fifos_mask = (1 << txf);
cmd->ac[txf].edca_txop = 0;
}
if (ic->ic_flags & IEEE80211_F_USEPROT)
cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
}
static int
iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *sc, struct iwm_mac_ctx_cmd *cmd)
{
int ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC,
sizeof(*cmd), cmd);
if (ret)
device_printf(sc->sc_dev,
"%s: Failed to send MAC context (action:%d): %d\n",
__func__, le32toh(cmd->action), ret);
return ret;
}
/*
* Fill the specific data for mac context of type station or p2p client
*/
static void
iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
{
struct ieee80211_node *ni = &in->in_ni;
unsigned dtim_period, dtim_count;
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
/* will this work? */
dtim_period = vap->iv_dtim_period;
dtim_count = vap->iv_dtim_count;
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_BEACON | IWM_DEBUG_CMD,
"%s: force_assoc_off=%d\n", __func__, force_assoc_off);
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_BEACON | IWM_DEBUG_CMD,
"DTIM: period=%d count=%d\n", dtim_period, dtim_count);
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_BEACON | IWM_DEBUG_CMD,
"BEACON: tsf: %llu, ni_intval=%d\n",
(unsigned long long) le64toh(ni->ni_tstamp.tsf),
ni->ni_intval);
/* We need the dtim_period to set the MAC as associated */
if (in->in_assoc && dtim_period && !force_assoc_off) {
uint64_t tsf;
uint32_t dtim_offs;
/*
* The DTIM count counts down, so when it is N that means N
* more beacon intervals happen until the DTIM TBTT. Therefore
* add this to the current time. If that ends up being in the
* future, the firmware will handle it.
*
* Also note that the system_timestamp (which we get here as
* "sync_device_ts") and TSF timestamp aren't at exactly the
* same offset in the frame -- the TSF is at the first symbol
* of the TSF, the system timestamp is at signal acquisition
* time. This means there's an offset between them of at most
* a few hundred microseconds (24 * 8 bits + PLCP time gives
* 384us in the longest case), this is currently not relevant
* as the firmware wakes up around 2ms before the TBTT.
*/
dtim_offs = dtim_count * ni->ni_intval;
/* convert TU to usecs */
dtim_offs *= 1024;
/*
* net80211: TSF is in 802.11 order, so convert up to local
* ordering before we manipulate things.
*/
tsf = le64toh(ni->ni_tstamp.tsf);
ctxt_sta->dtim_tsf = htole64(tsf + dtim_offs);
ctxt_sta->dtim_time = htole32(tsf + dtim_offs);
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_BEACON | IWM_DEBUG_CMD,
"DTIM TBTT is 0x%llx/0x%x, offset %d\n",
(long long)le64toh(ctxt_sta->dtim_tsf),
le32toh(ctxt_sta->dtim_time), dtim_offs);
ctxt_sta->is_assoc = htole32(1);
} else {
ctxt_sta->is_assoc = htole32(0);
}
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_CMD | IWM_DEBUG_BEACON,
"%s: ni_intval: %d, bi_reciprocal: %d, dtim_interval: %d, dtim_reciprocal: %d\n",
__func__,
ni->ni_intval,
iwm_mvm_reciprocal(ni->ni_intval),
ni->ni_intval * dtim_period,
iwm_mvm_reciprocal(ni->ni_intval * dtim_period));
ctxt_sta->bi = htole32(ni->ni_intval);
ctxt_sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(ni->ni_intval));
ctxt_sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
ctxt_sta->dtim_reciprocal =
htole32(iwm_mvm_reciprocal(ni->ni_intval * dtim_period));
/* 10 = CONN_MAX_LISTEN_INTERVAL */
ctxt_sta->listen_interval = htole32(10);
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_CMD | IWM_DEBUG_BEACON,
"%s: associd=%d\n", __func__, IEEE80211_AID(ni->ni_associd));
ctxt_sta->assoc_id = htole32(IEEE80211_AID(ni->ni_associd));
}
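The DTIM TBTT arithmetic in the block above is plain unit conversion and can be shown standalone. Here is a minimal sketch, assuming example values for the TSF, beacon interval and DTIM count rather than live net80211 node state; one TU is 1024 microseconds.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t tsf = 123456789ULL;	/* TSF of the last beacon, usec (example) */
	uint32_t bintval = 100;		/* beacon interval in TU (example) */
	uint32_t dtim_count = 2;	/* beacons left until the DTIM (example) */
	uint64_t dtim_offs, dtim_tbtt;

	/* The DTIM count counts down: N means N more beacon intervals
	 * until the DTIM TBTT, so add that span to the current TSF. */
	dtim_offs = (uint64_t)dtim_count * bintval;
	dtim_offs *= 1024;		/* TU -> microseconds */
	dtim_tbtt = tsf + dtim_offs;

	printf("DTIM TBTT at TSF %llu (offset %llu usec)\n",
	    (unsigned long long)dtim_tbtt, (unsigned long long)dtim_offs);
	return (0);
}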
static int
iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *sc, struct ieee80211vap *vap,
uint32_t action)
{
struct ieee80211_node *ni = vap->iv_bss;
struct iwm_node *in = (struct iwm_node *) ni;
struct iwm_mac_ctx_cmd cmd;
IWM_DPRINTF(sc, IWM_DEBUG_RESET,
"%s: called; action=%d\n", __func__, action);
memset(&cmd, 0, sizeof(cmd));
/* Fill the common data for all mac context types */
iwm_mvm_mac_ctxt_cmd_common(sc, in, &cmd, action);
/* Allow beacons to pass through as long as we are not associated, or we
* do not have dtim period information */
if (!in->in_assoc || !vap->iv_dtim_period)
cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
else
cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);
/* Fill the data specific for station mode */
iwm_mvm_mac_ctxt_cmd_fill_sta(sc, in,
&cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);
return iwm_mvm_mac_ctxt_send_cmd(sc, &cmd);
}
static int
iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct ieee80211vap *vap,
uint32_t action)
{
int ret;
ret = iwm_mvm_mac_ctxt_cmd_station(sc, vap, action);
if (ret)
return (ret);
return (0);
}
int
iwm_mvm_mac_ctxt_add(struct iwm_softc *sc, struct ieee80211vap *vap)
{
struct iwm_vap *iv = IWM_VAP(vap);
int ret;
if (iv->is_uploaded != 0) {
device_printf(sc->sc_dev, "%s: called; uploaded != 0\n",
__func__);
return (EIO);
}
ret = iwm_mvm_mac_ctx_send(sc, vap, IWM_FW_CTXT_ACTION_ADD);
if (ret)
return (ret);
iv->is_uploaded = 1;
return (0);
}
int
iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct ieee80211vap *vap)
{
struct iwm_vap *iv = IWM_VAP(vap);
int ret;
if (iv->is_uploaded == 0) {
device_printf(sc->sc_dev, "%s: called; uploaded = 0\n",
__func__);
return (EIO);
}
ret = iwm_mvm_mac_ctx_send(sc, vap, IWM_FW_CTXT_ACTION_MODIFY);
if (ret)
return (ret);
return (0);
}
#if 0
static int
iwm_mvm_mac_ctxt_remove(struct iwm_softc *sc, struct iwm_node *in)
{
struct iwm_mac_ctx_cmd cmd;
int ret;
if (!in->in_uploaded) {
device_printf(sc->sc_dev,
"attempt to remove !uploaded node %p", in);
return EIO;
}
memset(&cmd, 0, sizeof(cmd));
cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
IWM_DEFAULT_COLOR));
cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
ret = iwm_mvm_send_cmd_pdu(sc,
IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
if (ret) {
device_printf(sc->sc_dev,
"Failed to remove MAC context: %d\n", ret);
return ret;
}
in->in_uploaded = 0;
return 0;
}
#endif
Index: head/sys/dev/iwm/if_iwm_phy_ctxt.c
===================================================================
--- head/sys/dev/iwm/if_iwm_phy_ctxt.c (revision 287196)
+++ head/sys/dev/iwm/if_iwm_phy_ctxt.c (revision 287197)
@@ -1,305 +1,305 @@
/* $OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $ */
/*
* Copyright (c) 2014 genua mbh <info@genua.de>
* Copyright (c) 2014 Fixup Software Ltd.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*-
* Based on BSD-licensed source modules in the Linux iwlwifi driver,
* which were used as the reference documentation for this implementation.
*
* Driver version we are currently based off of is
* Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
*
***********************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>
#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_phy_ctxt.h>
/*
* BEGIN iwlwifi/mvm/phy-ctxt.c
*/
/*
* Construct the generic fields of the PHY context command
*/
static void
iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
{
memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_CMD,
"%s: id=%d, colour=%d, action=%d, apply_time=%d\n",
__func__,
ctxt->id,
ctxt->color,
action,
apply_time);
cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
ctxt->color));
cmd->action = htole32(action);
cmd->apply_time = htole32(apply_time);
}
/*
* Add the phy configuration to the PHY context command
*/
static void
iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
struct iwm_phy_context_cmd *cmd, struct ieee80211_channel *chan,
uint8_t chains_static, uint8_t chains_dynamic)
{
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
uint8_t active_cnt, idle_cnt;
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_CMD,
"%s: 2ghz=%d, channel=%d, chains static=0x%x, dynamic=0x%x, "
"rx_ant=0x%x, tx_ant=0x%x\n",
__func__,
!! IEEE80211_IS_CHAN_2GHZ(chan),
ieee80211_chan2ieee(ic, chan),
chains_static,
chains_dynamic,
IWM_FW_VALID_RX_ANT(sc),
IWM_FW_VALID_TX_ANT(sc));
cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
/* Set the rx chains */
idle_cnt = chains_static;
active_cnt = chains_dynamic;
cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
IWM_PHY_RX_CHAIN_VALID_POS);
cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
cmd->rxchain_info |= htole32(active_cnt <<
IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
}
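The rxchain_info word assembled above is a straightforward bit-field pack of the valid antenna mask plus the idle and MIMO chain counts. A standalone sketch of that packing follows; the bit positions are placeholders chosen for illustration only, while the driver takes the real IWM_PHY_RX_CHAIN_*_POS values from if_iwmreg.h.

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions, not the values from if_iwmreg.h. */
#define VALID_POS	1
#define CNT_POS		10
#define MIMO_CNT_POS	12

int
main(void)
{
	uint32_t valid_rx_ant = 0x3;	/* example: two RX antennas available */
	uint32_t idle_cnt = 1;		/* chains kept on while idle (chains_static) */
	uint32_t active_cnt = 1;	/* chains used for MIMO RX (chains_dynamic) */
	uint32_t rxchain;

	rxchain  = valid_rx_ant << VALID_POS;
	rxchain |= idle_cnt << CNT_POS;
	rxchain |= active_cnt << MIMO_CNT_POS;

	printf("rxchain_info = 0x%08x\n", rxchain);
	return (0);
}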
/*
* Send a command
* only if something in the configuration changed: in case that this is the
* first time that the phy configuration is applied or in case that the phy
* configuration changed from the previous apply.
*/
static int
iwm_mvm_phy_ctxt_apply(struct iwm_softc *sc,
struct iwm_mvm_phy_ctxt *ctxt,
uint8_t chains_static, uint8_t chains_dynamic,
uint32_t action, uint32_t apply_time)
{
struct iwm_phy_context_cmd cmd;
int ret;
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_CMD,
"%s: called; channel=%p\n",
__func__,
ctxt->channel);
/* Set the command header fields */
iwm_mvm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
/* Set the command data */
iwm_mvm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
chains_static, chains_dynamic);
ret = iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, IWM_CMD_SYNC,
sizeof(struct iwm_phy_context_cmd), &cmd);
if (ret) {
device_printf(sc->sc_dev,
"PHY ctxt cmd error. ret=%d\n", ret);
}
return ret;
}
/*
* Send a command to add a PHY context based on the current HW configuration.
*/
int
iwm_mvm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
struct ieee80211_channel *chan,
uint8_t chains_static, uint8_t chains_dynamic)
{
ctxt->channel = chan;
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_CMD,
"%s: called; channel=%d\n",
__func__,
- ieee80211_chan2ieee(sc->sc_ic, chan));
+ ieee80211_chan2ieee(&sc->sc_ic, chan));
return iwm_mvm_phy_ctxt_apply(sc, ctxt,
chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_ADD, 0);
}
/*
* Send a command to modify the PHY context based on the current HW
* configuration. Note that the function does not check that the configuration
* changed.
*/
int
iwm_mvm_phy_ctxt_changed(struct iwm_softc *sc,
struct iwm_mvm_phy_ctxt *ctxt, struct ieee80211_channel *chan,
uint8_t chains_static, uint8_t chains_dynamic)
{
ctxt->channel = chan;
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_CMD,
"%s: called; channel=%d\n",
__func__,
- ieee80211_chan2ieee(sc->sc_ic, chan));
+ ieee80211_chan2ieee(&sc->sc_ic, chan));
return iwm_mvm_phy_ctxt_apply(sc, ctxt,
chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, 0);
}
/*
* END iwlwifi/mvm/phy-ctxt.c
*/
Index: head/sys/dev/iwm/if_iwm_power.c
===================================================================
--- head/sys/dev/iwm/if_iwm_power.c (revision 287196)
+++ head/sys/dev/iwm/if_iwm_power.c (revision 287197)
@@ -1,370 +1,370 @@
/* $OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $ */
/*
* Copyright (c) 2014 genua mbh <info@genua.de>
* Copyright (c) 2014 Fixup Software Ltd.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*-
* Based on BSD-licensed source modules in the Linux iwlwifi driver,
* which were used as the reference documentation for this implementation.
*
* Driver version we are currently based off of is
* Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
*
***********************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>
#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_power.h>
/*
* BEGIN mvm/power.c
*/
#define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
static int
iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *sc,
struct iwm_beacon_filter_cmd *cmd)
{
int ret;
ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
IWM_CMD_SYNC, sizeof(struct iwm_beacon_filter_cmd), cmd);
if (!ret) {
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"ba_enable_beacon_abort is: %d\n",
le32toh(cmd->ba_enable_beacon_abort));
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"ba_escape_timer is: %d\n",
le32toh(cmd->ba_escape_timer));
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"bf_debug_flag is: %d\n",
le32toh(cmd->bf_debug_flag));
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"bf_enable_beacon_filter is: %d\n",
le32toh(cmd->bf_enable_beacon_filter));
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"bf_energy_delta is: %d\n",
le32toh(cmd->bf_energy_delta));
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"bf_escape_timer is: %d\n",
le32toh(cmd->bf_escape_timer));
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"bf_roaming_energy_delta is: %d\n",
le32toh(cmd->bf_roaming_energy_delta));
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"bf_roaming_state is: %d\n",
le32toh(cmd->bf_roaming_state));
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"bf_temp_threshold is: %d\n",
le32toh(cmd->bf_temp_threshold));
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"bf_temp_fast_filter is: %d\n",
le32toh(cmd->bf_temp_fast_filter));
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"bf_temp_slow_filter is: %d\n",
le32toh(cmd->bf_temp_slow_filter));
}
return ret;
}
static void
iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *sc,
struct iwm_node *in, struct iwm_beacon_filter_cmd *cmd)
{
cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
static int
iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
int enable)
{
struct iwm_beacon_filter_cmd cmd = {
IWM_BF_CMD_CONFIG_DEFAULTS,
.bf_enable_beacon_filter = htole32(1),
.ba_enable_beacon_abort = htole32(enable),
};
if (!sc->sc_bf.bf_enabled)
return 0;
sc->sc_bf.ba_enabled = enable;
iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
}
static void
iwm_mvm_power_log(struct iwm_softc *sc, struct iwm_mac_power_cmd *cmd)
{
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"Sending power table command on mac id 0x%X for "
"power level %d, flags = 0x%X\n",
cmd->id_and_color, IWM_POWER_SCHEME_CAM, le16toh(cmd->flags));
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"Keep alive = %u sec\n", le16toh(cmd->keep_alive_seconds));
if (!(cmd->flags & htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"Disable power management\n");
return;
}
KASSERT(0, ("unhandled power management"));
#if 0
DPRINTF(mvm, "Rx timeout = %u usec\n",
le32_to_cpu(cmd->rx_data_timeout));
DPRINTF(mvm, "Tx timeout = %u usec\n",
le32_to_cpu(cmd->tx_data_timeout));
if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK))
DPRINTF(mvm, "DTIM periods to skip = %u\n",
cmd->skip_dtim_periods);
if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_LPRX_ENA_MSK))
DPRINTF(mvm, "LP RX RSSI threshold = %u\n",
cmd->lprx_rssi_threshold);
if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
DPRINTF(mvm, "uAPSD enabled\n");
DPRINTF(mvm, "Rx timeout (uAPSD) = %u usec\n",
le32_to_cpu(cmd->rx_data_timeout_uapsd));
DPRINTF(mvm, "Tx timeout (uAPSD) = %u usec\n",
le32_to_cpu(cmd->tx_data_timeout_uapsd));
DPRINTF(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
DPRINTF(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
DPRINTF(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
}
#endif
}
static void
iwm_mvm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
struct iwm_mac_power_cmd *cmd)
{
struct ieee80211_node *ni = &in->in_ni;
int dtimper, dtimper_msec;
int keep_alive;
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
IWM_DEFAULT_COLOR));
dtimper = vap->iv_dtim_period ?: 1;
/*
* Regardless of power management state the driver must set
* keep alive period. FW will use it for sending keep alive NDPs
* immediately after association. Check that keep alive period
* is at least 3 * DTIM
*/
dtimper_msec = dtimper * ni->ni_intval;
keep_alive
= MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
keep_alive = roundup(keep_alive, 1000) / 1000;
cmd->keep_alive_seconds = htole16(keep_alive);
}
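The keep-alive value computed above reduces to "at least three DTIM periods, never less than 25 seconds, rounded up to whole seconds". A minimal standalone sketch of that calculation, using example beacon-interval and DTIM values in place of live vap state:

#include <stdio.h>

#define KEEP_ALIVE_MIN_SEC	25	/* as in IWM_POWER_KEEP_ALIVE_PERIOD_SEC */

int
main(void)
{
	int dtim_period = 3;		/* example: DTIM every 3rd beacon */
	int beacon_intval_msec = 100;	/* example beacon interval */
	int dtim_msec, keep_alive_msec, keep_alive_sec;

	dtim_msec = dtim_period * beacon_intval_msec;
	keep_alive_msec = 3 * dtim_msec;
	if (keep_alive_msec < KEEP_ALIVE_MIN_SEC * 1000)
		keep_alive_msec = KEEP_ALIVE_MIN_SEC * 1000;
	/* The firmware field is in seconds; round up. */
	keep_alive_sec = (keep_alive_msec + 999) / 1000;

	printf("keep alive = %d sec\n", keep_alive_sec);	/* 25 here */
	return (0);
}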
int
iwm_mvm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
{
int ret;
int ba_enable;
struct iwm_mac_power_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
iwm_mvm_power_build_cmd(sc, in, &cmd);
iwm_mvm_power_log(sc, &cmd);
if ((ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE,
IWM_CMD_SYNC, sizeof(cmd), &cmd)) != 0)
return ret;
ba_enable = !!(cmd.flags &
htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
return iwm_mvm_update_beacon_abort(sc, in, ba_enable);
}
int
iwm_mvm_power_update_device(struct iwm_softc *sc)
{
struct iwm_device_power_cmd cmd = {
.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
};
if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
return 0;
cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE | IWM_DEBUG_CMD,
"Sending device power command with flags = 0x%X\n", cmd.flags);
return iwm_mvm_send_cmd_pdu(sc,
IWM_POWER_TABLE_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
}
int
iwm_mvm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
struct iwm_beacon_filter_cmd cmd = {
IWM_BF_CMD_CONFIG_DEFAULTS,
.bf_enable_beacon_filter = htole32(1),
};
int ret;
iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
if (ret == 0)
sc->sc_bf.bf_enabled = 1;
return ret;
}
int
iwm_mvm_disable_beacon_filter(struct iwm_softc *sc)
{
struct iwm_beacon_filter_cmd cmd;
int ret;
memset(&cmd, 0, sizeof(cmd));
if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
return 0;
ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
if (ret == 0)
sc->sc_bf.bf_enabled = 0;
return ret;
}
#if 0
static int
iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
if (!sc->sc_bf.bf_enabled)
return 0;
return iwm_mvm_enable_beacon_filter(sc, in);
}
#endif
Index: head/sys/dev/iwm/if_iwm_scan.c
===================================================================
--- head/sys/dev/iwm/if_iwm_scan.c (revision 287196)
+++ head/sys/dev/iwm/if_iwm_scan.c (revision 287197)
@@ -1,448 +1,448 @@
/* $OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $ */
/*
* Copyright (c) 2014 genua mbh <info@genua.de>
* Copyright (c) 2014 Fixup Software Ltd.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*-
* Based on BSD-licensed source modules in the Linux iwlwifi driver,
* which were used as the reference documentation for this implementation.
*
* Driver version we are currently based off of is
* Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
*
***********************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/linker.h>
#include <machine/bus.h>
#include <machine/endian.h>
#include <machine/resource.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/iwm/if_iwmreg.h>
#include <dev/iwm/if_iwmvar.h>
#include <dev/iwm/if_iwm_debug.h>
#include <dev/iwm/if_iwm_util.h>
#include <dev/iwm/if_iwm_scan.h>
/*
* BEGIN mvm/scan.c
*/
#define IWM_PLCP_QUIET_THRESH 1
#define IWM_ACTIVE_QUIET_TIME 10
#define LONG_OUT_TIME_PERIOD (600 * IEEE80211_DUR_TU)
#define SHORT_OUT_TIME_PERIOD (200 * IEEE80211_DUR_TU)
#define SUSPEND_TIME_PERIOD (100 * IEEE80211_DUR_TU)
static uint16_t
iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
{
uint16_t rx_chain;
uint8_t rx_ant;
rx_ant = IWM_FW_VALID_RX_ANT(sc);
rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
return htole16(rx_chain);
}
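/*
 * A worked example of the helper above, assuming both RX antennas are
 * valid (rx_ant == 0x3): the same 0x3 mask is written into the "valid",
 * "force MIMO selection" and "force selection" fields of the RX chain
 * word, the driver-force flag is set, and the result is byte-swapped to
 * little-endian for the firmware.  The exact bit positions come from the
 * IWM_PHY_RX_CHAIN_*_POS definitions in if_iwmreg.h.
 */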
static uint32_t
iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
{
if (!is_assoc)
return 0;
if (flags & 0x1)
return htole32(SHORT_OUT_TIME_PERIOD);
return htole32(LONG_OUT_TIME_PERIOD);
}
static uint32_t
iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
{
if (!is_assoc)
return 0;
return htole32(SUSPEND_TIME_PERIOD);
}
static uint32_t
iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
{
if (flags & IEEE80211_CHAN_2GHZ)
return htole32(IWM_PHY_BAND_24);
else
return htole32(IWM_PHY_BAND_5);
}
static uint32_t
iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
uint32_t tx_ant;
int i, ind;
for (i = 0, ind = sc->sc_scan_last_antenna;
i < IWM_RATE_MCS_ANT_NUM; i++) {
ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
sc->sc_scan_last_antenna = ind;
break;
}
}
tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
tx_ant);
else
return htole32(IWM_RATE_6M_PLCP | tx_ant);
}
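/*
 * Example of the antenna rotation above: with antennas A and B valid for
 * TX and sc_scan_last_antenna pointing at A, the loop advances to B and
 * records it, so successive scans alternate the probe TX antenna.  On a
 * 2 GHz scan with CCK allowed the probe goes out at 1 Mbps CCK, otherwise
 * at 6 Mbps OFDM; note the caller below currently passes no_cck = 1, so
 * in practice 6 Mbps is always used.
 */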
/*
* If req->n_ssids > 0, it means we should do an active scan.
* In case of active scan w/o directed scan, we receive a zero-length SSID
* just to notify that this scan is active and not passive.
* In order to notify the FW of the number of SSIDs we wish to scan (including
* the zero-length one), we need to set the corresponding bits in chan->type,
* one for each SSID, and set the active bit (first). The first SSID is
* already included in the probe template, so we need to set only
* req->n_ssids - 1 bits in addition to the first bit.
*/
static uint16_t
iwm_mvm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
{
if (flags & IEEE80211_CHAN_2GHZ)
return 30 + 3 * (n_ssids + 1);
return 20 + 2 * (n_ssids + 1);
}
static uint16_t
iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
{
return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
}
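/*
 * Worked example of the dwell formulas above (values are in the
 * firmware's dwell units, presumably TU): scanning one SSID (n_ssids == 1)
 * gives an active dwell of 30 + 3 * 2 = 36 on 2 GHz and 20 + 2 * 2 = 24
 * on 5 GHz, while the passive dwells are fixed at 100 + 20 = 120 and
 * 100 + 10 = 110 respectively.
 */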
static int
iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
int flags, int n_ssids, int basic_ssid)
{
- struct ieee80211com *ic = sc->sc_ic;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
(cmd->data + le16toh(cmd->tx_cmd.len));
int type = (1 << n_ssids) - 1;
struct ieee80211_channel *c;
int nchan, j;
if (!basic_ssid)
type |= (1 << n_ssids);
for (nchan = j = 0; j < ic->ic_nchans; j++) {
c = &ic->ic_channels[j];
/* For 2GHz, only populate 11b channels */
/* For 5GHz, only populate 11a channels */
/*
* Catch other channels, in case we have 900MHz channels or
* something in the chanlist.
*/
if (((flags & IEEE80211_CHAN_2GHZ) && (! IEEE80211_IS_CHAN_B(c))) ||
((flags & IEEE80211_CHAN_5GHZ) && (! IEEE80211_IS_CHAN_A(c)))) {
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_EEPROM,
"%s: skipping channel (freq=%d, ieee=%d, flags=0x%08x)\n",
__func__,
c->ic_freq,
c->ic_ieee,
c->ic_flags);
continue;
}
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_EEPROM,
"Adding channel %d (%d Mhz) to the list\n",
nchan, c->ic_freq);
chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
chan->type = htole32(type);
if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
chan->active_dwell = htole16(active_dwell);
chan->passive_dwell = htole16(passive_dwell);
chan->iteration_count = htole16(1);
chan++;
nchan++;
}
if (nchan == 0)
device_printf(sc->sc_dev,
"%s: NO CHANNEL!\n", __func__);
return nchan;
}
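/*
 * Example of the chan->type bitmask built above: with n_ssids == 2 and
 * firmware that includes the basic SSID in the probe template, type is
 * (1 << 2) - 1 == 0x3 (one bit per SSID); without the basic SSID an
 * extra bit is set, giving 0x7.  Channels flagged IEEE80211_CHAN_PASSIVE
 * then have the active bits cleared via ~IWM_SCAN_CHANNEL_TYPE_ACTIVE so
 * they are scanned passively.
 */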
/*
* Fill in probe request with the following parameters:
* TA is our vif HW address, which mac80211 ensures we have.
* Packet is broadcast, so this is both SA and DA.
* The probe request IE is made out of two parts: first comes the most
* prioritized SSID if a directed scan is requested; second comes whatever
* extra information was given to us as the scan request IE.
*/
static uint16_t
iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
const uint8_t *ie, int ie_len, int left)
{
uint8_t *pos = NULL;
/* Make sure there is enough space for the probe request,
* two mandatory IEs and the data */
left -= sizeof(*frame);
if (left < 0)
return 0;
frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
IEEE80211_FC0_SUBTYPE_PROBE_REQ;
frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
IEEE80211_ADDR_COPY(frame->i_addr1, ieee80211broadcastaddr);
IEEE80211_ADDR_COPY(frame->i_addr2, ta);
IEEE80211_ADDR_COPY(frame->i_addr3, ieee80211broadcastaddr);
/* for passive scans, no need to fill anything */
if (n_ssids == 0)
return sizeof(*frame);
/* points to the payload of the request */
pos = (uint8_t *)frame + sizeof(*frame);
/* fill in our SSID IE */
left -= ssid_len + 2;
if (left < 0)
return 0;
pos = ieee80211_add_ssid(pos, ssid, ssid_len);
if (ie && ie_len && left >= ie_len) {
memcpy(pos, ie, ie_len);
pos += ie_len;
}
return pos - (uint8_t *)frame;
}
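/*
 * Size accounting example for the probe template above: the 802.11
 * management header is sizeof(struct ieee80211_frame) == 24 bytes; a
 * directed scan for, say, an 8-byte SSID then adds a 2 + 8 byte SSID IE,
 * and any caller-supplied IEs are appended after that (the caller below
 * passes none).  The returned length becomes tx_cmd.len, and "left"
 * (sc_capa_max_probe_len) bounds the whole template.
 */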
int
iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
int n_ssids, uint8_t *ssid, int ssid_len)
{
struct iwm_host_cmd hcmd = {
.id = IWM_SCAN_REQUEST_CMD,
.len = { 0, },
.data = { sc->sc_scan_cmd, },
.flags = IWM_CMD_SYNC,
.dataflags = { IWM_HCMD_DFL_NOCOPY, },
};
struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
int is_assoc = 0;
int ret;
uint32_t status;
int basic_ssid =
!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);
sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
"Handling ieee80211 scan request\n");
memset(cmd, 0, sc->sc_scan_cmd_len);
cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
IWM_MAC_FILTER_IN_BEACON);
cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
cmd->repeats = htole32(1);
/*
* If the user asked for passive scan, don't change to active scan if
* you see any activity on the channel - remain passive.
*/
if (n_ssids > 0) {
cmd->passive2active = htole16(1);
cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
#if 0
if (basic_ssid) {
ssid = req->ssids[0].ssid;
ssid_len = req->ssids[0].ssid_len;
}
#endif
} else {
cmd->passive2active = 0;
cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
}
cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
IWM_TX_CMD_FLG_BT_DIS);
cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);
cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
(struct ieee80211_frame *)cmd->data,
- sc->sc_bssid, n_ssids, ssid, ssid_len,
+ sc->sc_ic.ic_macaddr, n_ssids, ssid, ssid_len,
NULL, 0, sc->sc_capa_max_probe_len));
cmd->channel_count
= iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);
cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
le16toh(cmd->tx_cmd.len) +
(cmd->channel_count * sizeof(struct iwm_scan_channel)));
hcmd.len[0] = le16toh(cmd->len);
status = IWM_SCAN_RESPONSE_OK;
ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
if (!ret && status == IWM_SCAN_RESPONSE_OK) {
IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
"Scan request was sent successfully\n");
} else {
/*
* If the scan failed, it usually means that the FW was unable
* to allocate the time events. Warn on it, but maybe we
* should try to send the command again with different params.
*/
sc->sc_scanband = 0;
ret = EIO;
}
return ret;
}
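/*
 * Layout of the scan command assembled above, as reflected in the final
 * length computation: a fixed struct iwm_scan_cmd header, followed by the
 * TX command payload holding the probe request template (tx_cmd.len
 * bytes), followed by channel_count struct iwm_scan_channel entries.
 * hcmd.len[0] carries that total, and since the buffer is passed with
 * IWM_HCMD_DFL_NOCOPY it is handed to the firmware from sc_scan_cmd
 * without an intermediate copy.
 */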
Index: head/sys/dev/iwm/if_iwmvar.h
===================================================================
--- head/sys/dev/iwm/if_iwmvar.h (revision 287196)
+++ head/sys/dev/iwm/if_iwmvar.h (revision 287197)
@@ -1,534 +1,530 @@
/* $OpenBSD: if_iwmvar.h,v 1.7 2015/03/02 13:51:10 jsg Exp $ */
/* $FreeBSD$ */
/*
* Copyright (c) 2014 genua mbh <info@genua.de>
* Copyright (c) 2014 Fixup Software Ltd.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*-
* Based on BSD-licensed source modules in the Linux iwlwifi driver,
* which were used as the reference documentation for this implementation.
*
* Driver version we are currently based off of is
* Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
*
***********************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
struct iwm_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint64_t wr_tsft;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_dbm_antsignal;
int8_t wr_dbm_antnoise;
} __packed;
#define IWM_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_TSFT) | \
(1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE))
struct iwm_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint8_t wt_rate;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
uint8_t wt_hwqueue;
} __packed;
#define IWM_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL))
#define IWM_UCODE_SECT_MAX 6
#define IWM_FWDMASEGSZ (192*1024)
/* sanity check value */
#define IWM_FWMAXSIZE (2*1024*1024)
/*
* fw_status is used to determine if we've already parsed the firmware file
*
* In addition to the following, status < 0 ==> -error
*/
#define IWM_FW_STATUS_NONE 0
#define IWM_FW_STATUS_INPROGRESS 1
#define IWM_FW_STATUS_DONE 2
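/*
 * Example of the convention above: after a successful firmware parse
 * fw_status is IWM_FW_STATUS_DONE; if the parse fails with, say, EINVAL,
 * the field holds -EINVAL so later callers can tell both that parsing
 * failed and which error occurred.
 */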
#define IWM_LOCK(_sc) mtx_lock(&sc->sc_mtx)
#define IWM_UNLOCK(_sc) mtx_unlock(&sc->sc_mtx)
enum iwm_ucode_type {
IWM_UCODE_TYPE_INIT,
IWM_UCODE_TYPE_REGULAR,
IWM_UCODE_TYPE_WOW,
IWM_UCODE_TYPE_MAX
};
struct iwm_fw_info {
const void *fw_rawdata;
size_t fw_rawsize;
int fw_status;
struct iwm_fw_sects {
struct iwm_fw_onesect {
const void *fws_data;
uint32_t fws_len;
uint32_t fws_devoff;
} fw_sect[IWM_UCODE_SECT_MAX];
size_t fw_totlen;
int fw_count;
} fw_sects[IWM_UCODE_TYPE_MAX];
};
struct iwm_nvm_data {
int n_hw_addrs;
uint8_t hw_addr[IEEE80211_ADDR_LEN];
uint8_t calib_version;
uint16_t calib_voltage;
uint16_t raw_temperature;
uint16_t kelvin_temperature;
uint16_t kelvin_voltage;
uint16_t xtal_calib[2];
int sku_cap_band_24GHz_enable;
int sku_cap_band_52GHz_enable;
int sku_cap_11n_enable;
int sku_cap_amt_enable;
int sku_cap_ipan_enable;
uint8_t radio_cfg_type;
uint8_t radio_cfg_step;
uint8_t radio_cfg_dash;
uint8_t radio_cfg_pnum;
uint8_t valid_tx_ant, valid_rx_ant;
uint16_t nvm_version;
uint8_t max_tx_pwr_half_dbm;
};
/* max bufs per tfd the driver will use */
#define IWM_MAX_CMD_TBS_PER_TFD 2
struct iwm_rx_packet;
struct iwm_host_cmd {
const void *data[IWM_MAX_CMD_TBS_PER_TFD];
struct iwm_rx_packet *resp_pkt;
unsigned long _rx_page_addr;
uint32_t _rx_page_order;
int handler_status;
uint32_t flags;
uint16_t len[IWM_MAX_CMD_TBS_PER_TFD];
uint8_t dataflags[IWM_MAX_CMD_TBS_PER_TFD];
uint8_t id;
};
/*
* DMA glue is from iwn
*/
typedef caddr_t iwm_caddr_t;
typedef void *iwm_hookarg_t;
struct iwm_dma_info {
bus_dma_tag_t tag;
bus_dmamap_t map;
bus_dma_segment_t seg;
bus_addr_t paddr;
void *vaddr;
bus_size_t size;
};
#define IWM_TX_RING_COUNT 256
#define IWM_TX_RING_LOMARK 192
#define IWM_TX_RING_HIMARK 224
struct iwm_tx_data {
bus_dmamap_t map;
bus_addr_t cmd_paddr;
bus_addr_t scratch_paddr;
struct mbuf *m;
struct iwm_node *in;
int done;
};
struct iwm_tx_ring {
struct iwm_dma_info desc_dma;
struct iwm_dma_info cmd_dma;
struct iwm_tfd *desc;
struct iwm_device_cmd *cmd;
bus_dma_tag_t data_dmat;
struct iwm_tx_data data[IWM_TX_RING_COUNT];
int qid;
int queued;
int cur;
};
#define IWM_RX_RING_COUNT 256
#define IWM_RBUF_COUNT (IWM_RX_RING_COUNT + 32)
/* Linux driver optionally uses 8k buffer */
#define IWM_RBUF_SIZE 4096
#define IWM_MAX_SCATTER 20
struct iwm_softc;
struct iwm_rbuf {
struct iwm_softc *sc;
void *vaddr;
bus_addr_t paddr;
};
struct iwm_rx_data {
struct mbuf *m;
bus_dmamap_t map;
int wantresp;
};
struct iwm_rx_ring {
struct iwm_dma_info desc_dma;
struct iwm_dma_info stat_dma;
struct iwm_dma_info buf_dma;
uint32_t *desc;
struct iwm_rb_status *stat;
struct iwm_rx_data data[IWM_RX_RING_COUNT];
bus_dma_tag_t data_dmat;
int cur;
};
#define IWM_FLAG_USE_ICT 0x01
#define IWM_FLAG_HW_INITED 0x02
#define IWM_FLAG_STOPPED 0x04
#define IWM_FLAG_RFKILL 0x08
#define IWM_FLAG_BUSY 0x10
struct iwm_ucode_status {
uint32_t uc_error_event_table;
uint32_t uc_log_event_table;
int uc_ok;
int uc_intr;
};
#define IWM_CMD_RESP_MAX PAGE_SIZE
#define IWM_OTP_LOW_IMAGE_SIZE 2048
#define IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500
#define IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
/*
* Command headers are in iwl-trans.h, which is full of all
* kinds of other junk, so we just replicate the structures here.
* First the software bits:
*/
enum IWM_CMD_MODE {
IWM_CMD_SYNC = 0,
IWM_CMD_ASYNC = (1 << 0),
IWM_CMD_WANT_SKB = (1 << 1),
IWM_CMD_SEND_IN_RFKILL = (1 << 2),
};
enum iwm_hcmd_dataflag {
IWM_HCMD_DFL_NOCOPY = (1 << 0),
IWM_HCMD_DFL_DUP = (1 << 1),
};
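/*
 * Example: the scan request in if_iwm_scan.c above is built as a
 * synchronous command (flags = IWM_CMD_SYNC) with dataflags[0] =
 * IWM_HCMD_DFL_NOCOPY, i.e. the pre-allocated sc_scan_cmd buffer is
 * referenced directly instead of being copied into the command queue
 * entry, while IWM_HCMD_DFL_DUP presumably asks the command layer to
 * duplicate the buffer instead.
 */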
/*
* iwlwifi/iwl-phy-db
*/
#define IWM_NUM_PAPD_CH_GROUPS 4
#define IWM_NUM_TXP_CH_GROUPS 9
struct iwm_phy_db_entry {
uint16_t size;
uint8_t *data;
};
struct iwm_phy_db {
struct iwm_phy_db_entry cfg;
struct iwm_phy_db_entry calib_nch;
struct iwm_phy_db_entry calib_ch_group_papd[IWM_NUM_PAPD_CH_GROUPS];
struct iwm_phy_db_entry calib_ch_group_txp[IWM_NUM_TXP_CH_GROUPS];
};
struct iwm_int_sta {
uint32_t sta_id;
uint32_t tfd_queue_msk;
};
struct iwm_mvm_phy_ctxt {
uint16_t id;
uint16_t color;
uint32_t ref;
struct ieee80211_channel *channel;
};
struct iwm_bf_data {
int bf_enabled; /* filtering */
int ba_enabled; /* abort */
int ave_beacon_signal;
int last_cqm_event;
};
struct iwm_vap {
struct ieee80211vap iv_vap;
uint8_t macaddr[IEEE80211_ADDR_LEN];
int is_uploaded;
int (*iv_newstate)(struct ieee80211vap *, enum ieee80211_state, int);
};
#define IWM_VAP(_vap) ((struct iwm_vap *)(_vap))
struct iwm_softc {
- struct ifnet *sc_ifp;
- device_t sc_dev;
- struct ieee80211com *sc_ic;
+ struct mtx sc_mtx;
+ struct mbufq sc_snd;
+ struct ieee80211com sc_ic;
+ device_t sc_dev;
- int sc_newstate_pending;
-
- uint8_t sc_bssid[IEEE80211_ADDR_LEN];
-
struct intr_config_hook sc_preinit_hook;
- struct mtx sc_mtx;
struct callout sc_watchdog_to;
struct task init_task;
struct resource *sc_irq;
struct resource *sc_mem;
bus_space_tag_t sc_st;
bus_space_handle_t sc_sh;
bus_size_t sc_sz;
bus_dma_tag_t sc_dmat;
void *sc_ih;
/* TX scheduler rings. */
struct iwm_dma_info sched_dma;
uint32_t sched_base;
/* TX/RX rings. */
struct iwm_tx_ring txq[IWM_MVM_MAX_QUEUES];
struct iwm_rx_ring rxq;
int qfullmsk;
int sc_sf_state;
/* ICT table. */
struct iwm_dma_info ict_dma;
int ict_cur;
int sc_hw_rev;
int sc_hw_id;
struct iwm_dma_info kw_dma;
struct iwm_dma_info fw_dma;
int sc_fw_chunk_done;
int sc_init_complete;
struct iwm_ucode_status sc_uc;
enum iwm_ucode_type sc_uc_current;
int sc_fwver;
int sc_capaflags;
int sc_capa_max_probe_len;
int sc_intmask;
int sc_flags;
uint32_t sc_debug;
/*
* So why do we need a separate stopped flag and a generation?
* The former protects the device from issuing commands when it's
* stopped (duh). The latter protects against a race from a very
* fast stop/unstop cycle where threads waiting for responses do
* not have a chance to run in between. Notably: we want to stop
* the device from interrupt context when it craps out, so we
* don't have the luxury of waiting for quiescence.
*/
int sc_generation;
const char *sc_fwname;
bus_size_t sc_fwdmasegsz;
struct iwm_fw_info sc_fw;
int sc_fw_phy_config;
struct iwm_tlv_calib_ctrl sc_default_calib[IWM_UCODE_TYPE_MAX];
struct iwm_nvm_data sc_nvm;
struct iwm_phy_db sc_phy_db;
struct iwm_bf_data sc_bf;
int sc_tx_timer;
struct iwm_scan_cmd *sc_scan_cmd;
size_t sc_scan_cmd_len;
int sc_scan_last_antenna;
int sc_scanband;
int sc_auth_prot;
int sc_fixed_ridx;
int sc_staid;
int sc_nodecolor;
uint8_t sc_cmd_resp[IWM_CMD_RESP_MAX];
int sc_wantresp;
struct taskqueue *sc_tq;
struct task sc_es_task;
struct iwm_rx_phy_info sc_last_phy_info;
int sc_ampdu_ref;
struct iwm_int_sta sc_aux_sta;
/* phy contexts. we only use the first one */
struct iwm_mvm_phy_ctxt sc_phyctxt[IWM_NUM_PHY_CTX];
struct iwm_notif_statistics sc_stats;
int sc_noise;
int host_interrupt_operation_mode;
caddr_t sc_drvbpf;
union {
struct iwm_rx_radiotap_header th;
uint8_t pad[IEEE80211_RADIOTAP_HDRLEN];
} sc_rxtapu;
#define sc_rxtap sc_rxtapu.th
int sc_rxtap_len;
union {
struct iwm_tx_radiotap_header th;
uint8_t pad[IEEE80211_RADIOTAP_HDRLEN];
} sc_txtapu;
#define sc_txtap sc_txtapu.th
int sc_txtap_len;
int sc_max_rssi;
};
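/*
 * The intended pattern behind sc_generation (see the comment above): a
 * thread waiting for a command response records the generation before
 * sleeping and compares it on wakeup; a mismatch means the device was
 * stopped and restarted in between, so the response must be treated as
 * aborted rather than consumed.
 */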
#define IWM_DEFAULT_MACID 0
#define IWM_DEFAULT_COLOR 0
#define IWM_DEFAULT_TSFID 0
struct iwm_node {
struct ieee80211_node in_ni;
struct iwm_mvm_phy_ctxt *in_phyctxt;
/* status "bits" */
int in_assoc;
struct iwm_lq_cmd in_lq;
uint8_t in_ridx[IEEE80211_RATE_MAXSIZE];
};
#define IWM_STATION_ID 0
#define IWM_ICT_SIZE 4096
#define IWM_ICT_COUNT (IWM_ICT_SIZE / sizeof (uint32_t))
#define IWM_ICT_PADDR_SHIFT 12
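/*
 * In other words: the ICT table is one 4096-byte page holding
 * IWM_ICT_COUNT == 1024 32-bit entries, and IWM_ICT_PADDR_SHIFT (12)
 * presumably means the hardware is given the table base as paddr >> 12,
 * i.e. a page number, which requires the allocation to be 4 KB aligned.
 */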
Index: head/sys/dev/iwn/if_iwn.c
===================================================================
--- head/sys/dev/iwn/if_iwn.c (revision 287196)
+++ head/sys/dev/iwn/if_iwn.c (revision 287197)
@@ -1,9053 +1,8981 @@
/*-
* Copyright (c) 2007-2009 Damien Bergamini <damien.bergamini@free.fr>
* Copyright (c) 2008 Benjamin Close <benjsc@FreeBSD.org>
* Copyright (c) 2008 Sam Leffler, Errno Consulting
* Copyright (c) 2011 Intel Corporation
* Copyright (c) 2013 Cedric GROSS <c.gross@kreiz-it.fr>
* Copyright (c) 2013 Adrian Chadd <adrian@FreeBSD.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
* adapters.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_wlan.h"
#include "opt_iwn.h"
#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/clock.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <dev/iwn/if_iwnreg.h>
#include <dev/iwn/if_iwnvar.h>
#include <dev/iwn/if_iwn_devid.h>
#include <dev/iwn/if_iwn_chip_cfg.h>
#include <dev/iwn/if_iwn_debug.h>
#include <dev/iwn/if_iwn_ioctl.h>
struct iwn_ident {
uint16_t vendor;
uint16_t device;
const char *name;
};
static const struct iwn_ident iwn_ident_table[] = {
{ 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205" },
{ 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000" },
{ 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000" },
{ 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205" },
{ 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250" },
{ 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250" },
{ 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030" },
{ 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030" },
{ 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230" },
{ 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230" },
{ 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150" },
{ 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150" },
{ 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN" },
{ 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN" },
/* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! */
{ 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230" },
{ 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230" },
{ 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130" },
{ 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130" },
{ 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100" },
{ 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100" },
{ 0x8086, IWN_DID_105_1, "Intel Centrino Wireless-N 105" },
{ 0x8086, IWN_DID_105_2, "Intel Centrino Wireless-N 105" },
{ 0x8086, IWN_DID_135_1, "Intel Centrino Wireless-N 135" },
{ 0x8086, IWN_DID_135_2, "Intel Centrino Wireless-N 135" },
{ 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965" },
{ 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300" },
{ 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200" },
{ 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965" },
{ 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965" },
{ 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100" },
{ 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965" },
{ 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300" },
{ 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300" },
{ 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100" },
{ 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300" },
{ 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200" },
{ 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350" },
{ 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350" },
{ 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150" },
{ 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150" },
{ 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235" },
{ 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235" },
{ 0, 0, NULL }
};
static int iwn_probe(device_t);
static int iwn_attach(device_t);
static int iwn4965_attach(struct iwn_softc *, uint16_t);
static int iwn5000_attach(struct iwn_softc *, uint16_t);
static int iwn_config_specific(struct iwn_softc *, uint16_t);
static void iwn_radiotap_attach(struct iwn_softc *);
static void iwn_sysctlattach(struct iwn_softc *);
static struct ieee80211vap *iwn_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void iwn_vap_delete(struct ieee80211vap *);
static int iwn_detach(device_t);
static int iwn_shutdown(device_t);
static int iwn_suspend(device_t);
static int iwn_resume(device_t);
static int iwn_nic_lock(struct iwn_softc *);
static int iwn_eeprom_lock(struct iwn_softc *);
static int iwn_init_otprom(struct iwn_softc *);
static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *,
void **, bus_size_t, bus_size_t);
static void iwn_dma_contig_free(struct iwn_dma_info *);
static int iwn_alloc_sched(struct iwn_softc *);
static void iwn_free_sched(struct iwn_softc *);
static int iwn_alloc_kw(struct iwn_softc *);
static void iwn_free_kw(struct iwn_softc *);
static int iwn_alloc_ict(struct iwn_softc *);
static void iwn_free_ict(struct iwn_softc *);
static int iwn_alloc_fwmem(struct iwn_softc *);
static void iwn_free_fwmem(struct iwn_softc *);
static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
int);
static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
static void iwn5000_ict_reset(struct iwn_softc *);
static int iwn_read_eeprom(struct iwn_softc *,
uint8_t macaddr[IEEE80211_ADDR_LEN]);
static void iwn4965_read_eeprom(struct iwn_softc *);
#ifdef IWN_DEBUG
static void iwn4965_print_power_group(struct iwn_softc *, int);
#endif
static void iwn5000_read_eeprom(struct iwn_softc *);
static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *);
static void iwn_read_eeprom_band(struct iwn_softc *, int);
static void iwn_read_eeprom_ht40(struct iwn_softc *, int);
static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *,
struct ieee80211_channel *);
static int iwn_setregdomain(struct ieee80211com *,
struct ieee80211_regdomain *, int,
struct ieee80211_channel[]);
static void iwn_read_eeprom_enhinfo(struct iwn_softc *);
static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *,
const uint8_t mac[IEEE80211_ADDR_LEN]);
static void iwn_newassoc(struct ieee80211_node *, int);
static int iwn_media_change(struct ifnet *);
static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void iwn_calib_timeout(void *);
static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
struct iwn_rx_data *);
static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
struct iwn_rx_data *);
static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
struct iwn_rx_data *);
static void iwn5000_rx_calib_results(struct iwn_softc *,
struct iwn_rx_desc *, struct iwn_rx_data *);
static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
struct iwn_rx_data *);
static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
struct iwn_rx_data *);
static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
struct iwn_rx_data *);
static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
uint8_t);
static void iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, int, void *);
static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
static void iwn_notif_intr(struct iwn_softc *);
static void iwn_wakeup_intr(struct iwn_softc *);
static void iwn_rftoggle_intr(struct iwn_softc *);
static void iwn_fatal_intr(struct iwn_softc *);
static void iwn_intr(void *);
static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
uint16_t);
static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
uint16_t);
#ifdef notyet
static void iwn5000_reset_sched(struct iwn_softc *, int, int);
#endif
static int iwn_tx_data(struct iwn_softc *, struct mbuf *,
struct ieee80211_node *);
static int iwn_tx_data_raw(struct iwn_softc *, struct mbuf *,
struct ieee80211_node *,
const struct ieee80211_bpf_params *params);
static void iwn_xmit_task(void *arg0, int pending);
static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
-static void iwn_start(struct ifnet *);
-static void iwn_start_locked(struct ifnet *);
+static int iwn_transmit(struct ieee80211com *, struct mbuf *);
+static void iwn_start_locked(struct iwn_softc *);
static void iwn_watchdog(void *);
-static int iwn_ioctl(struct ifnet *, u_long, caddr_t);
+static int iwn_ioctl(struct ieee80211com *, u_long , void *);
+static void iwn_parent(struct ieee80211com *);
static int iwn_cmd(struct iwn_softc *, int, const void *, int, int);
static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
int);
static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
int);
static int iwn_set_link_quality(struct iwn_softc *,
struct ieee80211_node *);
static int iwn_add_broadcast_node(struct iwn_softc *, int);
static int iwn_updateedca(struct ieee80211com *);
static void iwn_update_mcast(struct ieee80211com *);
static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
static int iwn_set_critical_temp(struct iwn_softc *);
static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
static void iwn4965_power_calibration(struct iwn_softc *, int);
static int iwn4965_set_txpower(struct iwn_softc *,
struct ieee80211_channel *, int);
static int iwn5000_set_txpower(struct iwn_softc *,
struct ieee80211_channel *, int);
static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *);
static int iwn_get_noise(const struct iwn_rx_general_stats *);
static int iwn4965_get_temperature(struct iwn_softc *);
static int iwn5000_get_temperature(struct iwn_softc *);
static int iwn_init_sensitivity(struct iwn_softc *);
static void iwn_collect_noise(struct iwn_softc *,
const struct iwn_rx_general_stats *);
static int iwn4965_init_gains(struct iwn_softc *);
static int iwn5000_init_gains(struct iwn_softc *);
static int iwn4965_set_gains(struct iwn_softc *);
static int iwn5000_set_gains(struct iwn_softc *);
static void iwn_tune_sensitivity(struct iwn_softc *,
const struct iwn_rx_stats *);
static void iwn_save_stats_counters(struct iwn_softc *,
const struct iwn_stats *);
static int iwn_send_sensitivity(struct iwn_softc *);
static void iwn_check_rx_recovery(struct iwn_softc *, struct iwn_stats *);
static int iwn_set_pslevel(struct iwn_softc *, int, int, int);
static int iwn_send_btcoex(struct iwn_softc *);
static int iwn_send_advanced_btcoex(struct iwn_softc *);
static int iwn5000_runtime_calib(struct iwn_softc *);
static int iwn_config(struct iwn_softc *);
static int iwn_scan(struct iwn_softc *, struct ieee80211vap *,
struct ieee80211_scan_state *, struct ieee80211_channel *);
static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap);
static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap);
static int iwn_ampdu_rx_start(struct ieee80211_node *,
struct ieee80211_rx_ampdu *, int, int, int);
static void iwn_ampdu_rx_stop(struct ieee80211_node *,
struct ieee80211_rx_ampdu *);
static int iwn_addba_request(struct ieee80211_node *,
struct ieee80211_tx_ampdu *, int, int, int);
static int iwn_addba_response(struct ieee80211_node *,
struct ieee80211_tx_ampdu *, int, int, int);
static int iwn_ampdu_tx_start(struct ieee80211com *,
struct ieee80211_node *, uint8_t);
static void iwn_ampdu_tx_stop(struct ieee80211_node *,
struct ieee80211_tx_ampdu *);
static void iwn4965_ampdu_tx_start(struct iwn_softc *,
struct ieee80211_node *, int, uint8_t, uint16_t);
static void iwn4965_ampdu_tx_stop(struct iwn_softc *, int,
uint8_t, uint16_t);
static void iwn5000_ampdu_tx_start(struct iwn_softc *,
struct ieee80211_node *, int, uint8_t, uint16_t);
static void iwn5000_ampdu_tx_stop(struct iwn_softc *, int,
uint8_t, uint16_t);
static int iwn5000_query_calibration(struct iwn_softc *);
static int iwn5000_send_calibration(struct iwn_softc *);
static int iwn5000_send_wimax_coex(struct iwn_softc *);
static int iwn5000_crystal_calib(struct iwn_softc *);
static int iwn5000_temp_offset_calib(struct iwn_softc *);
static int iwn5000_temp_offset_calibv2(struct iwn_softc *);
static int iwn4965_post_alive(struct iwn_softc *);
static int iwn5000_post_alive(struct iwn_softc *);
static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
int);
static int iwn4965_load_firmware(struct iwn_softc *);
static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
const uint8_t *, int);
static int iwn5000_load_firmware(struct iwn_softc *);
static int iwn_read_firmware_leg(struct iwn_softc *,
struct iwn_fw_info *);
static int iwn_read_firmware_tlv(struct iwn_softc *,
struct iwn_fw_info *, uint16_t);
static int iwn_read_firmware(struct iwn_softc *);
static int iwn_clock_wait(struct iwn_softc *);
static int iwn_apm_init(struct iwn_softc *);
static void iwn_apm_stop_master(struct iwn_softc *);
static void iwn_apm_stop(struct iwn_softc *);
static int iwn4965_nic_config(struct iwn_softc *);
static int iwn5000_nic_config(struct iwn_softc *);
static int iwn_hw_prepare(struct iwn_softc *);
static int iwn_hw_init(struct iwn_softc *);
static void iwn_hw_stop(struct iwn_softc *);
static void iwn_radio_on(void *, int);
static void iwn_radio_off(void *, int);
static void iwn_panicked(void *, int);
static void iwn_init_locked(struct iwn_softc *);
-static void iwn_init(void *);
+static void iwn_init(struct iwn_softc *);
static void iwn_stop_locked(struct iwn_softc *);
static void iwn_stop(struct iwn_softc *);
static void iwn_scan_start(struct ieee80211com *);
static void iwn_scan_end(struct ieee80211com *);
static void iwn_set_channel(struct ieee80211com *);
static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void iwn_scan_mindwell(struct ieee80211_scan_state *);
static void iwn_hw_reset(void *, int);
#ifdef IWN_DEBUG
static char *iwn_get_csr_string(int);
static void iwn_debug_register(struct iwn_softc *);
#endif
static device_method_t iwn_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, iwn_probe),
DEVMETHOD(device_attach, iwn_attach),
DEVMETHOD(device_detach, iwn_detach),
DEVMETHOD(device_shutdown, iwn_shutdown),
DEVMETHOD(device_suspend, iwn_suspend),
DEVMETHOD(device_resume, iwn_resume),
DEVMETHOD_END
};
static driver_t iwn_driver = {
"iwn",
iwn_methods,
sizeof(struct iwn_softc)
};
static devclass_t iwn_devclass;
DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL);
MODULE_VERSION(iwn, 1);
MODULE_DEPEND(iwn, firmware, 1, 1, 1);
MODULE_DEPEND(iwn, pci, 1, 1, 1);
MODULE_DEPEND(iwn, wlan, 1, 1, 1);
static int
iwn_probe(device_t dev)
{
const struct iwn_ident *ident;
for (ident = iwn_ident_table; ident->name != NULL; ident++) {
if (pci_get_vendor(dev) == ident->vendor &&
pci_get_device(dev) == ident->device) {
device_set_desc(dev, ident->name);
return (BUS_PROBE_DEFAULT);
}
}
return ENXIO;
}
static int
iwn_is_3stream_device(struct iwn_softc *sc)
{
/* XXX for now only 5300, until the 5350 can be tested */
if (sc->hw_type == IWN_HW_REV_TYPE_5300)
return (1);
return (0);
}
static int
iwn_attach(device_t dev)
{
struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev);
struct ieee80211com *ic;
- struct ifnet *ifp;
int i, error, rid;
- uint8_t macaddr[IEEE80211_ADDR_LEN];
sc->sc_dev = dev;
#ifdef IWN_DEBUG
error = resource_int_value(device_get_name(sc->sc_dev),
device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
if (error != 0)
sc->sc_debug = 0;
#else
sc->sc_debug = 0;
#endif
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__);
/*
* Get the offset of the PCI Express Capability Structure in PCI
* Configuration Space.
*/
error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
if (error != 0) {
device_printf(dev, "PCIe capability structure not found!\n");
return error;
}
/* Clear device-specific "PCI retry timeout" register (41h). */
pci_write_config(dev, 0x41, 0, 1);
/* Enable bus-mastering. */
pci_enable_busmaster(dev);
rid = PCIR_BAR(0);
sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->mem == NULL) {
device_printf(dev, "can't map mem space\n");
error = ENOMEM;
return error;
}
sc->sc_st = rman_get_bustag(sc->mem);
sc->sc_sh = rman_get_bushandle(sc->mem);
i = 1;
rid = 0;
if (pci_alloc_msi(dev, &i) == 0)
rid = 1;
/* Install interrupt handler. */
sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
(rid != 0 ? 0 : RF_SHAREABLE));
if (sc->irq == NULL) {
device_printf(dev, "can't map interrupt\n");
error = ENOMEM;
goto fail;
}
IWN_LOCK_INIT(sc);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
/* Read hardware revision and attach. */
sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT)
& IWN_HW_REV_TYPE_MASK;
sc->subdevice_id = pci_get_subdevice(dev);
/*
* 4965 versus 5000 and later have different methods.
* Let's set those up first.
*/
if (sc->hw_type == IWN_HW_REV_TYPE_4965)
error = iwn4965_attach(sc, pci_get_device(dev));
else
error = iwn5000_attach(sc, pci_get_device(dev));
if (error != 0) {
device_printf(dev, "could not attach device, error %d\n",
error);
goto fail;
}
/*
* Next, let's set up the various parameters of each NIC.
*/
error = iwn_config_specific(sc, pci_get_device(dev));
if (error != 0) {
device_printf(dev, "could not attach device, error %d\n",
error);
goto fail;
}
if ((error = iwn_hw_prepare(sc)) != 0) {
device_printf(dev, "hardware not ready, error %d\n", error);
goto fail;
}
/* Allocate DMA memory for firmware transfers. */
if ((error = iwn_alloc_fwmem(sc)) != 0) {
device_printf(dev,
"could not allocate memory for firmware, error %d\n",
error);
goto fail;
}
/* Allocate "Keep Warm" page. */
if ((error = iwn_alloc_kw(sc)) != 0) {
device_printf(dev,
"could not allocate keep warm page, error %d\n", error);
goto fail;
}
/* Allocate ICT table for 5000 Series. */
if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
(error = iwn_alloc_ict(sc)) != 0) {
device_printf(dev, "could not allocate ICT table, error %d\n",
error);
goto fail;
}
/* Allocate TX scheduler "rings". */
if ((error = iwn_alloc_sched(sc)) != 0) {
device_printf(dev,
"could not allocate TX scheduler rings, error %d\n", error);
goto fail;
}
/* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
for (i = 0; i < sc->ntxqs; i++) {
if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
device_printf(dev,
"could not allocate TX ring %d, error %d\n", i,
error);
goto fail;
}
}
/* Allocate RX ring. */
if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
device_printf(dev, "could not allocate RX ring, error %d\n",
error);
goto fail;
}
/* Clear pending interrupts. */
IWN_WRITE(sc, IWN_INT, 0xffffffff);
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(dev, "can not allocate ifnet structure\n");
- goto fail;
- }
-
- ic = ifp->if_l2com;
- ic->ic_ifp = ifp;
+ ic = &sc->sc_ic;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
/* Set device capabilities. */
ic->ic_caps =
IEEE80211_C_STA /* station mode supported */
| IEEE80211_C_MONITOR /* monitor mode supported */
#if 0
| IEEE80211_C_BGSCAN /* background scanning */
#endif
| IEEE80211_C_TXPMGT /* tx power management */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_WPA
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
#if 0
| IEEE80211_C_IBSS /* ibss/adhoc mode */
#endif
| IEEE80211_C_WME /* WME */
| IEEE80211_C_PMGT /* Station-side power mgmt */
;
/* Read MAC address, channels, etc from EEPROM. */
- if ((error = iwn_read_eeprom(sc, macaddr)) != 0) {
+ if ((error = iwn_read_eeprom(sc, ic->ic_macaddr)) != 0) {
device_printf(dev, "could not read EEPROM, error %d\n",
error);
goto fail;
}
/* Count the number of available chains. */
sc->ntxchains =
((sc->txchainmask >> 2) & 1) +
((sc->txchainmask >> 1) & 1) +
((sc->txchainmask >> 0) & 1);
sc->nrxchains =
((sc->rxchainmask >> 2) & 1) +
((sc->rxchainmask >> 1) & 1) +
((sc->rxchainmask >> 0) & 1);
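/*
 * For example, txchainmask == IWN_ANT_AB (antennas A and B) counts to
 * ntxchains == 2, and rxchainmask == IWN_ANT_ABC counts to
 * nrxchains == 3, matching, for example, the 4965 chain masks set in
 * iwn_config_specific().
 */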
if (bootverbose) {
device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n",
sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
- macaddr, ":");
+ ic->ic_macaddr, ":");
}
if (sc->sc_flags & IWN_FLAG_HAS_11N) {
ic->ic_rxstream = sc->nrxchains;
ic->ic_txstream = sc->ntxchains;
/*
* Some of the 3-antenna devices (i.e., the 4965) only support
* 2x2 operation, so correct the number of streams if
* it's not a 3-stream device.
*/
if (! iwn_is_3stream_device(sc)) {
if (ic->ic_rxstream > 2)
ic->ic_rxstream = 2;
if (ic->ic_txstream > 2)
ic->ic_txstream = 2;
}
ic->ic_htcaps =
IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */
| IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
| IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/
| IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
#ifdef notyet
| IEEE80211_HTCAP_GREENFIELD
#if IWN_RBUF_SIZE == 8192
| IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */
#else
| IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */
#endif
#endif
/* s/w capabilities */
| IEEE80211_HTC_HT /* HT operation */
| IEEE80211_HTC_AMPDU /* tx A-MPDU */
#ifdef notyet
| IEEE80211_HTC_AMSDU /* tx A-MSDU */
#endif
;
}
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_softc = sc;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = iwn_init;
- ifp->if_ioctl = iwn_ioctl;
- ifp->if_start = iwn_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ieee80211_ifattach(ic, macaddr);
+ ieee80211_ifattach(ic);
ic->ic_vap_create = iwn_vap_create;
+ ic->ic_ioctl = iwn_ioctl;
+ ic->ic_parent = iwn_parent;
ic->ic_vap_delete = iwn_vap_delete;
+ ic->ic_transmit = iwn_transmit;
ic->ic_raw_xmit = iwn_raw_xmit;
ic->ic_node_alloc = iwn_node_alloc;
sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
sc->sc_addba_request = ic->ic_addba_request;
ic->ic_addba_request = iwn_addba_request;
sc->sc_addba_response = ic->ic_addba_response;
ic->ic_addba_response = iwn_addba_response;
sc->sc_addba_stop = ic->ic_addba_stop;
ic->ic_addba_stop = iwn_ampdu_tx_stop;
ic->ic_newassoc = iwn_newassoc;
ic->ic_wme.wme_update = iwn_updateedca;
ic->ic_update_mcast = iwn_update_mcast;
ic->ic_scan_start = iwn_scan_start;
ic->ic_scan_end = iwn_scan_end;
ic->ic_set_channel = iwn_set_channel;
ic->ic_scan_curchan = iwn_scan_curchan;
ic->ic_scan_mindwell = iwn_scan_mindwell;
ic->ic_setregdomain = iwn_setregdomain;
iwn_radiotap_attach(sc);
callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0);
callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
TASK_INIT(&sc->sc_reinit_task, 0, iwn_hw_reset, sc);
TASK_INIT(&sc->sc_radioon_task, 0, iwn_radio_on, sc);
TASK_INIT(&sc->sc_radiooff_task, 0, iwn_radio_off, sc);
TASK_INIT(&sc->sc_panic_task, 0, iwn_panicked, sc);
TASK_INIT(&sc->sc_xmit_task, 0, iwn_xmit_task, sc);
mbufq_init(&sc->sc_xmit_queue, 1024);
sc->sc_tq = taskqueue_create("iwn_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->sc_tq);
error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwn_taskq");
if (error != 0) {
device_printf(dev, "can't start threads, error %d\n", error);
goto fail;
}
iwn_sysctlattach(sc);
/*
* Hook our interrupt after all initialization is complete.
*/
error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
NULL, iwn_intr, sc, &sc->sc_ih);
if (error != 0) {
device_printf(dev, "can't establish interrupt, error %d\n",
error);
goto fail;
}
#if 0
device_printf(sc->sc_dev, "%s: rx_stats=%d, rx_stats_bt=%d\n",
__func__,
sizeof(struct iwn_stats),
sizeof(struct iwn_stats_bt));
#endif
if (bootverbose)
ieee80211_announce(ic);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return 0;
fail:
iwn_detach(dev);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
return error;
}
/*
* Define specific configuration based on device id and subdevice id
* pid : PCI device id
*/
static int
iwn_config_specific(struct iwn_softc *sc, uint16_t pid)
{
switch (pid) {
/* 4965 series */
case IWN_DID_4965_1:
case IWN_DID_4965_2:
case IWN_DID_4965_3:
case IWN_DID_4965_4:
sc->base_params = &iwn4965_base_params;
sc->limits = &iwn4965_sensitivity_limits;
sc->fwname = "iwn4965fw";
/* Override chains masks, ROM is known to be broken. */
sc->txchainmask = IWN_ANT_AB;
sc->rxchainmask = IWN_ANT_ABC;
/* Enable normal btcoex */
sc->sc_flags |= IWN_FLAG_BTCOEX;
break;
/* 1000 Series */
case IWN_DID_1000_1:
case IWN_DID_1000_2:
switch(sc->subdevice_id) {
case IWN_SDID_1000_1:
case IWN_SDID_1000_2:
case IWN_SDID_1000_3:
case IWN_SDID_1000_4:
case IWN_SDID_1000_5:
case IWN_SDID_1000_6:
case IWN_SDID_1000_7:
case IWN_SDID_1000_8:
case IWN_SDID_1000_9:
case IWN_SDID_1000_10:
case IWN_SDID_1000_11:
case IWN_SDID_1000_12:
sc->limits = &iwn1000_sensitivity_limits;
sc->base_params = &iwn1000_base_params;
sc->fwname = "iwn1000fw";
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
/* 6x00 Series */
case IWN_DID_6x00_2:
case IWN_DID_6x00_4:
case IWN_DID_6x00_1:
case IWN_DID_6x00_3:
sc->fwname = "iwn6000fw";
sc->limits = &iwn6000_sensitivity_limits;
switch(sc->subdevice_id) {
case IWN_SDID_6x00_1:
case IWN_SDID_6x00_2:
case IWN_SDID_6x00_8:
//iwl6000_3agn_cfg
sc->base_params = &iwn_6000_base_params;
break;
case IWN_SDID_6x00_3:
case IWN_SDID_6x00_6:
case IWN_SDID_6x00_9:
//iwl6000i_2agn
case IWN_SDID_6x00_4:
case IWN_SDID_6x00_7:
case IWN_SDID_6x00_10:
//iwl6000i_2abg_cfg
case IWN_SDID_6x00_5:
//iwl6000i_2bg_cfg
sc->base_params = &iwn_6000i_base_params;
sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
sc->txchainmask = IWN_ANT_BC;
sc->rxchainmask = IWN_ANT_BC;
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
/* 6x05 Series */
case IWN_DID_6x05_1:
case IWN_DID_6x05_2:
switch(sc->subdevice_id) {
case IWN_SDID_6x05_1:
case IWN_SDID_6x05_4:
case IWN_SDID_6x05_6:
//iwl6005_2agn_cfg
case IWN_SDID_6x05_2:
case IWN_SDID_6x05_5:
case IWN_SDID_6x05_7:
//iwl6005_2abg_cfg
case IWN_SDID_6x05_3:
//iwl6005_2bg_cfg
case IWN_SDID_6x05_8:
case IWN_SDID_6x05_9:
//iwl6005_2agn_sff_cfg
case IWN_SDID_6x05_10:
//iwl6005_2agn_d_cfg
case IWN_SDID_6x05_11:
//iwl6005_2agn_mow1_cfg
case IWN_SDID_6x05_12:
//iwl6005_2agn_mow2_cfg
sc->fwname = "iwn6000g2afw";
sc->limits = &iwn6000_sensitivity_limits;
sc->base_params = &iwn_6000g2_base_params;
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
/* 6x35 Series */
case IWN_DID_6035_1:
case IWN_DID_6035_2:
switch(sc->subdevice_id) {
case IWN_SDID_6035_1:
case IWN_SDID_6035_2:
case IWN_SDID_6035_3:
case IWN_SDID_6035_4:
sc->fwname = "iwn6000g2bfw";
sc->limits = &iwn6235_sensitivity_limits;
sc->base_params = &iwn_6235_base_params;
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
/* 6x50 WiFi/WiMax Series */
case IWN_DID_6050_1:
case IWN_DID_6050_2:
switch(sc->subdevice_id) {
case IWN_SDID_6050_1:
case IWN_SDID_6050_3:
case IWN_SDID_6050_5:
//iwl6050_2agn_cfg
case IWN_SDID_6050_2:
case IWN_SDID_6050_4:
case IWN_SDID_6050_6:
//iwl6050_2abg_cfg
sc->fwname = "iwn6050fw";
sc->txchainmask = IWN_ANT_AB;
sc->rxchainmask = IWN_ANT_AB;
sc->limits = &iwn6000_sensitivity_limits;
sc->base_params = &iwn_6050_base_params;
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
/* 6150 WiFi/WiMax Series */
case IWN_DID_6150_1:
case IWN_DID_6150_2:
switch(sc->subdevice_id) {
case IWN_SDID_6150_1:
case IWN_SDID_6150_3:
case IWN_SDID_6150_5:
// iwl6150_bgn_cfg
case IWN_SDID_6150_2:
case IWN_SDID_6150_4:
case IWN_SDID_6150_6:
//iwl6150_bg_cfg
sc->fwname = "iwn6050fw";
sc->limits = &iwn6000_sensitivity_limits;
sc->base_params = &iwn_6150_base_params;
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
/* 6030 Series and 1030 Series */
case IWN_DID_x030_1:
case IWN_DID_x030_2:
case IWN_DID_x030_3:
case IWN_DID_x030_4:
switch(sc->subdevice_id) {
case IWN_SDID_x030_1:
case IWN_SDID_x030_3:
case IWN_SDID_x030_5:
// iwl1030_bgn_cfg
case IWN_SDID_x030_2:
case IWN_SDID_x030_4:
case IWN_SDID_x030_6:
//iwl1030_bg_cfg
case IWN_SDID_x030_7:
case IWN_SDID_x030_10:
case IWN_SDID_x030_14:
//iwl6030_2agn_cfg
case IWN_SDID_x030_8:
case IWN_SDID_x030_11:
case IWN_SDID_x030_15:
// iwl6030_2bgn_cfg
case IWN_SDID_x030_9:
case IWN_SDID_x030_12:
case IWN_SDID_x030_16:
// iwl6030_2abg_cfg
case IWN_SDID_x030_13:
//iwl6030_2bg_cfg
sc->fwname = "iwn6000g2bfw";
sc->limits = &iwn6000_sensitivity_limits;
sc->base_params = &iwn_6000g2b_base_params;
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
/* 130 Series WiFi */
/* XXX: This series will need adjustment for rate.
* see rx_with_siso_diversity in linux kernel
*/
case IWN_DID_130_1:
case IWN_DID_130_2:
switch(sc->subdevice_id) {
case IWN_SDID_130_1:
case IWN_SDID_130_3:
case IWN_SDID_130_5:
//iwl130_bgn_cfg
case IWN_SDID_130_2:
case IWN_SDID_130_4:
case IWN_SDID_130_6:
//iwl130_bg_cfg
sc->fwname = "iwn6000g2bfw";
sc->limits = &iwn6000_sensitivity_limits;
sc->base_params = &iwn_6000g2b_base_params;
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
/* 100 Series WiFi */
case IWN_DID_100_1:
case IWN_DID_100_2:
switch(sc->subdevice_id) {
case IWN_SDID_100_1:
case IWN_SDID_100_2:
case IWN_SDID_100_3:
case IWN_SDID_100_4:
case IWN_SDID_100_5:
case IWN_SDID_100_6:
sc->limits = &iwn1000_sensitivity_limits;
sc->base_params = &iwn1000_base_params;
sc->fwname = "iwn100fw";
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
/* 105 Series */
/* XXX: This series will need adjustment for rate.
* see rx_with_siso_diversity in linux kernel
*/
case IWN_DID_105_1:
case IWN_DID_105_2:
switch(sc->subdevice_id) {
case IWN_SDID_105_1:
case IWN_SDID_105_2:
case IWN_SDID_105_3:
//iwl105_bgn_cfg
case IWN_SDID_105_4:
//iwl105_bgn_d_cfg
sc->limits = &iwn2030_sensitivity_limits;
sc->base_params = &iwn2000_base_params;
sc->fwname = "iwn105fw";
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
/* 135 Series */
/* XXX: This series will need adjustment for rate.
* see rx_with_siso_diversity in linux kernel
*/
case IWN_DID_135_1:
case IWN_DID_135_2:
switch(sc->subdevice_id) {
case IWN_SDID_135_1:
case IWN_SDID_135_2:
case IWN_SDID_135_3:
sc->limits = &iwn2030_sensitivity_limits;
sc->base_params = &iwn2030_base_params;
sc->fwname = "iwn135fw";
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
/* 2x00 Series */
case IWN_DID_2x00_1:
case IWN_DID_2x00_2:
switch(sc->subdevice_id) {
case IWN_SDID_2x00_1:
case IWN_SDID_2x00_2:
case IWN_SDID_2x00_3:
//iwl2000_2bgn_cfg
case IWN_SDID_2x00_4:
//iwl2000_2bgn_d_cfg
sc->limits = &iwn2030_sensitivity_limits;
sc->base_params = &iwn2000_base_params;
sc->fwname = "iwn2000fw";
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice) \n",
pid, sc->subdevice_id, sc->hw_type);
return ENOTSUP;
}
break;
/* 2x30 Series */
case IWN_DID_2x30_1:
case IWN_DID_2x30_2:
switch(sc->subdevice_id) {
case IWN_SDID_2x30_1:
case IWN_SDID_2x30_3:
case IWN_SDID_2x30_5:
//iwl100_bgn_cfg
case IWN_SDID_2x30_2:
case IWN_SDID_2x30_4:
case IWN_SDID_2x30_6:
//iwl100_bg_cfg
sc->limits = &iwn2030_sensitivity_limits;
sc->base_params = &iwn2030_base_params;
sc->fwname = "iwn2030fw";
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
/* 5x00 Series */
case IWN_DID_5x00_1:
case IWN_DID_5x00_2:
case IWN_DID_5x00_3:
case IWN_DID_5x00_4:
sc->limits = &iwn5000_sensitivity_limits;
sc->base_params = &iwn5000_base_params;
sc->fwname = "iwn5000fw";
switch(sc->subdevice_id) {
case IWN_SDID_5x00_1:
case IWN_SDID_5x00_2:
case IWN_SDID_5x00_3:
case IWN_SDID_5x00_4:
case IWN_SDID_5x00_9:
case IWN_SDID_5x00_10:
case IWN_SDID_5x00_11:
case IWN_SDID_5x00_12:
case IWN_SDID_5x00_17:
case IWN_SDID_5x00_18:
case IWN_SDID_5x00_19:
case IWN_SDID_5x00_20:
//iwl5100_agn_cfg
sc->txchainmask = IWN_ANT_B;
sc->rxchainmask = IWN_ANT_AB;
break;
case IWN_SDID_5x00_5:
case IWN_SDID_5x00_6:
case IWN_SDID_5x00_13:
case IWN_SDID_5x00_14:
case IWN_SDID_5x00_21:
case IWN_SDID_5x00_22:
//iwl5100_bgn_cfg
sc->txchainmask = IWN_ANT_B;
sc->rxchainmask = IWN_ANT_AB;
break;
case IWN_SDID_5x00_7:
case IWN_SDID_5x00_8:
case IWN_SDID_5x00_15:
case IWN_SDID_5x00_16:
case IWN_SDID_5x00_23:
case IWN_SDID_5x00_24:
//iwl5100_abg_cfg
sc->txchainmask = IWN_ANT_B;
sc->rxchainmask = IWN_ANT_AB;
break;
case IWN_SDID_5x00_25:
case IWN_SDID_5x00_26:
case IWN_SDID_5x00_27:
case IWN_SDID_5x00_28:
case IWN_SDID_5x00_29:
case IWN_SDID_5x00_30:
case IWN_SDID_5x00_31:
case IWN_SDID_5x00_32:
case IWN_SDID_5x00_33:
case IWN_SDID_5x00_34:
case IWN_SDID_5x00_35:
case IWN_SDID_5x00_36:
//iwl5300_agn_cfg
sc->txchainmask = IWN_ANT_ABC;
sc->rxchainmask = IWN_ANT_ABC;
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
/* 5x50 Series */
case IWN_DID_5x50_1:
case IWN_DID_5x50_2:
case IWN_DID_5x50_3:
case IWN_DID_5x50_4:
sc->limits = &iwn5000_sensitivity_limits;
sc->base_params = &iwn5000_base_params;
sc->fwname = "iwn5000fw";
switch(sc->subdevice_id) {
case IWN_SDID_5x50_1:
case IWN_SDID_5x50_2:
case IWN_SDID_5x50_3:
//iwl5350_agn_cfg
sc->limits = &iwn5000_sensitivity_limits;
sc->base_params = &iwn5000_base_params;
sc->fwname = "iwn5000fw";
break;
case IWN_SDID_5x50_4:
case IWN_SDID_5x50_5:
case IWN_SDID_5x50_8:
case IWN_SDID_5x50_9:
case IWN_SDID_5x50_10:
case IWN_SDID_5x50_11:
//iwl5150_agn_cfg
case IWN_SDID_5x50_6:
case IWN_SDID_5x50_7:
case IWN_SDID_5x50_12:
case IWN_SDID_5x50_13:
//iwl5150_abg_cfg
sc->limits = &iwn5000_sensitivity_limits;
sc->fwname = "iwn5150fw";
sc->base_params = &iwn_5x50_base_params;
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :"
"0x%04x rev %d not supported (subdevice)\n", pid,
sc->subdevice_id,sc->hw_type);
return ENOTSUP;
}
break;
default:
device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x"
"rev 0x%08x not supported (device)\n", pid, sc->subdevice_id,
sc->hw_type);
return ENOTSUP;
}
return 0;
}
static int
iwn4965_attach(struct iwn_softc *sc, uint16_t pid)
{
struct iwn_ops *ops = &sc->ops;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
ops->load_firmware = iwn4965_load_firmware;
ops->read_eeprom = iwn4965_read_eeprom;
ops->post_alive = iwn4965_post_alive;
ops->nic_config = iwn4965_nic_config;
ops->update_sched = iwn4965_update_sched;
ops->get_temperature = iwn4965_get_temperature;
ops->get_rssi = iwn4965_get_rssi;
ops->set_txpower = iwn4965_set_txpower;
ops->init_gains = iwn4965_init_gains;
ops->set_gains = iwn4965_set_gains;
ops->add_node = iwn4965_add_node;
ops->tx_done = iwn4965_tx_done;
ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
sc->ntxqs = IWN4965_NTXQUEUES;
sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE;
sc->ndmachnls = IWN4965_NDMACHNLS;
sc->broadcast_id = IWN4965_ID_BROADCAST;
sc->rxonsz = IWN4965_RXONSZ;
sc->schedsz = IWN4965_SCHEDSZ;
sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
sc->fwsz = IWN4965_FWSZ;
sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
sc->limits = &iwn4965_sensitivity_limits;
sc->fwname = "iwn4965fw";
/* Override chain masks; the ROM is known to be broken. */
sc->txchainmask = IWN_ANT_AB;
sc->rxchainmask = IWN_ANT_ABC;
/* Enable normal btcoex */
sc->sc_flags |= IWN_FLAG_BTCOEX;
DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__);
return 0;
}
static int
iwn5000_attach(struct iwn_softc *sc, uint16_t pid)
{
struct iwn_ops *ops = &sc->ops;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
ops->load_firmware = iwn5000_load_firmware;
ops->read_eeprom = iwn5000_read_eeprom;
ops->post_alive = iwn5000_post_alive;
ops->nic_config = iwn5000_nic_config;
ops->update_sched = iwn5000_update_sched;
ops->get_temperature = iwn5000_get_temperature;
ops->get_rssi = iwn5000_get_rssi;
ops->set_txpower = iwn5000_set_txpower;
ops->init_gains = iwn5000_init_gains;
ops->set_gains = iwn5000_set_gains;
ops->add_node = iwn5000_add_node;
ops->tx_done = iwn5000_tx_done;
ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
sc->ntxqs = IWN5000_NTXQUEUES;
sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE;
sc->ndmachnls = IWN5000_NDMACHNLS;
sc->broadcast_id = IWN5000_ID_BROADCAST;
sc->rxonsz = IWN5000_RXONSZ;
sc->schedsz = IWN5000_SCHEDSZ;
sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
sc->fwsz = IWN5000_FWSZ;
sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN;
return 0;
}
/*
* Attach the interface to 802.11 radiotap.
*/
static void
iwn_radiotap_attach(struct iwn_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
- ieee80211_radiotap_attach(ic,
+ ieee80211_radiotap_attach(&sc->sc_ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
IWN_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
IWN_RX_RADIOTAP_PRESENT);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}
static void
iwn_sysctlattach(struct iwn_softc *sc)
{
#ifdef IWN_DEBUG
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
"control debugging printfs");
#endif
}
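/*
 * NB: with IWN_DEBUG compiled in, the "debug" mask registered above can
 * be changed at runtime through the per-device sysctl tree, e.g.
 * "sysctl dev.iwn.0.debug=0x1" (unit 0 assumed).
 */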
static struct ieee80211vap *
iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct iwn_softc *sc = ic->ic_softc;
struct iwn_vap *ivp;
struct ieee80211vap *vap;
- uint8_t mac1[IEEE80211_ADDR_LEN];
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return NULL;
- IEEE80211_ADDR_COPY(mac1, mac);
-
- ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (ivp == NULL)
- return NULL;
+ ivp = malloc(sizeof(struct iwn_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &ivp->iv_vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac1);
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
ivp->ctx = IWN_RXON_BSS_CTX;
- IEEE80211_ADDR_COPY(ivp->macaddr, mac1);
vap->iv_bmissthreshold = 10; /* override default */
/* Override with driver methods. */
ivp->iv_newstate = vap->iv_newstate;
vap->iv_newstate = iwn_newstate;
sc->ivap[IWN_RXON_BSS_CTX] = vap;
ieee80211_ratectl_init(vap);
/* Complete setup. */
- ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status);
+ ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status,
+ mac);
ic->ic_opmode = opmode;
return vap;
}
static void
iwn_vap_delete(struct ieee80211vap *vap)
{
struct iwn_vap *ivp = IWN_VAP(vap);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(ivp, M_80211_VAP);
}
static void
iwn_xmit_queue_drain(struct iwn_softc *sc)
{
struct mbuf *m;
struct ieee80211_node *ni;
IWN_LOCK_ASSERT(sc);
while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) {
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
ieee80211_free_node(ni);
m_freem(m);
}
}
static int
iwn_xmit_queue_enqueue(struct iwn_softc *sc, struct mbuf *m)
{
IWN_LOCK_ASSERT(sc);
return (mbufq_enqueue(&sc->sc_xmit_queue, m));
}
static int
iwn_detach(device_t dev)
{
struct iwn_softc *sc = device_get_softc(dev);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic;
int qid;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
- if (ifp != NULL) {
- ic = ifp->if_l2com;
-
+ if (sc->sc_ic.ic_softc != NULL) {
/* Free the mbuf queue and node references */
IWN_LOCK(sc);
iwn_xmit_queue_drain(sc);
IWN_UNLOCK(sc);
- ieee80211_draintask(ic, &sc->sc_reinit_task);
- ieee80211_draintask(ic, &sc->sc_radioon_task);
- ieee80211_draintask(ic, &sc->sc_radiooff_task);
-
+ ieee80211_draintask(&sc->sc_ic, &sc->sc_reinit_task);
+ ieee80211_draintask(&sc->sc_ic, &sc->sc_radioon_task);
+ ieee80211_draintask(&sc->sc_ic, &sc->sc_radiooff_task);
iwn_stop(sc);
taskqueue_drain_all(sc->sc_tq);
taskqueue_free(sc->sc_tq);
callout_drain(&sc->watchdog_to);
callout_drain(&sc->calib_to);
- ieee80211_ifdetach(ic);
+ ieee80211_ifdetach(&sc->sc_ic);
}
+ mbufq_drain(&sc->sc_snd);
+
/* Uninstall interrupt handler. */
if (sc->irq != NULL) {
bus_teardown_intr(dev, sc->irq, sc->sc_ih);
bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
sc->irq);
pci_release_msi(dev);
}
/* Free DMA resources. */
iwn_free_rx_ring(sc, &sc->rxq);
for (qid = 0; qid < sc->ntxqs; qid++)
iwn_free_tx_ring(sc, &sc->txq[qid]);
iwn_free_sched(sc);
iwn_free_kw(sc);
if (sc->ict != NULL)
iwn_free_ict(sc);
iwn_free_fwmem(sc);
if (sc->mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
rman_get_rid(sc->mem), sc->mem);
- if (ifp != NULL)
- if_free(ifp);
-
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__);
IWN_LOCK_DESTROY(sc);
return 0;
}
static int
iwn_shutdown(device_t dev)
{
struct iwn_softc *sc = device_get_softc(dev);
iwn_stop(sc);
return 0;
}
static int
iwn_suspend(device_t dev)
{
struct iwn_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
- ieee80211_suspend_all(ic);
+ ieee80211_suspend_all(&sc->sc_ic);
return 0;
}
static int
iwn_resume(device_t dev)
{
struct iwn_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
/* Clear device-specific "PCI retry timeout" register (41h). */
pci_write_config(dev, 0x41, 0, 1);
- ieee80211_resume_all(ic);
+ ieee80211_resume_all(&sc->sc_ic);
return 0;
}
static int
iwn_nic_lock(struct iwn_softc *sc)
{
int ntries;
/* Request exclusive access to NIC. */
IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
/* Spin until we actually get the lock. */
for (ntries = 0; ntries < 1000; ntries++) {
if ((IWN_READ(sc, IWN_GP_CNTRL) &
(IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
IWN_GP_CNTRL_MAC_ACCESS_ENA)
return 0;
DELAY(10);
}
return ETIMEDOUT;
}
static __inline void
iwn_nic_unlock(struct iwn_softc *sc)
{
IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
}
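/*
 * Periphery (PRPH) registers are not mapped directly; the helpers below
 * go through an address/data window: the target address is written to
 * IWN_PRPH_RADDR/WADDR and the payload moves through IWN_PRPH_RDATA/WDATA,
 * with a bus barrier in between so the address write lands first.
 */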
static __inline uint32_t
iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
{
IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
IWN_BARRIER_READ_WRITE(sc);
return IWN_READ(sc, IWN_PRPH_RDATA);
}
static __inline void
iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
IWN_BARRIER_WRITE(sc);
IWN_WRITE(sc, IWN_PRPH_WDATA, data);
}
static __inline void
iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
}
static __inline void
iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
{
iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
}
static __inline void
iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
const uint32_t *data, int count)
{
for (; count > 0; count--, data++, addr += 4)
iwn_prph_write(sc, addr, *data);
}
static __inline uint32_t
iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
{
IWN_WRITE(sc, IWN_MEM_RADDR, addr);
IWN_BARRIER_READ_WRITE(sc);
return IWN_READ(sc, IWN_MEM_RDATA);
}
static __inline void
iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
{
IWN_WRITE(sc, IWN_MEM_WADDR, addr);
IWN_BARRIER_WRITE(sc);
IWN_WRITE(sc, IWN_MEM_WDATA, data);
}
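/*
 * 16-bit writes are done as a read-modify-write of the enclosing 32-bit
 * word; which half is replaced depends on the offset within the word
 * (addr & 3).
 */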
static __inline void
iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
{
uint32_t tmp;
tmp = iwn_mem_read(sc, addr & ~3);
if (addr & 3)
tmp = (tmp & 0x0000ffff) | data << 16;
else
tmp = (tmp & 0xffff0000) | data;
iwn_mem_write(sc, addr & ~3, tmp);
}
static __inline void
iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
int count)
{
for (; count > 0; count--, addr += 4)
*data++ = iwn_mem_read(sc, addr);
}
static __inline void
iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
int count)
{
for (; count > 0; count--, addr += 4)
iwn_mem_write(sc, addr, val);
}
static int
iwn_eeprom_lock(struct iwn_softc *sc)
{
int i, ntries;
for (i = 0; i < 100; i++) {
/* Request exclusive access to EEPROM. */
IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
IWN_HW_IF_CONFIG_EEPROM_LOCKED);
/* Spin until we actually get the lock. */
for (ntries = 0; ntries < 100; ntries++) {
if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
IWN_HW_IF_CONFIG_EEPROM_LOCKED)
return 0;
DELAY(10);
}
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__);
return ETIMEDOUT;
}
static __inline void
iwn_eeprom_unlock(struct iwn_softc *sc)
{
IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
}
/*
* Initialize access by host to One Time Programmable ROM.
* NB: This kind of ROM can be found on 1000 or 6000 Series only.
*/
static int
iwn_init_otprom(struct iwn_softc *sc)
{
uint16_t prev, base, next;
int count, error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/* Wait for clock stabilization before accessing prph. */
if ((error = iwn_clock_wait(sc)) != 0)
return error;
if ((error = iwn_nic_lock(sc)) != 0)
return error;
iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
DELAY(5);
iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
iwn_nic_unlock(sc);
/* Set auto clock gate disable bit for HW with OTP shadow RAM. */
if (sc->base_params->shadow_ram_support) {
IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
IWN_RESET_LINK_PWR_MGMT_DIS);
}
IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
/* Clear ECC status. */
IWN_SETBITS(sc, IWN_OTP_GP,
IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
/*
* Find the block before last block (contains the EEPROM image)
* for HW without OTP shadow RAM.
*/
if (! sc->base_params->shadow_ram_support) {
/* Switch to absolute addressing mode. */
IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
base = prev = 0;
for (count = 0; count < sc->base_params->max_ll_items;
count++) {
error = iwn_read_prom_data(sc, base, &next, 2);
if (error != 0)
return error;
if (next == 0) /* End of linked-list. */
break;
prev = base;
base = le16toh(next);
}
if (count == 0 || count == sc->base_params->max_ll_items)
return EIO;
/* Skip "next" word. */
sc->prom_base = prev + 1;
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
return 0;
}
static int
iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
{
uint8_t *out = data;
uint32_t val, tmp;
int ntries;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
addr += sc->prom_base;
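/*
 * Each access returns one 16-bit ROM word: the word address goes into
 * bits 2 and up of IWN_EEPROM and the data comes back in the upper half
 * of the same register, hence the "val >> 16" / "val >> 24" extraction
 * below.
 */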
for (; count > 0; count -= 2, addr++) {
IWN_WRITE(sc, IWN_EEPROM, addr << 2);
for (ntries = 0; ntries < 10; ntries++) {
val = IWN_READ(sc, IWN_EEPROM);
if (val & IWN_EEPROM_READ_VALID)
break;
DELAY(5);
}
if (ntries == 10) {
device_printf(sc->sc_dev,
"timeout reading ROM at 0x%x\n", addr);
return ETIMEDOUT;
}
if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
/* OTPROM, check for ECC errors. */
tmp = IWN_READ(sc, IWN_OTP_GP);
if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
device_printf(sc->sc_dev,
"OTPROM ECC error at 0x%x\n", addr);
return EIO;
}
if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
/* Correctable ECC error, clear bit. */
IWN_SETBITS(sc, IWN_OTP_GP,
IWN_OTP_GP_ECC_CORR_STTS);
}
}
*out++ = val >> 16;
if (count > 1)
*out++ = val >> 24;
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
return 0;
}
static void
iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
if (error != 0)
return;
KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
*(bus_addr_t *)arg = segs[0].ds_addr;
}
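/*
 * iwn_dma_contig_alloc() below is the usual busdma sequence --
 * bus_dma_tag_create(), bus_dmamem_alloc(), bus_dmamap_load() -- with
 * iwn_dma_map_addr() as the load callback recording the single physical
 * segment in dma->paddr.
 */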
static int
iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma,
void **kvap, bus_size_t size, bus_size_t alignment)
{
int error;
dma->tag = NULL;
dma->size = size;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
if (error != 0)
goto fail;
error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
if (error != 0)
goto fail;
error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
if (error != 0)
goto fail;
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
if (kvap != NULL)
*kvap = dma->vaddr;
return 0;
fail: iwn_dma_contig_free(dma);
return error;
}
static void
iwn_dma_contig_free(struct iwn_dma_info *dma)
{
if (dma->vaddr != NULL) {
bus_dmamap_sync(dma->tag, dma->map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(dma->tag, dma->map);
bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
dma->vaddr = NULL;
}
if (dma->tag != NULL) {
bus_dma_tag_destroy(dma->tag);
dma->tag = NULL;
}
}
static int
iwn_alloc_sched(struct iwn_softc *sc)
{
/* TX scheduler rings must be aligned on a 1KB boundary. */
return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched,
sc->schedsz, 1024);
}
static void
iwn_free_sched(struct iwn_softc *sc)
{
iwn_dma_contig_free(&sc->sched_dma);
}
static int
iwn_alloc_kw(struct iwn_softc *sc)
{
/* "Keep Warm" page must be aligned on a 4KB boundary. */
return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096);
}
static void
iwn_free_kw(struct iwn_softc *sc)
{
iwn_dma_contig_free(&sc->kw_dma);
}
static int
iwn_alloc_ict(struct iwn_softc *sc)
{
/* ICT table must be aligned on a 4KB boundary. */
return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict,
IWN_ICT_SIZE, 4096);
}
static void
iwn_free_ict(struct iwn_softc *sc)
{
iwn_dma_contig_free(&sc->ict_dma);
}
static int
iwn_alloc_fwmem(struct iwn_softc *sc)
{
/* Must be aligned on a 16-byte boundary. */
return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16);
}
static void
iwn_free_fwmem(struct iwn_softc *sc)
{
iwn_dma_contig_free(&sc->fw_dma);
}
static int
iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
bus_size_t size;
int i, error;
ring->cur = 0;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/* Allocate RX descriptors (256-byte aligned). */
size = IWN_RX_RING_COUNT * sizeof (uint32_t);
error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
size, 256);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not allocate RX ring DMA memory, error %d\n",
__func__, error);
goto fail;
}
/* Allocate RX status area (16-byte aligned). */
error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat,
sizeof (struct iwn_rx_status), 16);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not allocate RX status DMA memory, error %d\n",
__func__, error);
goto fail;
}
/* Create RX buffer DMA tag. */
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, BUS_DMA_NOWAIT, NULL, NULL,
&ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not create RX buf DMA tag, error %d\n",
__func__, error);
goto fail;
}
/*
* Allocate and map RX buffers.
*/
for (i = 0; i < IWN_RX_RING_COUNT; i++) {
struct iwn_rx_data *data = &ring->data[i];
bus_addr_t paddr;
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not create RX buf DMA map, error %d\n",
__func__, error);
goto fail;
}
data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
IWN_RBUF_SIZE);
if (data->m == NULL) {
device_printf(sc->sc_dev,
"%s: could not allocate RX mbuf\n", __func__);
error = ENOBUFS;
goto fail;
}
error = bus_dmamap_load(ring->data_dmat, data->map,
mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
&paddr, BUS_DMA_NOWAIT);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev,
"%s: can't map mbuf, error %d\n", __func__,
error);
goto fail;
}
/* Set physical address of RX buffer (256-byte aligned). */
ring->desc[i] = htole32(paddr >> 8);
}
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return 0;
fail: iwn_free_rx_ring(sc, ring);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
return error;
}
static void
iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
int ntries;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
if (iwn_nic_lock(sc) == 0) {
IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
for (ntries = 0; ntries < 1000; ntries++) {
if (IWN_READ(sc, IWN_FH_RX_STATUS) &
IWN_FH_RX_STATUS_IDLE)
break;
DELAY(10);
}
iwn_nic_unlock(sc);
}
ring->cur = 0;
sc->last_rx_valid = 0;
}
static void
iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
{
int i;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
iwn_dma_contig_free(&ring->desc_dma);
iwn_dma_contig_free(&ring->stat_dma);
for (i = 0; i < IWN_RX_RING_COUNT; i++) {
struct iwn_rx_data *data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
if (ring->data_dmat != NULL) {
bus_dma_tag_destroy(ring->data_dmat);
ring->data_dmat = NULL;
}
}
static int
iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
{
bus_addr_t paddr;
bus_size_t size;
int i, error;
ring->qid = qid;
ring->queued = 0;
ring->cur = 0;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/* Allocate TX descriptors (256-byte aligned). */
size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
size, 256);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not allocate TX ring DMA memory, error %d\n",
__func__, error);
goto fail;
}
size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
size, 4);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not allocate TX cmd DMA memory, error %d\n",
__func__, error);
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
&ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not create TX buf DMA tag, error %d\n",
__func__, error);
goto fail;
}
paddr = ring->cmd_dma.paddr;
for (i = 0; i < IWN_TX_RING_COUNT; i++) {
struct iwn_tx_data *data = &ring->data[i];
data->cmd_paddr = paddr;
data->scratch_paddr = paddr + 12;
paddr += sizeof (struct iwn_tx_cmd);
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not create TX buf DMA map, error %d\n",
__func__, error);
goto fail;
}
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
return 0;
fail: iwn_free_tx_ring(sc, ring);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
return error;
}
static void
iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
int i;
DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__);
for (i = 0; i < IWN_TX_RING_COUNT; i++) {
struct iwn_tx_data *data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
if (data->ni != NULL) {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
}
/* Clear TX descriptors. */
memset(ring->desc, 0, ring->desc_dma.size);
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
sc->qfullmsk &= ~(1 << ring->qid);
ring->queued = 0;
ring->cur = 0;
}
static void
iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
{
int i;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__);
iwn_dma_contig_free(&ring->desc_dma);
iwn_dma_contig_free(&ring->cmd_dma);
for (i = 0; i < IWN_TX_RING_COUNT; i++) {
struct iwn_tx_data *data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
if (ring->data_dmat != NULL) {
bus_dma_tag_destroy(ring->data_dmat);
ring->data_dmat = NULL;
}
}
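/*
 * ICT ("interrupt cause table") mode: the NIC writes interrupt-cause
 * entries into the table allocated by iwn_alloc_ict(), so the ISR can
 * pick causes up from memory instead of reading the interrupt registers
 * on every interrupt.  The routine below points the hardware at that
 * table and re-enables interrupts.
 */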
static void
iwn5000_ict_reset(struct iwn_softc *sc)
{
/* Disable interrupts. */
IWN_WRITE(sc, IWN_INT_MASK, 0);
/* Reset ICT table. */
memset(sc->ict, 0, IWN_ICT_SIZE);
sc->ict_cur = 0;
/* Set physical address of ICT table (4KB aligned). */
DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__);
IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
/* Enable periodic RX interrupt. */
sc->int_mask |= IWN_INT_RX_PERIODIC;
/* Switch to ICT interrupt mode in driver. */
sc->sc_flags |= IWN_FLAG_USE_ICT;
/* Re-enable interrupts. */
IWN_WRITE(sc, IWN_INT, 0xffffffff);
IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
}
static int
iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
struct iwn_ops *ops = &sc->ops;
uint16_t val;
int error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/* Check whether adapter has an EEPROM or an OTPROM. */
if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
(IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n",
(sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM");
/* Adapter has to be powered on for EEPROM access to work. */
if ((error = iwn_apm_init(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not power ON adapter, error %d\n", __func__,
error);
return error;
}
if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__);
return EIO;
}
if ((error = iwn_eeprom_lock(sc)) != 0) {
device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n",
__func__, error);
return error;
}
if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
if ((error = iwn_init_otprom(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not initialize OTPROM, error %d\n",
__func__, error);
return error;
}
}
iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val));
/* Check if HT support is bonded out. */
if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
sc->sc_flags |= IWN_FLAG_HAS_11N;
iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
sc->rfcfg = le16toh(val);
DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg);
/* Read Tx/Rx chains from ROM unless it's known to be broken. */
if (sc->txchainmask == 0)
sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
if (sc->rxchainmask == 0)
sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
/* Read MAC address. */
iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6);
/* Read adapter-specific information from EEPROM. */
ops->read_eeprom(sc);
iwn_apm_stop(sc); /* Power OFF adapter. */
iwn_eeprom_unlock(sc);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
return 0;
}
static void
iwn4965_read_eeprom(struct iwn_softc *sc)
{
uint32_t addr;
uint16_t val;
int i;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/* Read regulatory domain (4 ASCII characters). */
iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
/* Read the list of authorized channels (20MHz ones only). */
for (i = 0; i < IWN_NBANDS - 1; i++) {
addr = iwn4965_regulatory_bands[i];
iwn_read_eeprom_channels(sc, i, addr);
}
/* Read maximum allowed TX power for 2GHz and 5GHz bands. */
iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
sc->maxpwr2GHz = val & 0xff;
sc->maxpwr5GHz = val >> 8;
/* Check that EEPROM values are within valid range. */
if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
sc->maxpwr5GHz = 38;
if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
sc->maxpwr2GHz = 38;
DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n",
sc->maxpwr2GHz, sc->maxpwr5GHz);
/* Read samples for each TX power group. */
iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
sizeof sc->bands);
/* Read voltage at which samples were taken. */
iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
sc->eeprom_voltage = (int16_t)le16toh(val);
DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n",
sc->eeprom_voltage);
#ifdef IWN_DEBUG
/* Print samples. */
if (sc->sc_debug & IWN_DEBUG_ANY) {
for (i = 0; i < IWN_NBANDS - 1; i++)
iwn4965_print_power_group(sc, i);
}
#endif
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}
#ifdef IWN_DEBUG
static void
iwn4965_print_power_group(struct iwn_softc *sc, int i)
{
struct iwn4965_eeprom_band *band = &sc->bands[i];
struct iwn4965_eeprom_chan_samples *chans = band->chans;
int j, c;
printf("===band %d===\n", i);
printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
printf("chan1 num=%d\n", chans[0].num);
for (c = 0; c < 2; c++) {
for (j = 0; j < IWN_NSAMPLES; j++) {
printf("chain %d, sample %d: temp=%d gain=%d "
"power=%d pa_det=%d\n", c, j,
chans[0].samples[c][j].temp,
chans[0].samples[c][j].gain,
chans[0].samples[c][j].power,
chans[0].samples[c][j].pa_det);
}
}
printf("chan2 num=%d\n", chans[1].num);
for (c = 0; c < 2; c++) {
for (j = 0; j < IWN_NSAMPLES; j++) {
printf("chain %d, sample %d: temp=%d gain=%d "
"power=%d pa_det=%d\n", c, j,
chans[1].samples[c][j].temp,
chans[1].samples[c][j].gain,
chans[1].samples[c][j].power,
chans[1].samples[c][j].pa_det);
}
}
}
#endif
static void
iwn5000_read_eeprom(struct iwn_softc *sc)
{
struct iwn5000_eeprom_calib_hdr hdr;
int32_t volt;
uint32_t base, addr;
uint16_t val;
int i;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/* Read regulatory domain (4 ASCII characters). */
iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
base = le16toh(val);
iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
sc->eeprom_domain, 4);
/* Read the list of authorized channels (20MHz ones only). */
for (i = 0; i < IWN_NBANDS - 1; i++) {
addr = base + sc->base_params->regulatory_bands[i];
iwn_read_eeprom_channels(sc, i, addr);
}
/* Read enhanced TX power information for 6000 Series. */
if (sc->base_params->enhanced_TX_power)
iwn_read_eeprom_enhinfo(sc);
iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
base = le16toh(val);
iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"%s: calib version=%u pa type=%u voltage=%u\n", __func__,
hdr.version, hdr.pa_type, le16toh(hdr.volt));
sc->calib_ver = hdr.version;
if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
sc->eeprom_voltage = le16toh(hdr.volt);
iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
sc->eeprom_temp_high = le16toh(val);
iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
sc->eeprom_temp = le16toh(val);
}
if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
/* Compute temperature offset. */
iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
sc->eeprom_temp = le16toh(val);
iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
volt = le16toh(val);
sc->temp_off = sc->eeprom_temp - (volt / -5);
DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n",
sc->eeprom_temp, volt, sc->temp_off);
} else {
/* Read crystal calibration. */
iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
&sc->eeprom_crystal, sizeof (uint32_t));
DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n",
le32toh(sc->eeprom_crystal));
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}
/*
* Translate EEPROM flags to net80211.
*/
static uint32_t
iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel)
{
uint32_t nflags;
nflags = 0;
if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0)
nflags |= IEEE80211_CHAN_PASSIVE;
if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0)
nflags |= IEEE80211_CHAN_NOADHOC;
if (channel->flags & IWN_EEPROM_CHAN_RADAR) {
nflags |= IEEE80211_CHAN_DFS;
/* XXX apparently IBSS may still be marked */
nflags |= IEEE80211_CHAN_NOADHOC;
}
return nflags;
}
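/*
 * Each valid 2GHz ROM channel below is expanded into an 11b entry plus a
 * cloned 11g entry (and an extra HT20 clone on 11n-capable SKUs); the
 * "c[0] = c[-1]" idiom copies the previous channel record so that only
 * the flags need adjusting.
 */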
static void
iwn_read_eeprom_band(struct iwn_softc *sc, int n)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
const struct iwn_chan_band *band = &iwn_bands[n];
struct ieee80211_channel *c;
uint8_t chan;
int i, nflags;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
for (i = 0; i < band->nchan; i++) {
if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
DPRINTF(sc, IWN_DEBUG_RESET,
"skip chan %d flags 0x%x maxpwr %d\n",
band->chan[i], channels[i].flags,
channels[i].maxpwr);
continue;
}
chan = band->chan[i];
nflags = iwn_eeprom_channel_flags(&channels[i]);
c = &ic->ic_channels[ic->ic_nchans++];
c->ic_ieee = chan;
c->ic_maxregpower = channels[i].maxpwr;
c->ic_maxpower = 2*c->ic_maxregpower;
if (n == 0) { /* 2GHz band */
c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_G);
/* G =>'s B is supported */
c->ic_flags = IEEE80211_CHAN_B | nflags;
c = &ic->ic_channels[ic->ic_nchans++];
c[0] = c[-1];
c->ic_flags = IEEE80211_CHAN_G | nflags;
} else { /* 5GHz band */
c->ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_A);
c->ic_flags = IEEE80211_CHAN_A | nflags;
}
/* Save maximum allowed TX power for this channel. */
sc->maxpwr[chan] = channels[i].maxpwr;
DPRINTF(sc, IWN_DEBUG_RESET,
"add chan %d flags 0x%x maxpwr %d\n", chan,
channels[i].flags, channels[i].maxpwr);
if (sc->sc_flags & IWN_FLAG_HAS_11N) {
/* add HT20, HT40 added separately */
c = &ic->ic_channels[ic->ic_nchans++];
c[0] = c[-1];
c->ic_flags |= IEEE80211_CHAN_HT20;
}
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}
static void
iwn_read_eeprom_ht40(struct iwn_softc *sc, int n)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct iwn_eeprom_chan *channels = sc->eeprom_channels[n];
const struct iwn_chan_band *band = &iwn_bands[n];
struct ieee80211_channel *c, *cent, *extc;
uint8_t chan;
int i, nflags;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__);
if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) {
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__);
return;
}
for (i = 0; i < band->nchan; i++) {
if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) {
DPRINTF(sc, IWN_DEBUG_RESET,
"skip chan %d flags 0x%x maxpwr %d\n",
band->chan[i], channels[i].flags,
channels[i].maxpwr);
continue;
}
chan = band->chan[i];
nflags = iwn_eeprom_channel_flags(&channels[i]);
/*
* Each entry defines an HT40 channel pair; find the
* center channel, then the extension channel above.
*/
cent = ieee80211_find_channel_byieee(ic, chan,
(n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
if (cent == NULL) { /* XXX shouldn't happen */
device_printf(sc->sc_dev,
"%s: no entry for channel %d\n", __func__, chan);
continue;
}
extc = ieee80211_find_channel(ic, cent->ic_freq+20,
(n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A));
if (extc == NULL) {
DPRINTF(sc, IWN_DEBUG_RESET,
"%s: skip chan %d, extension channel not found\n",
__func__, chan);
continue;
}
DPRINTF(sc, IWN_DEBUG_RESET,
"add ht40 chan %d flags 0x%x maxpwr %d\n",
chan, channels[i].flags, channels[i].maxpwr);
c = &ic->ic_channels[ic->ic_nchans++];
c[0] = cent[0];
c->ic_extieee = extc->ic_ieee;
c->ic_flags &= ~IEEE80211_CHAN_HT;
c->ic_flags |= IEEE80211_CHAN_HT40U | nflags;
c = &ic->ic_channels[ic->ic_nchans++];
c[0] = extc[0];
c->ic_extieee = cent->ic_ieee;
c->ic_flags &= ~IEEE80211_CHAN_HT;
c->ic_flags |= IEEE80211_CHAN_HT40D | nflags;
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}
static void
iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n],
iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan));
if (n < 5)
iwn_read_eeprom_band(sc, n);
else
iwn_read_eeprom_ht40(sc, n);
ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
}
static struct iwn_eeprom_chan *
iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c)
{
int band, chan, i, j;
if (IEEE80211_IS_CHAN_HT40(c)) {
band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5;
if (IEEE80211_IS_CHAN_HT40D(c))
chan = c->ic_extieee;
else
chan = c->ic_ieee;
for (i = 0; i < iwn_bands[band].nchan; i++) {
if (iwn_bands[band].chan[i] == chan)
return &sc->eeprom_channels[band][i];
}
} else {
for (j = 0; j < 5; j++) {
for (i = 0; i < iwn_bands[j].nchan; i++) {
if (iwn_bands[j].chan[i] == c->ic_ieee)
return &sc->eeprom_channels[j][i];
}
}
}
return NULL;
}
/*
* Enforce flags read from EEPROM.
*/
static int
iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
int nchan, struct ieee80211_channel chans[])
{
struct iwn_softc *sc = ic->ic_softc;
int i;
for (i = 0; i < nchan; i++) {
struct ieee80211_channel *c = &chans[i];
struct iwn_eeprom_chan *channel;
channel = iwn_find_eeprom_channel(sc, c);
if (channel == NULL) {
ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n",
__func__, c->ic_ieee, c->ic_freq, c->ic_flags);
return EINVAL;
}
c->ic_flags |= iwn_eeprom_channel_flags(channel);
}
return 0;
}
static void
iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
{
struct iwn_eeprom_enhinfo enhinfo[35];
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_channel *c;
uint16_t val, base;
int8_t maxpwr;
uint8_t flags;
int i, j;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
base = le16toh(val);
iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
enhinfo, sizeof enhinfo);
for (i = 0; i < nitems(enhinfo); i++) {
flags = enhinfo[i].flags;
if (!(flags & IWN_ENHINFO_VALID))
continue; /* Skip invalid entries. */
maxpwr = 0;
if (sc->txchainmask & IWN_ANT_A)
maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
if (sc->txchainmask & IWN_ANT_B)
maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
if (sc->txchainmask & IWN_ANT_C)
maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
if (sc->ntxchains == 2)
maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
else if (sc->ntxchains == 3)
maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
for (j = 0; j < ic->ic_nchans; j++) {
c = &ic->ic_channels[j];
if ((flags & IWN_ENHINFO_5GHZ)) {
if (!IEEE80211_IS_CHAN_A(c))
continue;
} else if ((flags & IWN_ENHINFO_OFDM)) {
if (!IEEE80211_IS_CHAN_G(c))
continue;
} else if (!IEEE80211_IS_CHAN_B(c))
continue;
if ((flags & IWN_ENHINFO_HT40)) {
if (!IEEE80211_IS_CHAN_HT40(c))
continue;
} else {
if (IEEE80211_IS_CHAN_HT40(c))
continue;
}
if (enhinfo[i].chan != 0 &&
enhinfo[i].chan != c->ic_ieee)
continue;
DPRINTF(sc, IWN_DEBUG_RESET,
"channel %d(%x), maxpwr %d\n", c->ic_ieee,
c->ic_flags, maxpwr / 2);
c->ic_maxregpower = maxpwr / 2;
c->ic_maxpower = maxpwr;
}
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__);
}
static struct ieee80211_node *
iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO);
}
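/*
 * Legacy rates arrive in net80211's 500 kb/s units (12 == 6 Mb/s,
 * 2 == 1 Mb/s).  OFDM rates map to the 4-bit PLCP signal value the
 * firmware expects; CCK rates map to the rate in 100 kb/s units
 * (10/20/55/110).
 */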
static __inline int
rate2plcp(int rate)
{
switch (rate & 0xff) {
case 12: return 0xd;
case 18: return 0xf;
case 24: return 0x5;
case 36: return 0x7;
case 48: return 0x9;
case 72: return 0xb;
case 96: return 0x1;
case 108: return 0x3;
case 2: return 10;
case 4: return 20;
case 11: return 55;
case 22: return 110;
}
return 0;
}
static int
iwn_get_1stream_tx_antmask(struct iwn_softc *sc)
{
return IWN_LSB(sc->txchainmask);
}
static int
iwn_get_2stream_tx_antmask(struct iwn_softc *sc)
{
int tx;
/*
* The '2 stream' setup is a bit .. odd.
*
* For NICs that support only 1 antenna, default to IWN_ANT_AB or
* the firmware panics (e.g. Intel 5100).
*
* For NICs that support two antennas, we use ANT_AB.
*
* For NICs that support three antennas, we use the two that
* are not the default one.
*
* XXX TODO: if bluetooth (full concurrent) is enabled, restrict
* this to only one antenna.
*/
/* Default - transmit on the other antennas */
tx = (sc->txchainmask & ~IWN_LSB(sc->txchainmask));
/* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */
if (tx == 0)
tx = IWN_ANT_AB;
/*
* If the NIC is a two-stream TX NIC, configure the TX mask to
* the default chainmask
*/
else if (sc->ntxchains == 2)
tx = sc->txchainmask;
return (tx);
}
/*
* Calculate the required PLCP value from the given rate,
* to the given node.
*
* This will take the node configuration (eg 11n, rate table
* setup, etc) into consideration.
*/
static uint32_t
iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
uint8_t rate)
{
#define RV(v) ((v) & IEEE80211_RATE_VAL)
struct ieee80211com *ic = ni->ni_ic;
uint32_t plcp = 0;
int ridx;
/*
* If it's an MCS rate, let's set the plcp correctly
* and set the relevant flags based on the node config.
*/
if (rate & IEEE80211_RATE_MCS) {
/*
* Set the initial PLCP value to be between 0->31 for
* MCS 0 -> MCS 31, then set the "I'm an MCS rate!"
* flag.
*/
plcp = RV(rate) | IWN_RFLAG_MCS;
/*
* XXX the following should only occur if both
* the local configuration _and_ the remote node
* advertise these capabilities. Thus this code
* may need fixing!
*/
/*
* Set the channel width and guard interval.
*/
if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
plcp |= IWN_RFLAG_HT40;
if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
plcp |= IWN_RFLAG_SGI;
} else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
plcp |= IWN_RFLAG_SGI;
}
/*
* Ensure the selected rate matches the link quality
* table entries being used.
*/
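/*
 * (rate still carries the IEEE80211_RATE_MCS bit, so 0x87 == MCS 7 and
 * 0x8f == MCS 15: three-stream antenna mask above MCS 15, two streams
 * for MCS 8-15, one stream otherwise.)
 */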
if (rate > 0x8f)
plcp |= IWN_RFLAG_ANT(sc->txchainmask);
else if (rate > 0x87)
plcp |= IWN_RFLAG_ANT(iwn_get_2stream_tx_antmask(sc));
else
plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc));
} else {
/*
* Set the initial PLCP - fine for both
* OFDM and CCK rates.
*/
plcp = rate2plcp(rate);
/* Set CCK flag if it's CCK */
/* XXX It would be nice to have a method
* to map the ridx -> phy table entry
* so we could just query that, rather than
* this hack to check against IWN_RIDX_OFDM6.
*/
ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
rate & IEEE80211_RATE_VAL);
if (ridx < IWN_RIDX_OFDM6 &&
IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
plcp |= IWN_RFLAG_CCK;
/* Set antenna configuration */
/* XXX TODO: is this the right antenna to use for legacy? */
plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc));
}
DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n",
__func__,
rate,
plcp);
return (htole32(plcp));
#undef RV
}
static void
iwn_newassoc(struct ieee80211_node *ni, int isnew)
{
/* Doesn't do anything at the moment */
}
static int
iwn_media_change(struct ifnet *ifp)
{
int error;
error = ieee80211_media_change(ifp);
/* NB: only the fixed rate can change and that doesn't need a reset */
return (error == ENETRESET ? 0 : error);
}
static int
iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct iwn_vap *ivp = IWN_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct iwn_softc *sc = ic->ic_softc;
int error = 0;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__,
ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]);
IEEE80211_UNLOCK(ic);
IWN_LOCK(sc);
callout_stop(&sc->calib_to);
sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
switch (nstate) {
case IEEE80211_S_ASSOC:
if (vap->iv_state != IEEE80211_S_RUN)
break;
/* FALLTHROUGH */
case IEEE80211_S_AUTH:
if (vap->iv_state == IEEE80211_S_AUTH)
break;
/*
* !AUTH -> AUTH transition requires state reset to handle
* reassociations correctly.
*/
sc->rxon->associd = 0;
sc->rxon->filter &= ~htole32(IWN_FILTER_BSS);
sc->calib.state = IWN_CALIB_STATE_INIT;
/* Wait until we hear a beacon before we transmit */
sc->sc_beacon_wait = 1;
if ((error = iwn_auth(sc, vap)) != 0) {
device_printf(sc->sc_dev,
"%s: could not move to auth state\n", __func__);
}
break;
case IEEE80211_S_RUN:
/*
* RUN -> RUN transition; just restart the timers.
*/
if (vap->iv_state == IEEE80211_S_RUN) {
sc->calib_cnt = 0;
break;
}
/* Wait until we hear a beacon before we transmit */
sc->sc_beacon_wait = 1;
/*
* !RUN -> RUN requires setting the association id
* which is done with a firmware cmd. We also defer
* starting the timers until that work is done.
*/
if ((error = iwn_run(sc, vap)) != 0) {
device_printf(sc->sc_dev,
"%s: could not move to run state\n", __func__);
}
break;
case IEEE80211_S_INIT:
sc->calib.state = IWN_CALIB_STATE_INIT;
/*
* Purge the xmit queue so we don't have old frames
* during a new association attempt.
*/
sc->sc_beacon_wait = 0;
iwn_xmit_queue_drain(sc);
break;
default:
break;
}
IWN_UNLOCK(sc);
IEEE80211_LOCK(ic);
if (error != 0){
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
return error;
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return ivp->iv_newstate(vap, nstate, arg);
}
static void
iwn_calib_timeout(void *arg)
{
struct iwn_softc *sc = arg;
IWN_LOCK_ASSERT(sc);
/* Force automatic TX power calibration every 60 secs (120 x the 500 ms callout). */
if (++sc->calib_cnt >= 120) {
uint32_t flags = 0;
DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n",
"sending request for statistics");
(void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
sizeof flags, 1);
sc->calib_cnt = 0;
}
callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
sc);
}
/*
* Process an RX_PHY firmware notification. This is usually immediately
* followed by an MPDU_RX_DONE notification.
*/
static void
iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
struct iwn_rx_data *data)
{
struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__);
bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
/* Save RX statistics, they will be used on MPDU_RX_DONE. */
memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
sc->last_rx_valid = 1;
}
/*
* Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
* Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
*/
static void
iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
struct iwn_rx_data *data)
{
struct iwn_ops *ops = &sc->ops;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct iwn_rx_ring *ring = &sc->rxq;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct mbuf *m, *m1;
struct iwn_rx_stat *stat;
caddr_t head;
bus_addr_t paddr;
uint32_t flags;
int error, len, rssi, nf;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
if (desc->type == IWN_MPDU_RX_DONE) {
/* Check for prior RX_PHY notification. */
if (!sc->last_rx_valid) {
DPRINTF(sc, IWN_DEBUG_ANY,
"%s: missing RX_PHY\n", __func__);
return;
}
stat = &sc->last_rx_stat;
} else
stat = (struct iwn_rx_stat *)(desc + 1);
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
device_printf(sc->sc_dev,
"%s: invalid RX statistic header, len %d\n", __func__,
stat->cfg_phy_len);
return;
}
if (desc->type == IWN_MPDU_RX_DONE) {
struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
head = (caddr_t)(mpdu + 1);
len = le16toh(mpdu->len);
} else {
head = (caddr_t)(stat + 1) + stat->cfg_phy_len;
len = le16toh(stat->len);
}
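/*
 * A 32-bit status word follows the frame payload in the RX buffer; it
 * carries the receive status (FCS validity etc.) that is checked below
 * before the frame is passed up.
 */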
flags = le32toh(*(uint32_t *)(head + len));
/* Discard frames with a bad FCS early. */
if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n",
__func__, flags);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return;
}
/* Discard frames that are too short. */
if (len < sizeof (struct ieee80211_frame_ack)) {
DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n",
__func__, len);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return;
}
m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
if (m1 == NULL) {
DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
__func__);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return;
}
bus_dmamap_unload(ring->data_dmat, data->map);
error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev,
"%s: bus_dmamap_load failed, error %d\n", __func__, error);
m_freem(m1);
/* Try to reload the old mbuf. */
error = bus_dmamap_load(ring->data_dmat, data->map,
mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr,
&paddr, BUS_DMA_NOWAIT);
if (error != 0 && error != EFBIG) {
panic("%s: could not load old RX mbuf", __func__);
}
/* Physical address may have changed. */
ring->desc[ring->cur] = htole32(paddr >> 8);
bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return;
}
m = data->m;
data->m = m1;
/* Update RX descriptor. */
ring->desc[ring->cur] = htole32(paddr >> 8);
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
/* Finalize mbuf. */
- m->m_pkthdr.rcvif = ifp;
m->m_data = head;
m->m_pkthdr.len = m->m_len = len;
/* Grab a reference to the source node. */
wh = mtod(m, struct ieee80211_frame *);
if (len >= sizeof(struct ieee80211_frame_min))
ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
else
ni = NULL;
nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN &&
(ic->ic_flags & IEEE80211_F_SCAN) == 0) ? sc->noise : -95;
rssi = ops->get_rssi(sc, stat);
if (ieee80211_radiotap_active(ic)) {
struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
tap->wr_flags = 0;
if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
tap->wr_dbm_antsignal = (int8_t)rssi;
tap->wr_dbm_antnoise = (int8_t)nf;
tap->wr_tsft = stat->tstamp;
switch (stat->rate) {
/* CCK rates. */
case 10: tap->wr_rate = 2; break;
case 20: tap->wr_rate = 4; break;
case 55: tap->wr_rate = 11; break;
case 110: tap->wr_rate = 22; break;
/* OFDM rates. */
case 0xd: tap->wr_rate = 12; break;
case 0xf: tap->wr_rate = 18; break;
case 0x5: tap->wr_rate = 24; break;
case 0x7: tap->wr_rate = 36; break;
case 0x9: tap->wr_rate = 48; break;
case 0xb: tap->wr_rate = 72; break;
case 0x1: tap->wr_rate = 96; break;
case 0x3: tap->wr_rate = 108; break;
/* Unknown rate: should not happen. */
default: tap->wr_rate = 0;
}
}
/*
* If it's a beacon and we're waiting, then do the
* wakeup. This should unblock raw_xmit/start.
*/
if (sc->sc_beacon_wait) {
uint8_t type, subtype;
/* NB: Re-assign wh */
wh = mtod(m, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
/*
* This assumes at this point we've received our own
* beacon.
*/
DPRINTF(sc, IWN_DEBUG_TRACE,
"%s: beacon_wait, type=%d, subtype=%d\n",
__func__, type, subtype);
if (type == IEEE80211_FC0_TYPE_MGT &&
subtype == IEEE80211_FC0_SUBTYPE_BEACON) {
DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT,
"%s: waking things up\n", __func__);
/* queue taskqueue to transmit! */
taskqueue_enqueue(sc->sc_tq, &sc->sc_xmit_task);
}
}
IWN_UNLOCK(sc);
/* Send the frame to the 802.11 layer. */
if (ni != NULL) {
if (ni->ni_flags & IEEE80211_NODE_HT)
m->m_flags |= M_AMPDU;
(void)ieee80211_input(ni, m, rssi - nf, nf);
/* Node is no longer needed. */
ieee80211_free_node(ni);
} else
(void)ieee80211_input_all(ic, m, rssi - nf, nf);
IWN_LOCK(sc);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
}
/* Process an incoming Compressed BlockAck. */
static void
iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
struct iwn_rx_data *data)
{
struct iwn_ops *ops = &sc->ops;
- struct ifnet *ifp = sc->sc_ifp;
struct iwn_node *wn;
struct ieee80211_node *ni;
struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
struct iwn_tx_ring *txq;
struct iwn_tx_data *txdata;
struct ieee80211_tx_ampdu *tap;
struct mbuf *m;
uint64_t bitmap;
uint16_t ssn;
uint8_t tid;
int ackfailcnt = 0, i, lastidx, qid, *res, shift;
int tx_ok = 0, tx_err = 0;
DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s begin\n", __func__);
bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
qid = le16toh(ba->qid);
txq = &sc->txq[ba->qid];
tap = sc->qid2tap[ba->qid];
tid = tap->txa_tid;
wn = (void *)tap->txa_ni;
res = NULL;
ssn = 0;
if (!IEEE80211_AMPDU_RUNNING(tap)) {
res = tap->txa_private;
ssn = tap->txa_start & 0xfff;
}
for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) {
txdata = &txq->data[txq->read];
/* Unmap and free mbuf. */
bus_dmamap_sync(txq->data_dmat, txdata->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txq->data_dmat, txdata->map);
m = txdata->m, txdata->m = NULL;
ni = txdata->ni, txdata->ni = NULL;
KASSERT(ni != NULL, ("no node"));
KASSERT(m != NULL, ("no mbuf"));
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m);
ieee80211_tx_complete(ni, m, 1);
txq->queued--;
txq->read = (txq->read + 1) % IWN_TX_RING_COUNT;
}
if (txq->queued == 0 && res != NULL) {
iwn_nic_lock(sc);
ops->ampdu_tx_stop(sc, qid, tid, ssn);
iwn_nic_unlock(sc);
sc->qid2tap[qid] = NULL;
free(res, M_DEVBUF);
return;
}
if (wn->agg[tid].bitmap == 0)
return;
shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff);
if (shift < 0)
shift += 0x100;
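/*
 * "shift" realigns the firmware's BA bitmap (bit 0 corresponds to the
 * sequence number in ba->seq) with the first subframe the driver
 * recorded in startidx; indices are kept modulo 256, hence the wrap
 * correction above.
 */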
if (wn->agg[tid].nframes > (64 - shift))
return;
/*
* Walk the bitmap and calculate how many successful and failed
* attempts are made.
*
* Yes, the rate control code doesn't know these are A-MPDU
* subframes and that it's okay to fail some of these.
*/
ni = tap->txa_ni;
bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap;
for (i = 0; bitmap; i++) {
if ((bitmap & 1) == 0) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
tx_err++;
ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
} else {
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
tx_ok++;
ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
}
bitmap >>= 1;
}
DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT,
"->%s: end; %d ok; %d err\n",__func__, tx_ok, tx_err);
}
/*
* Process a CALIBRATION_RESULT notification sent by the initialization
* firmware in response to a CMD_CALIB_CONFIG command (5000 only).
*/
static void
iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
struct iwn_rx_data *data)
{
struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
int len, idx = -1;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/* Runtime firmware should not send such a notification. */
if (sc->sc_flags & IWN_FLAG_CALIB_DONE){
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after clib done\n",
__func__);
return;
}
len = (le32toh(desc->len) & 0x3fff) - 4;
bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
switch (calib->code) {
case IWN5000_PHY_CALIB_DC:
if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC)
idx = 0;
break;
case IWN5000_PHY_CALIB_LO:
if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO)
idx = 1;
break;
case IWN5000_PHY_CALIB_TX_IQ:
if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ)
idx = 2;
break;
case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC)
idx = 3;
break;
case IWN5000_PHY_CALIB_BASE_BAND:
if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND)
idx = 4;
break;
}
if (idx == -1) /* Ignore other results. */
return;
/* Save calibration result. */
if (sc->calibcmd[idx].buf != NULL)
free(sc->calibcmd[idx].buf, M_DEVBUF);
sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
if (sc->calibcmd[idx].buf == NULL) {
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"not enough memory for calibration result %d\n",
calib->code);
return;
}
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len);
sc->calibcmd[idx].len = len;
memcpy(sc->calibcmd[idx].buf, calib, len);
}
static void
iwn_stats_update(struct iwn_softc *sc, struct iwn_calib_state *calib,
struct iwn_stats *stats, int len)
{
struct iwn_stats_bt *stats_bt;
struct iwn_stats *lstats;
/*
* First, check whether the length is the bluetooth-sized or the normal one.
*
* If it's normal, just copy it and bump out.
* Otherwise we have to convert things.
*/
if (len == sizeof(struct iwn_stats) + 4) {
memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
sc->last_stat_valid = 1;
return;
}
/*
* If it's not the bluetooth size - log, then just copy.
*/
if (len != sizeof(struct iwn_stats_bt) + 4) {
DPRINTF(sc, IWN_DEBUG_STATS,
"%s: size of rx statistics (%d) not an expected size!\n",
__func__,
len);
memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats));
sc->last_stat_valid = 1;
return;
}
/*
* Ok. Time to copy.
*/
stats_bt = (struct iwn_stats_bt *) stats;
lstats = &sc->last_stat;
/* flags */
lstats->flags = stats_bt->flags;
/* rx_bt */
memcpy(&lstats->rx.ofdm, &stats_bt->rx_bt.ofdm,
sizeof(struct iwn_rx_phy_stats));
memcpy(&lstats->rx.cck, &stats_bt->rx_bt.cck,
sizeof(struct iwn_rx_phy_stats));
memcpy(&lstats->rx.general, &stats_bt->rx_bt.general_bt.common,
sizeof(struct iwn_rx_general_stats));
memcpy(&lstats->rx.ht, &stats_bt->rx_bt.ht,
sizeof(struct iwn_rx_ht_phy_stats));
/* tx */
memcpy(&lstats->tx, &stats_bt->tx,
sizeof(struct iwn_tx_stats));
/* general */
memcpy(&lstats->general, &stats_bt->general,
sizeof(struct iwn_general_stats));
/* XXX TODO: Squirrel away the extra bluetooth stats somewhere */
sc->last_stat_valid = 1;
}
/*
* Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
* The latter is sent by the firmware after each received beacon.
*/
static void
iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
struct iwn_rx_data *data)
{
struct iwn_ops *ops = &sc->ops;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct iwn_calib_state *calib = &sc->calib;
struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
struct iwn_stats *lstats;
int temp;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/* Ignore statistics received during a scan. */
if (vap->iv_state != IEEE80211_S_RUN ||
(ic->ic_flags & IEEE80211_F_SCAN)){
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n",
__func__);
return;
}
bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD);
DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_STATS,
"%s: received statistics, cmd %d, len %d\n",
__func__, desc->type, le16toh(desc->len));
sc->calib_cnt = 0; /* Reset TX power calibration timeout. */
/*
* Collect/track general statistics for reporting.
*
* This takes care of ensuring that the bluetooth-sized message
* will be correctly converted to the legacy-sized message.
*/
iwn_stats_update(sc, calib, stats, le16toh(desc->len));
/*
* And now, let's take a reference of it to use!
*/
lstats = &sc->last_stat;
/* Test if temperature has changed. */
if (lstats->general.temp != sc->rawtemp) {
/* Convert "raw" temperature to degC. */
sc->rawtemp = stats->general.temp;
temp = ops->get_temperature(sc);
DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n",
__func__, temp);
/* Update TX power if need be (4965AGN only). */
if (sc->hw_type == IWN_HW_REV_TYPE_4965)
iwn4965_power_calibration(sc, temp);
}
if (desc->type != IWN_BEACON_STATISTICS)
return; /* Reply to a statistics request. */
sc->noise = iwn_get_noise(&lstats->rx.general);
DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise);
/* Test that RSSI and noise are present in stats report. */
if (le32toh(lstats->rx.general.flags) != 1) {
DPRINTF(sc, IWN_DEBUG_ANY, "%s\n",
"received statistics without RSSI");
return;
}
if (calib->state == IWN_CALIB_STATE_ASSOC)
iwn_collect_noise(sc, &lstats->rx.general);
else if (calib->state == IWN_CALIB_STATE_RUN) {
iwn_tune_sensitivity(sc, &lstats->rx);
/*
* XXX TODO: Only run the RX recovery if we're associated!
*/
iwn_check_rx_recovery(sc, lstats);
iwn_save_stats_counters(sc, lstats);
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
}
/*
* Save the relevant statistic counters for the next calibration
* pass.
*/
static void
iwn_save_stats_counters(struct iwn_softc *sc, const struct iwn_stats *rs)
{
struct iwn_calib_state *calib = &sc->calib;
/* Save counters values for next call. */
calib->bad_plcp_cck = le32toh(rs->rx.cck.bad_plcp);
calib->fa_cck = le32toh(rs->rx.cck.fa);
calib->bad_plcp_ht = le32toh(rs->rx.ht.bad_plcp);
calib->bad_plcp_ofdm = le32toh(rs->rx.ofdm.bad_plcp);
calib->fa_ofdm = le32toh(rs->rx.ofdm.fa);
/* Last time we received these tick values */
sc->last_calib_ticks = ticks;
}
/*
* Process a TX_DONE firmware notification. Unfortunately, the 4965AGN
* and 5000 adapters have different, incompatible TX status formats.
*/
static void
iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
struct iwn_rx_data *data)
{
struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
struct iwn_tx_ring *ring;
int qid;
qid = desc->qid & 0xf;
ring = &sc->txq[qid];
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
"qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n",
__func__, desc->qid, desc->idx,
stat->rtsfailcnt,
stat->ackfailcnt,
stat->btkillcnt,
stat->rate, le16toh(stat->duration),
le32toh(stat->status));
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
if (qid >= sc->firstaggqueue) {
iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
stat->ackfailcnt, &stat->status);
} else {
iwn_tx_done(sc, desc, stat->ackfailcnt,
le32toh(stat->status) & 0xff);
}
}
static void
iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
struct iwn_rx_data *data)
{
struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
struct iwn_tx_ring *ring;
int qid;
qid = desc->qid & 0xf;
ring = &sc->txq[qid];
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: "
"qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n",
__func__, desc->qid, desc->idx,
stat->rtsfailcnt,
stat->ackfailcnt,
stat->btkillcnt,
stat->rate, le16toh(stat->duration),
le32toh(stat->status));
#ifdef notyet
/* Reset TX scheduler slot. */
iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
#endif
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
if (qid >= sc->firstaggqueue) {
iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes,
stat->ackfailcnt, &stat->status);
} else {
iwn_tx_done(sc, desc, stat->ackfailcnt,
le16toh(stat->status) & 0xff);
}
}
/*
* Adapter-independent backend for TX_DONE firmware notifications.
*/
static void
iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
uint8_t status)
{
- struct ifnet *ifp = sc->sc_ifp;
struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
struct iwn_tx_data *data = &ring->data[desc->idx];
struct mbuf *m;
struct ieee80211_node *ni;
struct ieee80211vap *vap;
KASSERT(data->ni != NULL, ("no node"));
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/* Unmap and free mbuf. */
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m = data->m, data->m = NULL;
ni = data->ni, data->ni = NULL;
vap = ni->ni_vap;
/*
* Update rate control statistics for the node.
*/
- if (status & IWN_TX_FAIL) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ if (status & IWN_TX_FAIL)
ieee80211_ratectl_tx_complete(vap, ni,
IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
- } else {
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
+ else
ieee80211_ratectl_tx_complete(vap, ni,
IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
- }
/*
* Channels marked for "radar" require traffic to be received
* to unlock before we can transmit. Until traffic is seen
* any attempt to transmit is returned immediately with status
* set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily
* happen on first authenticate after scanning. To work around
* this we ignore a failure of this sort in AUTH state so the
* 802.11 layer will fall back to using a timeout to wait for
* the AUTH reply. This allows the firmware time to see
* traffic so a subsequent retry of AUTH succeeds. It's
* unclear why the firmware does not maintain state for
* channels recently visited as this would allow immediate
* use of the channel after a scan (where we see traffic).
*/
if (status == IWN_TX_FAIL_TX_LOCKED &&
ni->ni_vap->iv_state == IEEE80211_S_AUTH)
ieee80211_tx_complete(ni, m, 0);
else
ieee80211_tx_complete(ni, m,
(status & IWN_TX_FAIL) != 0);
sc->sc_tx_timer = 0;
if (--ring->queued < IWN_TX_RING_LOMARK) {
sc->qfullmsk &= ~(1 << ring->qid);
- if (sc->qfullmsk == 0 &&
- (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- iwn_start_locked(ifp);
- }
+ if (sc->qfullmsk == 0)
+ iwn_start_locked(sc);
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
}
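/*
 * Illustrative sketch, not part of the driver: the completion decision
 * made above.  A failure with status IWN_TX_FAIL_TX_LOCKED while the
 * vap is still authenticating is reported upward as "not failed", so
 * net80211 falls back to its AUTH timeout instead of counting a hard
 * error.  Standalone C; the function name is hypothetical and the
 * constants are stand-in values, not the driver's definitions.
 */
#include <stdint.h>

static int
example_tx_report_failed(uint8_t status, int in_auth_state)
{
	const uint8_t fail_bit = 0x80;		/* stand-in for IWN_TX_FAIL */
	const uint8_t fail_tx_locked = 0x90;	/* stand-in for IWN_TX_FAIL_TX_LOCKED */

	if (status == fail_tx_locked && in_auth_state)
		return (0);	/* report success; the AUTH timeout will retry */
	return ((status & fail_bit) != 0);
}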
/*
* Process a "command done" firmware notification. This is where we wakeup
* processes waiting for a synchronous command completion.
*/
static void
iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
{
struct iwn_tx_ring *ring;
struct iwn_tx_data *data;
int cmd_queue_num;
if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
cmd_queue_num = IWN_PAN_CMD_QUEUE;
else
cmd_queue_num = IWN_CMD_QUEUE_NUM;
if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num)
return; /* Not a command ack. */
ring = &sc->txq[cmd_queue_num];
data = &ring->data[desc->idx];
/* If the command was mapped in an mbuf, free it. */
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
wakeup(&ring->desc[desc->idx]);
}
static void
iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes,
int ackfailcnt, void *stat)
{
struct iwn_ops *ops = &sc->ops;
- struct ifnet *ifp = sc->sc_ifp;
struct iwn_tx_ring *ring = &sc->txq[qid];
struct iwn_tx_data *data;
struct mbuf *m;
struct iwn_node *wn;
struct ieee80211_node *ni;
struct ieee80211_tx_ampdu *tap;
uint64_t bitmap;
uint32_t *status = stat;
uint16_t *aggstatus = stat;
uint16_t ssn;
uint8_t tid;
int bit, i, lastidx, *res, seqno, shift, start;
/* XXX TODO: status is le16 field! Grr */
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: nframes=%d, status=0x%08x\n",
__func__,
nframes,
*status);
tap = sc->qid2tap[qid];
tid = tap->txa_tid;
wn = (void *)tap->txa_ni;
ni = tap->txa_ni;
/*
* XXX TODO: ACK and RTS failures would be nice here!
*/
/*
* A-MPDU single frame status - if we failed to transmit it
* in A-MPDU, then it may be a permanent failure.
*
* XXX TODO: check what the Linux iwlwifi driver does here;
* there's some permanent and temporary failures that may be
* handled differently.
*/
if (nframes == 1) {
if ((*status & 0xff) != 1 && (*status & 0xff) != 2) {
#ifdef NOT_YET
printf("ieee80211_send_bar()\n");
#endif
/*
* If we completely fail a transmit, make sure a
* notification is pushed up to the rate control
* layer.
*/
ieee80211_ratectl_tx_complete(ni->ni_vap,
ni,
IEEE80211_RATECTL_TX_FAILURE,
&ackfailcnt,
NULL);
} else {
/*
* If nframes=1, then we won't be getting a BA for
* this frame. Ensure that we correctly update the
* rate control code with how many retries were
* needed to send it.
*/
ieee80211_ratectl_tx_complete(ni->ni_vap,
ni,
IEEE80211_RATECTL_TX_SUCCESS,
&ackfailcnt,
NULL);
}
}
bitmap = 0;
start = idx;
for (i = 0; i < nframes; i++) {
if (le16toh(aggstatus[i * 2]) & 0xc)
continue;
idx = le16toh(aggstatus[2*i + 1]) & 0xff;
bit = idx - start;
shift = 0;
if (bit >= 64) {
shift = 0x100 - idx + start;
bit = 0;
start = idx;
} else if (bit <= -64)
bit = 0x100 - start + idx;
else if (bit < 0) {
shift = start - idx;
start = idx;
bit = 0;
}
bitmap = bitmap << shift;
bitmap |= 1ULL << bit;
}
tap = sc->qid2tap[qid];
tid = tap->txa_tid;
wn = (void *)tap->txa_ni;
wn->agg[tid].bitmap = bitmap;
wn->agg[tid].startidx = start;
wn->agg[tid].nframes = nframes;
res = NULL;
ssn = 0;
if (!IEEE80211_AMPDU_RUNNING(tap)) {
res = tap->txa_private;
ssn = tap->txa_start & 0xfff;
}
/* This is going nframes DWORDS into the descriptor? */
seqno = le32toh(*(status + nframes)) & 0xfff;
for (lastidx = (seqno & 0xff); ring->read != lastidx;) {
data = &ring->data[ring->read];
/* Unmap and free mbuf. */
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m = data->m, data->m = NULL;
ni = data->ni, data->ni = NULL;
KASSERT(ni != NULL, ("no node"));
KASSERT(m != NULL, ("no mbuf"));
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m);
ieee80211_tx_complete(ni, m, 1);
ring->queued--;
ring->read = (ring->read + 1) % IWN_TX_RING_COUNT;
}
if (ring->queued == 0 && res != NULL) {
iwn_nic_lock(sc);
ops->ampdu_tx_stop(sc, qid, tid, ssn);
iwn_nic_unlock(sc);
sc->qid2tap[qid] = NULL;
free(res, M_DEVBUF);
return;
}
sc->sc_tx_timer = 0;
if (ring->queued < IWN_TX_RING_LOMARK) {
sc->qfullmsk &= ~(1 << ring->qid);
- if (sc->qfullmsk == 0 &&
- (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- iwn_start_locked(ifp);
- }
+ if (sc->qfullmsk == 0)
+ iwn_start_locked(sc);
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
}
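/*
 * Illustrative sketch, not part of the driver: a simplified version of
 * the bitmap construction above.  The aggregation status is an array
 * of 16-bit pairs (status, ring index); every subframe the firmware
 * did not flag sets one bit, positioned relative to the first index
 * seen, so the result lines up with the compressed BlockAck that
 * follows.  This sketch ignores the 256-entry ring wrap handled by the
 * real code and assumes host byte order; names are made up.
 */
#include <stdint.h>

static uint64_t
example_agg_bitmap(const uint16_t *aggstatus, int nframes)
{
	uint64_t bitmap = 0;
	int i, start = -1;

	for (i = 0; i < nframes; i++) {
		if (aggstatus[i * 2] & 0xc)	/* skip frames the firmware flagged */
			continue;
		int idx = aggstatus[i * 2 + 1] & 0xff;
		if (start < 0)
			start = idx;		/* anchor the 64-bit window */
		if (idx - start < 64)
			bitmap |= 1ULL << (idx - start);
	}
	return (bitmap);
}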
/*
* Process an INT_FH_RX or INT_SW_RX interrupt.
*/
static void
iwn_notif_intr(struct iwn_softc *sc)
{
struct iwn_ops *ops = &sc->ops;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint16_t hw;
bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
BUS_DMASYNC_POSTREAD);
hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
while (sc->rxq.cur != hw) {
struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
struct iwn_rx_desc *desc;
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
desc = mtod(data->m, struct iwn_rx_desc *);
DPRINTF(sc, IWN_DEBUG_RECV,
"%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
__func__, sc->rxq.cur, desc->qid & 0xf, desc->idx, desc->flags,
desc->type, iwn_intr_str(desc->type),
le16toh(desc->len));
if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF)) /* Reply to a command. */
iwn_cmd_done(sc, desc);
switch (desc->type) {
case IWN_RX_PHY:
iwn_rx_phy(sc, desc, data);
break;
case IWN_RX_DONE: /* 4965AGN only. */
case IWN_MPDU_RX_DONE:
/* An 802.11 frame has been received. */
iwn_rx_done(sc, desc, data);
break;
case IWN_RX_COMPRESSED_BA:
/* A Compressed BlockAck has been received. */
iwn_rx_compressed_ba(sc, desc, data);
break;
case IWN_TX_DONE:
/* An 802.11 frame has been transmitted. */
ops->tx_done(sc, desc, data);
break;
case IWN_RX_STATISTICS:
case IWN_BEACON_STATISTICS:
iwn_rx_statistics(sc, desc, data);
break;
case IWN_BEACON_MISSED:
{
struct iwn_beacon_missed *miss =
(struct iwn_beacon_missed *)(desc + 1);
int misses;
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
misses = le32toh(miss->consecutive);
DPRINTF(sc, IWN_DEBUG_STATE,
"%s: beacons missed %d/%d\n", __func__,
misses, le32toh(miss->total));
/*
* If more than 5 consecutive beacons are missed,
* reinitialize the sensitivity state machine.
*/
if (vap->iv_state == IEEE80211_S_RUN &&
(ic->ic_flags & IEEE80211_F_SCAN) == 0) {
if (misses > 5)
(void)iwn_init_sensitivity(sc);
if (misses >= vap->iv_bmissthreshold) {
IWN_UNLOCK(sc);
ieee80211_beacon_miss(ic);
IWN_LOCK(sc);
}
}
break;
}
case IWN_UC_READY:
{
struct iwn_ucode_info *uc =
(struct iwn_ucode_info *)(desc + 1);
/* The microcontroller is ready. */
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
DPRINTF(sc, IWN_DEBUG_RESET,
"microcode alive notification version=%d.%d "
"subtype=%x alive=%x\n", uc->major, uc->minor,
uc->subtype, le32toh(uc->valid));
if (le32toh(uc->valid) != 1) {
device_printf(sc->sc_dev,
"microcontroller initialization failed");
break;
}
if (uc->subtype == IWN_UCODE_INIT) {
/* Save microcontroller report. */
memcpy(&sc->ucode_info, uc, sizeof (*uc));
}
/* Save the address of the error log in SRAM. */
sc->errptr = le32toh(uc->errptr);
break;
}
case IWN_STATE_CHANGED:
{
/*
* State change allows hardware switch change to be
* noted. However, we handle this in iwn_intr as we
* get both the enable and disable interrupts.
*/
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
#ifdef IWN_DEBUG
uint32_t *status = (uint32_t *)(desc + 1);
DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE,
"state changed to %x\n",
le32toh(*status));
#endif
break;
}
case IWN_START_SCAN:
{
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
#ifdef IWN_DEBUG
struct iwn_start_scan *scan =
(struct iwn_start_scan *)(desc + 1);
DPRINTF(sc, IWN_DEBUG_ANY,
"%s: scanning channel %d status %x\n",
__func__, scan->chan, le32toh(scan->status));
#endif
break;
}
case IWN_STOP_SCAN:
{
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
#ifdef IWN_DEBUG
struct iwn_stop_scan *scan =
(struct iwn_stop_scan *)(desc + 1);
DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN,
"scan finished nchan=%d status=%d chan=%d\n",
scan->nchan, scan->status, scan->chan);
#endif
sc->sc_is_scanning = 0;
IWN_UNLOCK(sc);
ieee80211_scan_next(vap);
IWN_LOCK(sc);
break;
}
case IWN5000_CALIBRATION_RESULT:
iwn5000_rx_calib_results(sc, desc, data);
break;
case IWN5000_CALIBRATION_DONE:
sc->sc_flags |= IWN_FLAG_CALIB_DONE;
wakeup(sc);
break;
}
sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
}
/* Tell the firmware what we have processed. */
hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
}
/*
* Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
* from power-down sleep mode.
*/
static void
iwn_wakeup_intr(struct iwn_softc *sc)
{
int qid;
DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n",
__func__);
/* Wakeup RX and TX rings. */
IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
for (qid = 0; qid < sc->ntxqs; qid++) {
struct iwn_tx_ring *ring = &sc->txq[qid];
IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
}
}
static void
iwn_rftoggle_intr(struct iwn_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp = IWN_READ(sc, IWN_GP_CNTRL);
IWN_LOCK_ASSERT(sc);
device_printf(sc->sc_dev, "RF switch: radio %s\n",
(tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
if (tmp & IWN_GP_CNTRL_RFKILL)
ieee80211_runtask(ic, &sc->sc_radioon_task);
else
ieee80211_runtask(ic, &sc->sc_radiooff_task);
}
/*
* Dump the error log of the firmware when a firmware panic occurs. Although
* we can't debug the firmware because it is neither open source nor free, it
* can help us to identify certain classes of problems.
*/
static void
iwn_fatal_intr(struct iwn_softc *sc)
{
struct iwn_fw_dump dump;
int i;
IWN_LOCK_ASSERT(sc);
/* Force a complete recalibration on next init. */
sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
/* Check that the error log address is valid. */
if (sc->errptr < IWN_FW_DATA_BASE ||
sc->errptr + sizeof (dump) >
IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
printf("%s: bad firmware error log address 0x%08x\n", __func__,
sc->errptr);
return;
}
if (iwn_nic_lock(sc) != 0) {
printf("%s: could not read firmware error log\n", __func__);
return;
}
/* Read firmware error log from SRAM. */
iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
sizeof (dump) / sizeof (uint32_t));
iwn_nic_unlock(sc);
if (dump.valid == 0) {
printf("%s: firmware error log is empty\n", __func__);
return;
}
printf("firmware error log:\n");
printf(" error type = \"%s\" (0x%08X)\n",
(dump.id < nitems(iwn_fw_errmsg)) ?
iwn_fw_errmsg[dump.id] : "UNKNOWN",
dump.id);
printf(" program counter = 0x%08X\n", dump.pc);
printf(" source line = 0x%08X\n", dump.src_line);
printf(" error data = 0x%08X%08X\n",
dump.error_data[0], dump.error_data[1]);
printf(" branch link = 0x%08X%08X\n",
dump.branch_link[0], dump.branch_link[1]);
printf(" interrupt link = 0x%08X%08X\n",
dump.interrupt_link[0], dump.interrupt_link[1]);
printf(" time = %u\n", dump.time[0]);
/* Dump driver status (TX and RX rings) while we're here. */
printf("driver status:\n");
for (i = 0; i < sc->ntxqs; i++) {
struct iwn_tx_ring *ring = &sc->txq[i];
printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
i, ring->qid, ring->cur, ring->queued);
}
printf(" rx ring: cur=%d\n", sc->rxq.cur);
}
static void
iwn_intr(void *arg)
{
struct iwn_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
uint32_t r1, r2, tmp;
IWN_LOCK(sc);
/* Disable interrupts. */
IWN_WRITE(sc, IWN_INT_MASK, 0);
/* Read interrupts from ICT (fast) or from registers (slow). */
if (sc->sc_flags & IWN_FLAG_USE_ICT) {
tmp = 0;
while (sc->ict[sc->ict_cur] != 0) {
tmp |= sc->ict[sc->ict_cur];
sc->ict[sc->ict_cur] = 0; /* Acknowledge. */
sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
}
tmp = le32toh(tmp);
if (tmp == 0xffffffff) /* Shouldn't happen. */
tmp = 0;
else if (tmp & 0xc0000) /* Workaround a HW bug. */
tmp |= 0x8000;
r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
r2 = 0; /* Unused. */
} else {
r1 = IWN_READ(sc, IWN_INT);
if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) {
IWN_UNLOCK(sc);
return; /* Hardware gone! */
}
r2 = IWN_READ(sc, IWN_FH_INT);
}
DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n"
, r1, r2);
if (r1 == 0 && r2 == 0)
goto done; /* Interrupt not for us. */
/* Acknowledge interrupts. */
IWN_WRITE(sc, IWN_INT, r1);
if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
IWN_WRITE(sc, IWN_FH_INT, r2);
if (r1 & IWN_INT_RF_TOGGLED) {
iwn_rftoggle_intr(sc);
goto done;
}
if (r1 & IWN_INT_CT_REACHED) {
device_printf(sc->sc_dev, "%s: critical temperature reached!\n",
__func__);
}
if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
device_printf(sc->sc_dev, "%s: fatal firmware error\n",
__func__);
#ifdef IWN_DEBUG
iwn_debug_register(sc);
#endif
/* Dump firmware error log and stop. */
iwn_fatal_intr(sc);
taskqueue_enqueue(sc->sc_tq, &sc->sc_panic_task);
goto done;
}
if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
(r2 & IWN_FH_INT_RX)) {
if (sc->sc_flags & IWN_FLAG_USE_ICT) {
if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
IWN_WRITE_1(sc, IWN_INT_PERIODIC,
IWN_INT_PERIODIC_DIS);
iwn_notif_intr(sc);
if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
IWN_WRITE_1(sc, IWN_INT_PERIODIC,
IWN_INT_PERIODIC_ENA);
}
} else
iwn_notif_intr(sc);
}
if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
if (sc->sc_flags & IWN_FLAG_USE_ICT)
IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
wakeup(sc); /* FH DMA transfer completed. */
}
if (r1 & IWN_INT_ALIVE)
wakeup(sc); /* Firmware is alive. */
if (r1 & IWN_INT_WAKEUP)
iwn_wakeup_intr(sc);
done:
/* Re-enable interrupts. */
- if (ifp->if_flags & IFF_UP)
+ if (sc->sc_flags & IWN_FLAG_RUNNING)
IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
IWN_UNLOCK(sc);
}
/*
* Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
* 5000 adapters use a slightly different format).
*/
static void
iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
uint16_t len)
{
uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
*w = htole16(len + 8);
bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
BUS_DMASYNC_PREWRITE);
if (idx < IWN_SCHED_WINSZ) {
*(w + IWN_TX_RING_COUNT) = *w;
bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
BUS_DMASYNC_PREWRITE);
}
}
static void
iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
uint16_t len)
{
uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
*w = htole16(id << 12 | (len + 8));
bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
BUS_DMASYNC_PREWRITE);
if (idx < IWN_SCHED_WINSZ) {
*(w + IWN_TX_RING_COUNT) = *w;
bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
BUS_DMASYNC_PREWRITE);
}
}
#ifdef notyet
static void
iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
{
uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
*w = (*w & htole16(0xf000)) | htole16(1);
bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
BUS_DMASYNC_PREWRITE);
if (idx < IWN_SCHED_WINSZ) {
*(w + IWN_TX_RING_COUNT) = *w;
bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
BUS_DMASYNC_PREWRITE);
}
}
#endif
/*
* Check whether OFDM 11g protection will be enabled for the given rate.
*
* The original driver code only enabled protection for OFDM rates.
* It didn't check to see whether it was operating in 11a or 11bg mode.
*/
static int
iwn_check_rate_needs_protection(struct iwn_softc *sc,
struct ieee80211vap *vap, uint8_t rate)
{
struct ieee80211com *ic = vap->iv_ic;
/*
* Not in 2GHz mode? Then there's no need to enable OFDM
* 11bg protection.
*/
if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
return (0);
}
/*
* 11bg protection not enabled? Then don't use it.
*/
if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0)
return (0);
/*
* If it's an 11n rate - no protection.
* We'll do it via a specific 11n check.
*/
if (rate & IEEE80211_RATE_MCS) {
return (0);
}
/*
* Do a rate table lookup. If the PHY is CCK,
* don't do protection.
*/
if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK)
return (0);
/*
* Yup, enable protection.
*/
return (1);
}
/*
* Return a value between 0 and IWN_MAX_TX_RETRIES-1 as an index into
* the link quality table that reflects the given rate.
*/
static int
iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni,
uint8_t rate)
{
struct ieee80211_rateset *rs;
int is_11n;
int nr;
int i;
uint8_t cmp_rate;
/*
* Figure out if we're using 11n or not here.
*/
if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0)
is_11n = 1;
else
is_11n = 0;
/*
* Use the correct rate table.
*/
if (is_11n) {
rs = (struct ieee80211_rateset *) &ni->ni_htrates;
nr = ni->ni_htrates.rs_nrates;
} else {
rs = &ni->ni_rates;
nr = rs->rs_nrates;
}
/*
* Find the relevant link quality entry in the table.
*/
for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) {
/*
* The link quality table index starts at 0 == highest
* rate, so we walk the rate table backwards.
*/
cmp_rate = rs->rs_rates[(nr - 1) - i];
if (rate & IEEE80211_RATE_MCS)
cmp_rate |= IEEE80211_RATE_MCS;
#if 0
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n",
__func__,
i,
nr,
rate,
cmp_rate);
#endif
if (cmp_rate == rate)
return (i);
}
/* Failed? Start at the end */
return (IWN_MAX_TX_RETRIES - 1);
}
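/*
 * Illustrative sketch, not part of the driver: the reverse lookup used
 * above.  The link quality table is indexed with 0 meaning the highest
 * rate, while net80211 rate sets are ordered lowest first, so the rate
 * set is walked backwards and the offset from its end is returned.
 * Standalone C with hypothetical names; the MCS-flag handling of the
 * real code is omitted.
 */
#include <stdint.h>

static int
example_linkq_offset(const uint8_t *rates, int nrates, int max_retries,
    uint8_t rate)
{
	int i;

	for (i = 0; i < nrates && i < max_retries - 1; i++) {
		/* Entry 0 of the link quality table == highest rate. */
		if (rates[(nrates - 1) - i] == rate)
			return (i);
	}
	return (max_retries - 1);	/* not found: start at the lowest rate */
}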
static int
iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
struct iwn_ops *ops = &sc->ops;
const struct ieee80211_txparam *tp;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct iwn_node *wn = (void *)ni;
struct iwn_tx_ring *ring;
struct iwn_tx_desc *desc;
struct iwn_tx_data *data;
struct iwn_tx_cmd *cmd;
struct iwn_cmd_data *tx;
struct ieee80211_frame *wh;
struct ieee80211_key *k = NULL;
struct mbuf *m1;
uint32_t flags;
uint16_t qos;
u_int hdrlen;
bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
uint8_t tid, type;
int ac, i, totlen, error, pad, nsegs = 0, rate;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
IWN_LOCK_ASSERT(sc);
wh = mtod(m, struct ieee80211_frame *);
hdrlen = ieee80211_anyhdrsize(wh);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
/* Select EDCA Access Category and TX ring for this frame. */
if (IEEE80211_QOS_HAS_SEQ(wh)) {
qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
tid = qos & IEEE80211_QOS_TID;
} else {
qos = 0;
tid = 0;
}
ac = M_WME_GETAC(m);
if (m->m_flags & M_AMPDU_MPDU) {
uint16_t seqno;
struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac];
if (!IEEE80211_AMPDU_RUNNING(tap)) {
m_freem(m);
return EINVAL;
}
/*
* Queue this frame to the hardware ring that we've
* negotiated AMPDU TX on.
*
* Note that the sequence number must match the TX slot
* being used!
*/
ac = *(int *)tap->txa_private;
seqno = ni->ni_txseqs[tid];
*(uint16_t *)wh->i_seq =
htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
ring = &sc->txq[ac];
if ((seqno % 256) != ring->cur) {
device_printf(sc->sc_dev,
"%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n",
__func__,
m,
seqno,
seqno % 256,
ring->cur);
}
ni->ni_txseqs[tid]++;
}
ring = &sc->txq[ac];
desc = &ring->desc[ring->cur];
data = &ring->data[ring->cur];
/* Choose a TX rate index. */
tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
if (type == IEEE80211_FC0_TYPE_MGT)
rate = tp->mgmtrate;
else if (IEEE80211_IS_MULTICAST(wh->i_addr1))
rate = tp->mcastrate;
else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
rate = tp->ucastrate;
else if (m->m_flags & M_EAPOL)
rate = tp->mgmtrate;
else {
/* XXX pass pktlen */
(void) ieee80211_ratectl_rate(ni, NULL, 0);
rate = ni->ni_txrate;
}
/* Encrypt the frame if need be. */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
/* Retrieve key for TX. */
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
m_freem(m);
return ENOBUFS;
}
/* 802.11 header may have moved. */
wh = mtod(m, struct ieee80211_frame *);
}
totlen = m->m_pkthdr.len;
if (ieee80211_radiotap_active_vap(vap)) {
struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
if (k != NULL)
tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
ieee80211_radiotap_tx(vap, m);
}
/* Prepare TX firmware command. */
cmd = &ring->cmd[ring->cur];
cmd->code = IWN_CMD_TX_DATA;
cmd->flags = 0;
cmd->qid = ring->qid;
cmd->idx = ring->cur;
tx = (struct iwn_cmd_data *)cmd->data;
/* NB: No need to clear tx, all fields are reinitialized here. */
tx->scratch = 0; /* clear "scratch" area */
flags = 0;
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
/* Unicast frame, check if an ACK is expected. */
if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
IEEE80211_QOS_ACKPOLICY_NOACK)
flags |= IWN_TX_NEED_ACK;
}
if ((wh->i_fc[0] &
(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
(IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */
if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */
/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
/* NB: Group frames are sent using CCK in 802.11b/g. */
if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
flags |= IWN_TX_NEED_RTS;
} else if (iwn_check_rate_needs_protection(sc, vap, rate)) {
if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
flags |= IWN_TX_NEED_CTS;
else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
flags |= IWN_TX_NEED_RTS;
} else if ((rate & IEEE80211_RATE_MCS) &&
(ic->ic_htprotmode == IEEE80211_PROT_RTSCTS)) {
flags |= IWN_TX_NEED_RTS;
}
/* XXX HT protection? */
if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
/* 5000 autoselects RTS/CTS or CTS-to-self. */
flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
flags |= IWN_TX_NEED_PROTECTION;
} else
flags |= IWN_TX_FULL_TXOP;
}
}
if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
type != IEEE80211_FC0_TYPE_DATA)
tx->id = sc->broadcast_id;
else
tx->id = wn->id;
if (type == IEEE80211_FC0_TYPE_MGT) {
uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
/* Tell HW to set timestamp in probe responses. */
if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
flags |= IWN_TX_INSERT_TSTAMP;
if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
tx->timeout = htole16(3);
else
tx->timeout = htole16(2);
} else
tx->timeout = htole16(0);
if (hdrlen & 3) {
/* First segment length must be a multiple of 4. */
flags |= IWN_TX_NEED_PADDING;
pad = 4 - (hdrlen & 3);
} else
pad = 0;
tx->len = htole16(totlen);
tx->tid = tid;
tx->rts_ntries = 60;
tx->data_ntries = 15;
tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
tx->rate = iwn_rate_to_plcp(sc, ni, rate);
if (tx->id == sc->broadcast_id) {
/* Group or management frame. */
tx->linkq = 0;
} else {
tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate);
flags |= IWN_TX_LINKQ; /* enable MRR */
}
/* Set physical address of "scratch area". */
tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
/* Copy 802.11 header in TX command. */
memcpy((uint8_t *)(tx + 1), wh, hdrlen);
/* Trim 802.11 header. */
m_adj(m, hdrlen);
tx->security = 0;
tx->flags = htole32(flags);
error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
&nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
if (error != EFBIG) {
device_printf(sc->sc_dev,
"%s: can't map mbuf (error %d)\n", __func__, error);
m_freem(m);
return error;
}
/* Too many DMA segments, linearize mbuf. */
m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1);
if (m1 == NULL) {
device_printf(sc->sc_dev,
"%s: could not defrag mbuf\n", __func__);
m_freem(m);
return ENOBUFS;
}
m = m1;
error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
segs, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: can't map mbuf (error %d)\n", __func__, error);
m_freem(m);
return error;
}
}
data->m = m;
data->ni = ni;
DPRINTF(sc, IWN_DEBUG_XMIT,
"%s: qid %d idx %d len %d nsegs %d flags 0x%08x rate 0x%04x plcp 0x%08x\n",
__func__,
ring->qid,
ring->cur,
m->m_pkthdr.len,
nsegs,
flags,
rate,
tx->rate);
/* Fill TX descriptor. */
desc->nsegs = 1;
if (m->m_len != 0)
desc->nsegs += nsegs;
/* First DMA segment is used by the TX command. */
desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
(4 + sizeof (*tx) + hdrlen + pad) << 4);
/* Other DMA segments are for data payload. */
seg = &segs[0];
for (i = 1; i <= nsegs; i++) {
desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
seg->ds_len << 4);
seg++;
}
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
/* Update TX scheduler. */
if (ring->qid >= sc->firstaggqueue)
ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
/* Kick TX ring. */
ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
/* Mark TX ring as full if we reach a certain threshold. */
if (++ring->queued > IWN_TX_RING_HIMARK)
sc->qfullmsk |= 1 << ring->qid;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return 0;
}
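/*
 * Illustrative sketch, not part of the driver: how the first TX
 * descriptor segment above is encoded.  The 16-bit "len" field packs
 * the upper DMA address bits into its low nibble and the byte count
 * (4-byte command header + TX command + padded 802.11 header) shifted
 * left by 4 into the rest.  The helper and its arguments are
 * hypothetical; byte swapping is omitted for brevity.
 */
#include <stddef.h>
#include <stdint.h>

static uint16_t
example_first_seg_len(uint8_t addr_hi_nibble, size_t cmd_hdr,
    size_t tx_cmd, size_t hdrlen, size_t pad)
{
	uint16_t len;

	len = (uint16_t)((cmd_hdr + tx_cmd + hdrlen + pad) << 4);
	len |= addr_hi_nibble & 0xf;	/* upper DMA address bits */
	return (len);
}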
static int
iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
{
struct iwn_ops *ops = &sc->ops;
-// struct ifnet *ifp = sc->sc_ifp;
struct ieee80211vap *vap = ni->ni_vap;
-// struct ieee80211com *ic = ifp->if_l2com;
struct iwn_tx_cmd *cmd;
struct iwn_cmd_data *tx;
struct ieee80211_frame *wh;
struct iwn_tx_ring *ring;
struct iwn_tx_desc *desc;
struct iwn_tx_data *data;
struct mbuf *m1;
bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER];
uint32_t flags;
u_int hdrlen;
int ac, totlen, error, pad, nsegs = 0, i, rate;
uint8_t type;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
IWN_LOCK_ASSERT(sc);
wh = mtod(m, struct ieee80211_frame *);
hdrlen = ieee80211_anyhdrsize(wh);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
ac = params->ibp_pri & 3;
ring = &sc->txq[ac];
desc = &ring->desc[ring->cur];
data = &ring->data[ring->cur];
/* Choose a TX rate. */
rate = params->ibp_rate0;
totlen = m->m_pkthdr.len;
/* Prepare TX firmware command. */
cmd = &ring->cmd[ring->cur];
cmd->code = IWN_CMD_TX_DATA;
cmd->flags = 0;
cmd->qid = ring->qid;
cmd->idx = ring->cur;
tx = (struct iwn_cmd_data *)cmd->data;
/* NB: No need to clear tx, all fields are reinitialized here. */
tx->scratch = 0; /* clear "scratch" area */
flags = 0;
if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
flags |= IWN_TX_NEED_ACK;
if (params->ibp_flags & IEEE80211_BPF_RTS) {
if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
/* 5000 autoselects RTS/CTS or CTS-to-self. */
flags &= ~IWN_TX_NEED_RTS;
flags |= IWN_TX_NEED_PROTECTION;
} else
flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP;
}
if (params->ibp_flags & IEEE80211_BPF_CTS) {
if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
/* 5000 autoselects RTS/CTS or CTS-to-self. */
flags &= ~IWN_TX_NEED_CTS;
flags |= IWN_TX_NEED_PROTECTION;
} else
flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP;
}
if (type == IEEE80211_FC0_TYPE_MGT) {
uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
/* Tell HW to set timestamp in probe responses. */
if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
flags |= IWN_TX_INSERT_TSTAMP;
if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
tx->timeout = htole16(3);
else
tx->timeout = htole16(2);
} else
tx->timeout = htole16(0);
if (hdrlen & 3) {
/* First segment length must be a multiple of 4. */
flags |= IWN_TX_NEED_PADDING;
pad = 4 - (hdrlen & 3);
} else
pad = 0;
if (ieee80211_radiotap_active_vap(vap)) {
struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
ieee80211_radiotap_tx(vap, m);
}
tx->len = htole16(totlen);
tx->tid = 0;
tx->id = sc->broadcast_id;
tx->rts_ntries = params->ibp_try1;
tx->data_ntries = params->ibp_try0;
tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
tx->rate = iwn_rate_to_plcp(sc, ni, rate);
/* Group or management frame. */
tx->linkq = 0;
/* Set physical address of "scratch area". */
tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
/* Copy 802.11 header in TX command. */
memcpy((uint8_t *)(tx + 1), wh, hdrlen);
/* Trim 802.11 header. */
m_adj(m, hdrlen);
tx->security = 0;
tx->flags = htole32(flags);
error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
&nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
if (error != EFBIG) {
device_printf(sc->sc_dev,
"%s: can't map mbuf (error %d)\n", __func__, error);
m_freem(m);
return error;
}
/* Too many DMA segments, linearize mbuf. */
m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1);
if (m1 == NULL) {
device_printf(sc->sc_dev,
"%s: could not defrag mbuf\n", __func__);
m_freem(m);
return ENOBUFS;
}
m = m1;
error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
segs, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: can't map mbuf (error %d)\n", __func__, error);
m_freem(m);
return error;
}
}
data->m = m;
data->ni = ni;
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
__func__, ring->qid, ring->cur, m->m_pkthdr.len, nsegs);
/* Fill TX descriptor. */
desc->nsegs = 1;
if (m->m_len != 0)
desc->nsegs += nsegs;
/* First DMA segment is used by the TX command. */
desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
(4 + sizeof (*tx) + hdrlen + pad) << 4);
/* Other DMA segments are for data payload. */
seg = &segs[0];
for (i = 1; i <= nsegs; i++) {
desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
seg->ds_len << 4);
seg++;
}
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
/* Update TX scheduler. */
if (ring->qid >= sc->firstaggqueue)
ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
/* Kick TX ring. */
ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
/* Mark TX ring as full if we reach a certain threshold. */
if (++ring->queued > IWN_TX_RING_HIMARK)
sc->qfullmsk |= 1 << ring->qid;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return 0;
}
static void
iwn_xmit_task(void *arg0, int pending)
{
struct iwn_softc *sc = arg0;
- struct ifnet *ifp = sc->sc_ifp;
struct ieee80211_node *ni;
struct mbuf *m;
int error;
struct ieee80211_bpf_params p;
int have_p;
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: called\n", __func__);
IWN_LOCK(sc);
/*
* Dequeue frames, attempt to transmit,
* then disable beaconwait when we're done.
*/
while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) {
have_p = 0;
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
/* Get xmit params if appropriate */
if (ieee80211_get_xmit_params(m, &p) == 0)
have_p = 1;
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: m=%p, have_p=%d\n",
__func__, m, have_p);
/* If we have xmit params, use them */
if (have_p)
error = iwn_tx_data_raw(sc, m, ni, &p);
else
error = iwn_tx_data(sc, m, ni);
if (error != 0) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
}
sc->sc_beacon_wait = 0;
IWN_UNLOCK(sc);
}
static int
iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct iwn_softc *sc = ic->ic_softc;
int error = 0;
DPRINTF(sc, IWN_DEBUG_XMIT | IWN_DEBUG_TRACE, "->%s begin\n", __func__);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0) {
ieee80211_free_node(ni);
m_freem(m);
return ENETDOWN;
}
/* XXX? net80211 doesn't set this on xmit'ed raw frames? */
m->m_pkthdr.rcvif = (void *) ni;
IWN_LOCK(sc);
/* queue frame if we have to */
if (sc->sc_beacon_wait) {
if (iwn_xmit_queue_enqueue(sc, m) != 0) {
m_freem(m);
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
IWN_UNLOCK(sc);
return (ENOBUFS);
}
/* Queued, so just return OK */
IWN_UNLOCK(sc);
return (0);
}
if (params == NULL) {
/*
* Legacy path; interpret frame contents to decide
* precisely how to send the frame.
*/
error = iwn_tx_data(sc, m, ni);
} else {
/*
* Caller supplied explicit parameters to use in
* sending the frame.
*/
error = iwn_tx_data_raw(sc, m, ni, params);
}
if (error != 0) {
/* NB: m is reclaimed on tx failure */
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
} else
sc->sc_tx_timer = 5;
IWN_UNLOCK(sc);
DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s: end\n",__func__);
return error;
}
-static void
-iwn_start(struct ifnet *ifp)
+static int
+iwn_transmit(struct ieee80211com *ic, struct mbuf *m)
{
- struct iwn_softc *sc = ifp->if_softc;
+ struct iwn_softc *sc;
+ int error;
+ sc = ic->ic_softc;
+
IWN_LOCK(sc);
- iwn_start_locked(ifp);
+ if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0) {
+ IWN_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ IWN_UNLOCK(sc);
+ return (error);
+ }
+ iwn_start_locked(sc);
IWN_UNLOCK(sc);
+ return (0);
}
static void
-iwn_start_locked(struct ifnet *ifp)
+iwn_start_locked(struct iwn_softc *sc)
{
- struct iwn_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct mbuf *m;
IWN_LOCK_ASSERT(sc);
/*
* If we're waiting for a beacon, we can just exit out here
* and wait for the taskqueue to be kicked.
*/
if (sc->sc_beacon_wait) {
return;
}
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: called\n", __func__);
-
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
- (ifp->if_drv_flags & IFF_DRV_OACTIVE))
- return;
-
- for (;;) {
- if (sc->qfullmsk != 0) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- break;
- }
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
+ while (sc->qfullmsk == 0 &&
+ (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
if (iwn_tx_data(sc, m, ni) != 0) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
} else
sc->sc_tx_timer = 5;
}
-
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: done\n", __func__);
}
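/*
 * Illustrative sketch, not part of the driver: the transmit pattern
 * this change converts to.  The ic_transmit() entry point only
 * enqueues frames on a driver-owned queue under the driver lock; a
 * separate "start" routine drains the queue while the hardware TX
 * rings still have room.  Standalone C with a trivial ring buffer
 * standing in for the mbufq; all names are generic stand-ins.
 */
#define EXQ_LEN	32

struct example_txq {
	void	*items[EXQ_LEN];
	int	 head, tail;
	int	 full_rings;		/* bitmask of full hardware rings */
};

/* ic_transmit-style entry: just enqueue; the caller holds the lock. */
static int
example_transmit(struct example_txq *q, void *frame)
{
	int next = (q->tail + 1) % EXQ_LEN;

	if (next == q->head)
		return (-1);		/* queue full */
	q->items[q->tail] = frame;
	q->tail = next;
	return (0);
}

/* start-style drain: stop as soon as some hardware ring is full. */
static void
example_start(struct example_txq *q, int (*hw_tx)(void *))
{
	while (q->full_rings == 0 && q->head != q->tail) {
		void *frame = q->items[q->head];
		q->head = (q->head + 1) % EXQ_LEN;
		(void)hw_tx(frame);
	}
}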
static void
iwn_watchdog(void *arg)
{
struct iwn_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
IWN_LOCK_ASSERT(sc);
- KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
+ KASSERT(sc->sc_flags & IWN_FLAG_RUNNING, ("not running"));
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
if (sc->sc_tx_timer > 0) {
if (--sc->sc_tx_timer == 0) {
ic_printf(ic, "device timeout\n");
ieee80211_runtask(ic, &sc->sc_reinit_task);
return;
}
}
callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
}
static int
-iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+iwn_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ifreq *ifr = data;
struct iwn_softc *sc = ic->ic_softc;
- struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
- struct ifreq *ifr = (struct ifreq *) data;
- int error = 0, startall = 0, stop = 0;
-
+ int error = 0;
+
switch (cmd) {
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- case SIOCSIFFLAGS:
- IWN_LOCK(sc);
- if (ifp->if_flags & IFF_UP) {
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- iwn_init_locked(sc);
- if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
- startall = 1;
- else
- stop = 1;
- }
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- iwn_stop_locked(sc);
- }
- IWN_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
- else if (vap != NULL && stop)
- ieee80211_stop(vap);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
case SIOCGIWNSTATS:
IWN_LOCK(sc);
/* XXX validate permissions/memory/etc? */
error = copyout(&sc->last_stat, ifr->ifr_data,
sizeof(struct iwn_stats));
IWN_UNLOCK(sc);
break;
case SIOCZIWNSTATS:
IWN_LOCK(sc);
memset(&sc->last_stat, 0, sizeof(struct iwn_stats));
IWN_UNLOCK(sc);
break;
default:
- error = EINVAL;
+ error = ENOTTY;
break;
}
- return error;
+ return (error);
}
+static void
+iwn_parent(struct ieee80211com *ic)
+{
+ struct iwn_softc *sc = ic->ic_softc;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ int startall = 0, stop = 0;
+
+ IWN_LOCK(sc);
+ if (ic->ic_nrunning > 0) {
+ if (!(sc->sc_flags & IWN_FLAG_RUNNING)) {
+ iwn_init_locked(sc);
+ if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
+ startall = 1;
+ else
+ stop = 1;
+ }
+ } else if (sc->sc_flags & IWN_FLAG_RUNNING)
+ iwn_stop_locked(sc);
+ IWN_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
+ else if (vap != NULL && stop)
+ ieee80211_stop(vap);
+}
+
/*
* Send a command to the firmware.
*/
static int
iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
{
struct iwn_tx_ring *ring;
struct iwn_tx_desc *desc;
struct iwn_tx_data *data;
struct iwn_tx_cmd *cmd;
struct mbuf *m;
bus_addr_t paddr;
int totlen, error;
int cmd_queue_num;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
if (async == 0)
IWN_LOCK_ASSERT(sc);
if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
cmd_queue_num = IWN_PAN_CMD_QUEUE;
else
cmd_queue_num = IWN_CMD_QUEUE_NUM;
ring = &sc->txq[cmd_queue_num];
desc = &ring->desc[ring->cur];
data = &ring->data[ring->cur];
totlen = 4 + size;
if (size > sizeof cmd->data) {
/* Command is too large to fit in a descriptor. */
if (totlen > MCLBYTES)
return EINVAL;
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
if (m == NULL)
return ENOMEM;
cmd = mtod(m, struct iwn_tx_cmd *);
error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
if (error != 0) {
m_freem(m);
return error;
}
data->m = m;
} else {
cmd = &ring->cmd[ring->cur];
paddr = data->cmd_paddr;
}
cmd->code = code;
cmd->flags = 0;
cmd->qid = ring->qid;
cmd->idx = ring->cur;
memcpy(cmd->data, buf, size);
desc->nsegs = 1;
desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);
DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n",
__func__, iwn_intr_str(cmd->code), cmd->code,
cmd->flags, cmd->qid, cmd->idx);
if (size > sizeof cmd->data) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_PREWRITE);
} else {
bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
BUS_DMASYNC_PREWRITE);
}
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
/* Kick command ring. */
ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz);
}
static int
iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
{
struct iwn4965_node_info hnode;
caddr_t src, dst;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/*
* We use the node structure for 5000 Series internally (it is
* a superset of the one for 4965AGN). We thus copy the common
* fields before sending the command.
*/
src = (caddr_t)node;
dst = (caddr_t)&hnode;
memcpy(dst, src, 48);
/* Skip TSC, RX MIC and TX MIC fields from ``src''. */
memcpy(dst + 48, src + 72, 20);
return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
}
static int
iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
{
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Direct mapping. */
return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
}
static int
iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
{
#define RV(v) ((v) & IEEE80211_RATE_VAL)
struct iwn_node *wn = (void *)ni;
struct ieee80211_rateset *rs;
struct iwn_cmd_link_quality linkq;
int i, rate, txrate;
int is_11n;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
memset(&linkq, 0, sizeof linkq);
linkq.id = wn->id;
linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc);
linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc);
linkq.ampdu_max = 32; /* XXX negotiated? */
linkq.ampdu_threshold = 3;
linkq.ampdu_limit = htole16(4000); /* 4ms */
DPRINTF(sc, IWN_DEBUG_XMIT,
"%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n",
__func__,
linkq.antmsk_1stream,
linkq.antmsk_2stream,
sc->ntxchains);
/*
* Are we using 11n rates? Ensure the channel is
* 11n _and_ we have some 11n rates, or don't
* try.
*/
if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) {
rs = (struct ieee80211_rateset *) &ni->ni_htrates;
is_11n = 1;
} else {
rs = &ni->ni_rates;
is_11n = 0;
}
/* Start at highest available bit-rate. */
/*
* XXX this is all very dirty!
*/
if (is_11n)
txrate = ni->ni_htrates.rs_nrates - 1;
else
txrate = rs->rs_nrates - 1;
for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
uint32_t plcp;
/*
* XXX TODO: ensure the last two slots are the two lowest
* rate entries, just for now.
*/
if (i == 14 || i == 15)
txrate = 0;
if (is_11n)
rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate];
else
rate = RV(rs->rs_rates[txrate]);
/* Do rate -> PLCP config mapping */
plcp = iwn_rate_to_plcp(sc, ni, rate);
linkq.retry[i] = plcp;
DPRINTF(sc, IWN_DEBUG_XMIT,
"%s: i=%d, txrate=%d, rate=0x%02x, plcp=0x%08x\n",
__func__,
i,
txrate,
rate,
le32toh(plcp));
/*
* The mimo field is an index into the table which
* indicates the first index where it and subsequent entries
* will not be using MIMO.
*
* Since we're filling linkq from 0..15 and we're filling
* from the highest MCS rates to the lowest rates, if we
* _are_ doing a dual-stream rate, set mimo to idx+1 (ie,
* the next entry.) That way if the next entry is a non-MIMO
* entry, we're already pointing at it.
*/
if ((le32toh(plcp) & IWN_RFLAG_MCS) &&
RV(le32toh(plcp)) > 7)
linkq.mimo = i + 1;
/* Next retry at immediate lower bit-rate. */
if (txrate > 0)
txrate--;
}
/*
* If we reached the end of the list and indeed we hit
* all MIMO rates (eg 5300 doing MCS23-15) then yes,
* set mimo to 15. Setting it to 16 panics the firmware.
*/
if (linkq.mimo > 15)
linkq.mimo = 15;
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: mimo = %d\n", __func__, linkq.mimo);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
#undef RV
}
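/*
 * Illustrative sketch, not part of the driver: how the 16-entry link
 * quality (MRR) retry table above is populated.  Retries start at the
 * node's highest negotiated rate and step down one rate per slot, with
 * the last two slots pinned to the lowest rate.  Rate-to-PLCP
 * conversion and the MIMO boundary bookkeeping are left out; names are
 * hypothetical.
 */
#include <stdint.h>

#define EX_MAX_RETRIES	16

static void
example_fill_retry_table(const uint8_t *rates, int nrates,
    uint8_t retry[EX_MAX_RETRIES])
{
	int i, txrate = nrates - 1;	/* start at the highest rate */

	for (i = 0; i < EX_MAX_RETRIES; i++) {
		if (i >= EX_MAX_RETRIES - 2)
			txrate = 0;	/* last two slots: lowest rate */
		retry[i] = rates[txrate];
		if (txrate > 0)
			txrate--;	/* next retry one rate lower */
	}
}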
/*
* Broadcast node is used to send group-addressed and management frames.
*/
static int
iwn_add_broadcast_node(struct iwn_softc *sc, int async)
{
struct iwn_ops *ops = &sc->ops;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct iwn_node_info node;
struct iwn_cmd_link_quality linkq;
uint8_t txant;
int i, error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
memset(&node, 0, sizeof node);
- IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
+ IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr);
node.id = sc->broadcast_id;
DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__);
if ((error = ops->add_node(sc, &node, async)) != 0)
return error;
/* Use the first valid TX antenna. */
txant = IWN_LSB(sc->txchainmask);
memset(&linkq, 0, sizeof linkq);
linkq.id = sc->broadcast_id;
linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc);
linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc);
linkq.ampdu_max = 64;
linkq.ampdu_threshold = 3;
linkq.ampdu_limit = htole16(4000); /* 4ms */
/* Use lowest mandatory bit-rate. */
/* XXX rate table lookup? */
if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
linkq.retry[0] = htole32(0xd);
else
linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK);
linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant));
/* Use same bit-rate for all TX retries. */
for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
linkq.retry[i] = linkq.retry[0];
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
}
static int
iwn_updateedca(struct ieee80211com *ic)
{
#define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
struct iwn_softc *sc = ic->ic_softc;
struct iwn_edca_params cmd;
int aci;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
memset(&cmd, 0, sizeof cmd);
cmd.flags = htole32(IWN_EDCA_UPDATE);
for (aci = 0; aci < WME_NUM_AC; aci++) {
const struct wmeParams *ac =
&ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
cmd.ac[aci].aifsn = ac->wmep_aifsn;
cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin));
cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax));
cmd.ac[aci].txoplimit =
htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
}
IEEE80211_UNLOCK(ic);
IWN_LOCK(sc);
(void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
IWN_UNLOCK(sc);
IEEE80211_LOCK(ic);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return 0;
#undef IWN_EXP2
}
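/*
 * Illustrative sketch, not part of the driver: the contention window
 * conversion done by IWN_EXP2() above.  net80211 stores CWmin/CWmax as
 * exponents (ECW); the firmware wants the window size itself,
 * CW = 2^ECW - 1.  For example, ECWmin = 4 gives CWmin = 15 and
 * ECWmax = 10 gives CWmax = 1023.  The helper name is made up.
 */
#include <stdint.h>

static uint16_t
example_ecw_to_cw(uint8_t ecw)
{
	return ((uint16_t)((1U << ecw) - 1));	/* 2^ECW - 1 */
}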
static void
iwn_update_mcast(struct ieee80211com *ic)
{
/* Ignore */
}
static void
iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
{
struct iwn_cmd_led led;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
#if 0
/* XXX don't set LEDs during scan? */
if (sc->sc_is_scanning)
return;
#endif
/* Clear microcode LED ownership. */
IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
led.which = which;
led.unit = htole32(10000); /* on/off in unit of 100ms */
led.off = off;
led.on = on;
(void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
}
/*
* Set the critical temperature at which the firmware will stop the radio
* and notify us.
*/
static int
iwn_set_critical_temp(struct iwn_softc *sc)
{
struct iwn_critical_temp crit;
int32_t temp;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
if (sc->hw_type == IWN_HW_REV_TYPE_5150)
temp = (IWN_CTOK(110) - sc->temp_off) * -5;
else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
temp = IWN_CTOK(110);
else
temp = 110;
memset(&crit, 0, sizeof crit);
crit.tempR = htole32(temp);
DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp);
return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
}
static int
iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
{
struct iwn_cmd_timing cmd;
uint64_t val, mod;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
memset(&cmd, 0, sizeof cmd);
memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
cmd.bintval = htole16(ni->ni_intval);
cmd.lintval = htole16(10);
/* Compute remaining time until next beacon. */
val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
mod = le64toh(cmd.tstamp) % val;
cmd.binitval = htole32((uint32_t)(val - mod));
DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
}
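/*
 * Illustrative sketch, not part of the driver: the beacon timing
 * arithmetic above.  The beacon interval is converted to microseconds
 * (1 TU = 1024 us), the AP timestamp is reduced modulo that interval,
 * and the remainder until the next beacon is what gets programmed.
 * The helper name and the explicit TU constant are assumptions for
 * illustration.
 */
#include <stdint.h>

static uint32_t
example_time_to_next_beacon(uint64_t tstamp_us, uint16_t bintval_tu)
{
	const uint64_t tu_us = 1024;			/* 1 TU in microseconds */
	uint64_t val = (uint64_t)bintval_tu * tu_us;	/* interval in us */
	uint64_t mod = tstamp_us % val;			/* elapsed in this interval */

	return ((uint32_t)(val - mod));			/* remaining until next beacon */
}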
static void
iwn4965_power_calibration(struct iwn_softc *sc, int temp)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Adjust TX power if need be (delta >= 3 degC). */
DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n",
__func__, sc->temp, temp);
if (abs(temp - sc->temp) >= 3) {
/* Record temperature of last calibration. */
sc->temp = temp;
(void)iwn4965_set_txpower(sc, ic->ic_bsschan, 1);
}
}
/*
* Set TX power for current channel (each rate has its own power settings).
* This function takes into account the regulatory information from EEPROM,
* the current temperature and the current voltage.
*/
static int
iwn4965_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
int async)
{
/* Fixed-point arithmetic division using a n-bit fractional part. */
#define fdivround(a, b, n) \
((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
/* Linear interpolation. */
#define interpolate(x, x1, y1, x2, y2, n) \
((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
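/*
 * Worked example of the macros above (sample values assumed):
 * interpolating an EEPROM power between sample channels 36 (power 100)
 * and 44 (power 108) for channel 40 gives
 *   100 + fdivround((40 - 36) * (108 - 100), 44 - 36, 1)
 * = 100 + ((2 * 32) / 8 + 1) / 2 = 100 + 4 = 104,
 * i.e. ordinary linear interpolation rounded with 1 fractional bit.
 */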
static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
struct iwn_ucode_info *uc = &sc->ucode_info;
struct iwn4965_cmd_txpower cmd;
struct iwn4965_eeprom_chan_samples *chans;
const uint8_t *rf_gain, *dsp_gain;
int32_t vdiff, tdiff;
int i, c, grp, maxpwr;
uint8_t chan;
sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
/* Retrieve current channel from last RXON. */
chan = sc->rxon->chan;
DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n",
chan);
memset(&cmd, 0, sizeof cmd);
cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
cmd.chan = chan;
if (IEEE80211_IS_CHAN_5GHZ(ch)) {
maxpwr = sc->maxpwr5GHz;
rf_gain = iwn4965_rf_gain_5ghz;
dsp_gain = iwn4965_dsp_gain_5ghz;
} else {
maxpwr = sc->maxpwr2GHz;
rf_gain = iwn4965_rf_gain_2ghz;
dsp_gain = iwn4965_dsp_gain_2ghz;
}
/* Compute voltage compensation. */
vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
if (vdiff > 0)
vdiff *= 2;
if (abs(vdiff) > 2)
vdiff = 0;
DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
"%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
__func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage);
/* Get channel attenuation group. */
if (chan <= 20) /* 1-20 */
grp = 4;
else if (chan <= 43) /* 34-43 */
grp = 0;
else if (chan <= 70) /* 44-70 */
grp = 1;
else if (chan <= 124) /* 71-124 */
grp = 2;
else /* 125-200 */
grp = 3;
DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
"%s: chan %d, attenuation group=%d\n", __func__, chan, grp);
/* Get channel sub-band. */
for (i = 0; i < IWN_NBANDS; i++)
if (sc->bands[i].lo != 0 &&
sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
break;
if (i == IWN_NBANDS) /* Can't happen in real-life. */
return EINVAL;
chans = sc->bands[i].chans;
DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
"%s: chan %d sub-band=%d\n", __func__, chan, i);
for (c = 0; c < 2; c++) {
uint8_t power, gain, temp;
int maxchpwr, pwr, ridx, idx;
power = interpolate(chan,
chans[0].num, chans[0].samples[c][1].power,
chans[1].num, chans[1].samples[c][1].power, 1);
gain = interpolate(chan,
chans[0].num, chans[0].samples[c][1].gain,
chans[1].num, chans[1].samples[c][1].gain, 1);
temp = interpolate(chan,
chans[0].num, chans[0].samples[c][1].temp,
chans[1].num, chans[1].samples[c][1].temp, 1);
DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
"%s: Tx chain %d: power=%d gain=%d temp=%d\n",
__func__, c, power, gain, temp);
/* Compute temperature compensation. */
tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
"%s: temperature compensation=%d (current=%d, EEPROM=%d)\n",
__func__, tdiff, sc->temp, temp);
for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
/* Convert dBm to half-dBm. */
maxchpwr = sc->maxpwr[chan] * 2;
if ((ridx / 8) & 1)
maxchpwr -= 6; /* MIMO 2T: -3dB */
pwr = maxpwr;
/* Adjust TX power based on rate. */
if ((ridx % 8) == 5)
pwr -= 15; /* OFDM48: -7.5dB */
else if ((ridx % 8) == 6)
pwr -= 17; /* OFDM54: -8.5dB */
else if ((ridx % 8) == 7)
pwr -= 20; /* OFDM60: -10dB */
else
pwr -= 10; /* Others: -5dB */
/* Do not exceed channel max TX power. */
if (pwr > maxchpwr)
pwr = maxchpwr;
idx = gain - (pwr - power) - tdiff - vdiff;
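/*
 * Worked example (all inputs assumed): with gain = 90, pwr = 108,
 * power = 104, tdiff = 2 and vdiff = 0 the base gain-table index is
 * 90 - (108 - 104) - 2 - 0 = 84, before the MIMO/5GHz/CCK corrections
 * and the clamp to [0, IWN4965_MAX_PWR_INDEX] below.
 */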
if ((ridx / 8) & 1) /* MIMO */
idx += (int32_t)le32toh(uc->atten[grp][c]);
if (cmd.band == 0)
idx += 9; /* 5GHz */
if (ridx == IWN_RIDX_MAX)
idx += 5; /* CCK */
/* Make sure idx stays in a valid range. */
if (idx < 0)
idx = 0;
else if (idx > IWN4965_MAX_PWR_INDEX)
idx = IWN4965_MAX_PWR_INDEX;
DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
"%s: Tx chain %d, rate idx %d: power=%d\n",
__func__, c, ridx, idx);
cmd.power[ridx].rf_gain[c] = rf_gain[idx];
cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
}
}
DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW,
"%s: set tx power for chan %d\n", __func__, chan);
return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
#undef interpolate
#undef fdivround
}
static int
iwn5000_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch,
int async)
{
struct iwn5000_cmd_txpower cmd;
int cmdid;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/*
* TX power calibration is handled automatically by the firmware
* for 5000 Series.
*/
memset(&cmd, 0, sizeof cmd);
cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */
cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
cmd.srv_limit = IWN5000_TXPOWER_AUTO;
DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
"%s: setting TX power; rev=%d\n",
__func__,
IWN_UCODE_API(sc->ucode_rev));
if (IWN_UCODE_API(sc->ucode_rev) == 1)
cmdid = IWN_CMD_TXPOWER_DBM_V1;
else
cmdid = IWN_CMD_TXPOWER_DBM;
return iwn_cmd(sc, cmdid, &cmd, sizeof cmd, async);
}
/*
* Retrieve the maximum RSSI (in dBm) among receivers.
*/
static int
iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
struct iwn4965_rx_phystat *phy = (void *)stat->phybuf;
uint8_t mask, agc;
int rssi;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
agc = (le16toh(phy->agc) >> 7) & 0x7f;
rssi = 0;
if (mask & IWN_ANT_A)
rssi = MAX(rssi, phy->rssi[0]);
if (mask & IWN_ANT_B)
rssi = MAX(rssi, phy->rssi[2]);
if (mask & IWN_ANT_C)
rssi = MAX(rssi, phy->rssi[4]);
DPRINTF(sc, IWN_DEBUG_RECV,
"%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc,
mask, phy->rssi[0], phy->rssi[2], phy->rssi[4],
rssi - agc - IWN_RSSI_TO_DBM);
return rssi - agc - IWN_RSSI_TO_DBM;
}
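/*
 * Both get_rssi variants (above and below) report the strongest
 * per-antenna reading minus the AGC gain and the fixed IWN_RSSI_TO_DBM
 * offset used in the dBm conversion; the 4965 version additionally
 * only considers antennas flagged in the PHY status word.
 */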
static int
iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat)
{
struct iwn5000_rx_phystat *phy = (void *)stat->phybuf;
uint8_t agc;
int rssi;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
agc = (le32toh(phy->agc) >> 9) & 0x7f;
rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
le16toh(phy->rssi[1]) & 0xff);
rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
DPRINTF(sc, IWN_DEBUG_RECV,
"%s: agc %d rssi %d %d %d result %d\n", __func__, agc,
phy->rssi[0], phy->rssi[1], phy->rssi[2],
rssi - agc - IWN_RSSI_TO_DBM);
return rssi - agc - IWN_RSSI_TO_DBM;
}
/*
* Retrieve the average noise (in dBm) among receivers.
*/
static int
iwn_get_noise(const struct iwn_rx_general_stats *stats)
{
int i, total, nbant, noise;
total = nbant = 0;
for (i = 0; i < 3; i++) {
if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
continue;
total += noise;
nbant++;
}
/* There should be at least one antenna but check anyway. */
return (nbant == 0) ? -127 : (total / nbant) - 107;
}
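/*
 * Worked example (readings assumed): per-antenna noise values of 34,
 * 36 and 38 average to 36, giving 36 - 107 = -71 dBm; when the
 * firmware reports no non-zero readings the function above falls back
 * to -127 dBm.
 */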
/*
* Compute temperature (in degC) from last received statistics.
*/
static int
iwn4965_get_temperature(struct iwn_softc *sc)
{
struct iwn_ucode_info *uc = &sc->ucode_info;
int32_t r1, r2, r3, r4, temp;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
r1 = le32toh(uc->temp[0].chan20MHz);
r2 = le32toh(uc->temp[1].chan20MHz);
r3 = le32toh(uc->temp[2].chan20MHz);
r4 = le32toh(sc->rawtemp);
if (r1 == r3) /* Prevents division by 0 (should not happen). */
return 0;
/* Sign-extend 23-bit R4 value to 32-bit. */
r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
/* Compute temperature in Kelvin. */
temp = (259 * (r4 - r2)) / (r3 - r1);
temp = (temp * 97) / 100 + 8;
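/*
 * Worked example (raw calibration values assumed): with r1 = 100,
 * r2 = 150, r3 = 200 and r4 = 280, temp = 259 * 130 / 100 = 336, then
 * 336 * 97 / 100 + 8 = 333 K, which IWN_KTOC() reports as about
 * 60 degC assuming the usual Kelvin-to-Celsius conversion.
 */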
DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp,
IWN_KTOC(temp));
return IWN_KTOC(temp);
}
static int
iwn5000_get_temperature(struct iwn_softc *sc)
{
int32_t temp;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/*
* Temperature is not used by the driver for 5000 Series because
* TX power calibration is handled by firmware.
*/
temp = le32toh(sc->rawtemp);
if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
temp = (temp / -5) + sc->temp_off;
temp = IWN_KTOC(temp);
}
return temp;
}
/*
* Initialize sensitivity calibration state machine.
*/
static int
iwn_init_sensitivity(struct iwn_softc *sc)
{
struct iwn_ops *ops = &sc->ops;
struct iwn_calib_state *calib = &sc->calib;
uint32_t flags;
int error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Reset calibration state machine. */
memset(calib, 0, sizeof (*calib));
calib->state = IWN_CALIB_STATE_INIT;
calib->cck_state = IWN_CCK_STATE_HIFA;
/* Set initial correlation values. */
calib->ofdm_x1 = sc->limits->min_ofdm_x1;
calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
calib->ofdm_x4 = sc->limits->min_ofdm_x4;
calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
calib->cck_x4 = 125;
calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
calib->energy_cck = sc->limits->energy_cck;
/* Write initial sensitivity. */
if ((error = iwn_send_sensitivity(sc)) != 0)
return error;
/* Write initial gains. */
if ((error = ops->init_gains(sc)) != 0)
return error;
/* Request statistics at each beacon interval. */
flags = 0;
DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n",
__func__);
return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
}
/*
* Collect noise and RSSI statistics for the first 20 beacons received
* after association and use them to determine connected antennas and
* to set differential gains.
*/
static void
iwn_collect_noise(struct iwn_softc *sc,
const struct iwn_rx_general_stats *stats)
{
struct iwn_ops *ops = &sc->ops;
struct iwn_calib_state *calib = &sc->calib;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t val;
int i;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/* Accumulate RSSI and noise for all 3 antennas. */
for (i = 0; i < 3; i++) {
calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
}
/* NB: We update differential gains only once after 20 beacons. */
if (++calib->nbeacons < 20)
return;
/* Determine highest average RSSI. */
val = MAX(calib->rssi[0], calib->rssi[1]);
val = MAX(calib->rssi[2], val);
/* Determine which antennas are connected. */
sc->chainmask = sc->rxchainmask;
for (i = 0; i < 3; i++)
if (val - calib->rssi[i] > 15 * 20)
sc->chainmask &= ~(1 << i);
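/*
 * Note on the threshold above: RSSI has been accumulated over 20
 * beacons, so "15 * 20" means a chain whose average RSSI is more than
 * 15 units below the strongest chain is treated as disconnected.
 */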
DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
"%s: RX chains mask: theoretical=0x%x, actual=0x%x\n",
__func__, sc->rxchainmask, sc->chainmask);
/* If none of the TX antennas are connected, keep at least one. */
if ((sc->chainmask & sc->txchainmask) == 0)
sc->chainmask |= IWN_LSB(sc->txchainmask);
(void)ops->set_gains(sc);
calib->state = IWN_CALIB_STATE_RUN;
#ifdef notyet
/* XXX Disable RX chains with no antennas connected. */
sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
if (sc->sc_is_scanning)
device_printf(sc->sc_dev,
"%s: is_scanning set, before RXON\n",
__func__);
(void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
#endif
/* Enable power-saving mode if requested by user. */
if (ic->ic_flags & IEEE80211_F_PMGTON)
(void)iwn_set_pslevel(sc, 0, 3, 1);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
}
static int
iwn4965_init_gains(struct iwn_softc *sc)
{
struct iwn_phy_calib_gain cmd;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
memset(&cmd, 0, sizeof cmd);
cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
/* Differential gains initially set to 0 for all 3 antennas. */
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"%s: setting initial differential gains\n", __func__);
return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}
static int
iwn5000_init_gains(struct iwn_softc *sc)
{
struct iwn_phy_calib cmd;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
memset(&cmd, 0, sizeof cmd);
cmd.code = sc->reset_noise_gain;
cmd.ngroups = 1;
cmd.isvalid = 1;
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"%s: setting initial differential gains\n", __func__);
return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}
static int
iwn4965_set_gains(struct iwn_softc *sc)
{
struct iwn_calib_state *calib = &sc->calib;
struct iwn_phy_calib_gain cmd;
int i, delta, noise;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Get minimal noise among connected antennas. */
noise = INT_MAX; /* NB: There's at least one antenna. */
for (i = 0; i < 3; i++)
if (sc->chainmask & (1 << i))
noise = MIN(calib->noise[i], noise);
memset(&cmd, 0, sizeof cmd);
cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
/* Set differential gains for connected antennas. */
for (i = 0; i < 3; i++) {
if (sc->chainmask & (1 << i)) {
/* Compute attenuation (in unit of 1.5dB). */
delta = (noise - (int32_t)calib->noise[i]) / 30;
/* NB: delta <= 0 */
/* Limit to [-4.5dB,0]. */
cmd.gain[i] = MIN(abs(delta), 3);
if (delta < 0)
cmd.gain[i] |= 1 << 2; /* sign bit */
}
}
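/*
 * Worked example (noise totals assumed): if antenna i accumulated 60
 * units more noise than the quietest connected antenna over the 20
 * beacons, delta = -60 / 30 = -2, so gain[i] = 2 | (1 << 2) = 0x6,
 * i.e. two 1.5dB steps (3dB) of attenuation on that chain.
 */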
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask);
return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}
static int
iwn5000_set_gains(struct iwn_softc *sc)
{
struct iwn_calib_state *calib = &sc->calib;
struct iwn_phy_calib_gain cmd;
int i, ant, div, delta;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* We collected 20 beacons; non-6050 chips use an extra 1.5 divisor. */
div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
memset(&cmd, 0, sizeof cmd);
cmd.code = sc->noise_gain;
cmd.ngroups = 1;
cmd.isvalid = 1;
/* Use the first available RX antenna as the reference. */
ant = IWN_LSB(sc->rxchainmask);
/* Set differential gains for other antennas. */
for (i = ant + 1; i < 3; i++) {
if (sc->chainmask & (1 << i)) {
/* The delta is relative to antenna "ant". */
delta = ((int32_t)calib->noise[ant] -
(int32_t)calib->noise[i]) / div;
/* Limit to [-4.5dB,+4.5dB]. */
cmd.gain[i - 1] = MIN(abs(delta), 3);
if (delta < 0)
cmd.gain[i - 1] |= 1 << 2; /* sign bit */
}
}
DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT,
"setting differential gains Ant B/C: %x/%x (%x)\n",
cmd.gain[0], cmd.gain[1], sc->chainmask);
return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
}
/*
* Tune RF RX sensitivity based on the number of false alarms detected
* during the last beacon period.
*/
static void
iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
{
#define inc(val, inc, max) \
if ((val) < (max)) { \
if ((val) < (max) - (inc)) \
(val) += (inc); \
else \
(val) = (max); \
needs_update = 1; \
}
#define dec(val, dec, min) \
if ((val) > (min)) { \
if ((val) > (min) + (dec)) \
(val) -= (dec); \
else \
(val) = (min); \
needs_update = 1; \
}
const struct iwn_sensitivity_limits *limits = sc->limits;
struct iwn_calib_state *calib = &sc->calib;
uint32_t val, rxena, fa;
uint32_t energy[3], energy_min;
uint8_t noise[3], noise_ref;
int i, needs_update = 0;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/* Check that we've been enabled long enough. */
if ((rxena = le32toh(stats->general.load)) == 0){
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__);
return;
}
/* Compute number of false alarms since last call for OFDM. */
fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
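/*
 * Note on the comparisons below (a reading of the code, assuming the
 * load counter is in microseconds): fa is scaled to a 200 TU window,
 * so "fa > 50 * rxena" roughly means more than 50 false alarms per
 * 200 TU of RX-enabled time and "fa < 5 * rxena" fewer than 5.
 */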
if (fa > 50 * rxena) {
/* High false alarm count, decrease sensitivity. */
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"%s: OFDM high false alarm count: %u\n", __func__, fa);
inc(calib->ofdm_x1, 1, limits->max_ofdm_x1);
inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
inc(calib->ofdm_x4, 1, limits->max_ofdm_x4);
inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
} else if (fa < 5 * rxena) {
/* Low false alarm count, increase sensitivity. */
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"%s: OFDM low false alarm count: %u\n", __func__, fa);
dec(calib->ofdm_x1, 1, limits->min_ofdm_x1);
dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
dec(calib->ofdm_x4, 1, limits->min_ofdm_x4);
dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
}
/* Compute maximum noise among 3 receivers. */
for (i = 0; i < 3; i++)
noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
val = MAX(noise[0], noise[1]);
val = MAX(noise[2], val);
/* Insert it into our samples table. */
calib->noise_samples[calib->cur_noise_sample] = val;
calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
/* Compute maximum noise among last 20 samples. */
noise_ref = calib->noise_samples[0];
for (i = 1; i < 20; i++)
noise_ref = MAX(noise_ref, calib->noise_samples[i]);
/* Compute maximum energy among 3 receivers. */
for (i = 0; i < 3; i++)
energy[i] = le32toh(stats->general.energy[i]);
val = MIN(energy[0], energy[1]);
val = MIN(energy[2], val);
/* Insert it into our samples table. */
calib->energy_samples[calib->cur_energy_sample] = val;
calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
/* Compute minimum energy among last 10 samples. */
energy_min = calib->energy_samples[0];
for (i = 1; i < 10; i++)
energy_min = MAX(energy_min, calib->energy_samples[i]);
energy_min += 6;
/* Compute number of false alarms since last call for CCK. */
fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
fa += le32toh(stats->cck.fa) - calib->fa_cck;
fa *= 200 * IEEE80211_DUR_TU; /* 200TU */
if (fa > 50 * rxena) {
/* High false alarm count, decrease sensitivity. */
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"%s: CCK high false alarm count: %u\n", __func__, fa);
calib->cck_state = IWN_CCK_STATE_HIFA;
calib->low_fa = 0;
if (calib->cck_x4 > 160) {
calib->noise_ref = noise_ref;
if (calib->energy_cck > 2)
dec(calib->energy_cck, 2, energy_min);
}
if (calib->cck_x4 < 160) {
calib->cck_x4 = 161;
needs_update = 1;
} else
inc(calib->cck_x4, 3, limits->max_cck_x4);
inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
} else if (fa < 5 * rxena) {
/* Low false alarm count, increase sensitivity. */
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"%s: CCK low false alarm count: %u\n", __func__, fa);
calib->cck_state = IWN_CCK_STATE_LOFA;
calib->low_fa++;
if (calib->cck_state != IWN_CCK_STATE_INIT &&
(((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
calib->low_fa > 100)) {
inc(calib->energy_cck, 2, limits->min_energy_cck);
dec(calib->cck_x4, 3, limits->min_cck_x4);
dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
}
} else {
/* Not worth increasing or decreasing sensitivity. */
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"%s: CCK normal false alarm count: %u\n", __func__, fa);
calib->low_fa = 0;
calib->noise_ref = noise_ref;
if (calib->cck_state == IWN_CCK_STATE_HIFA) {
/* Previous interval had many false alarms. */
dec(calib->energy_cck, 8, energy_min);
}
calib->cck_state = IWN_CCK_STATE_INIT;
}
if (needs_update)
(void)iwn_send_sensitivity(sc);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
#undef dec
#undef inc
}
static int
iwn_send_sensitivity(struct iwn_softc *sc)
{
struct iwn_calib_state *calib = &sc->calib;
struct iwn_enhanced_sensitivity_cmd cmd;
int len;
memset(&cmd, 0, sizeof cmd);
len = sizeof (struct iwn_sensitivity_cmd);
cmd.which = IWN_SENSITIVITY_WORKTBL;
/* OFDM modulation. */
cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
cmd.energy_ofdm_th = htole16(62);
/* CCK modulation. */
cmd.corr_cck_x4 = htole16(calib->cck_x4);
cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
cmd.energy_cck = htole16(calib->energy_cck);
/* Barker modulation: use default values. */
cmd.corr_barker = htole16(190);
cmd.corr_barker_mrc = htole16(sc->limits->barker_mrc);
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__,
calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
calib->ofdm_mrc_x4, calib->cck_x4,
calib->cck_mrc_x4, calib->energy_cck);
if (!(sc->sc_flags & IWN_FLAG_ENH_SENS))
goto send;
/* Enhanced sensitivity settings. */
len = sizeof (struct iwn_enhanced_sensitivity_cmd);
cmd.ofdm_det_slope_mrc = htole16(668);
cmd.ofdm_det_icept_mrc = htole16(4);
cmd.ofdm_det_slope = htole16(486);
cmd.ofdm_det_icept = htole16(37);
cmd.cck_det_slope_mrc = htole16(853);
cmd.cck_det_icept_mrc = htole16(4);
cmd.cck_det_slope = htole16(476);
cmd.cck_det_icept = htole16(99);
send:
return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1);
}
/*
* Look at the increase of PLCP errors over time; if it exceeds
* a programmed threshold then trigger an RF retune.
*/
static void
iwn_check_rx_recovery(struct iwn_softc *sc, struct iwn_stats *rs)
{
int32_t delta_ofdm, delta_ht, delta_cck;
struct iwn_calib_state *calib = &sc->calib;
int delta_ticks, cur_ticks;
int delta_msec;
int thresh;
/*
* Calculate the difference between the current and
* previous statistics.
*/
delta_cck = le32toh(rs->rx.cck.bad_plcp) - calib->bad_plcp_cck;
delta_ofdm = le32toh(rs->rx.ofdm.bad_plcp) - calib->bad_plcp_ofdm;
delta_ht = le32toh(rs->rx.ht.bad_plcp) - calib->bad_plcp_ht;
/*
* Calculate the delta in time between successive statistics
* messages. Yes, it can roll over; we detect that case
* below and simply bail out until the next update.
*
* XXX go figure out what to do about rollover
* XXX go figure out what to do if ticks rolls over to -ve instead!
* XXX go stab signed integer overflow undefined-ness in the face.
*/
cur_ticks = ticks;
delta_ticks = cur_ticks - sc->last_calib_ticks;
/*
* If any are negative, then the firmware likely reset; so just
* bail. We'll pick this up next time.
*/
if (delta_cck < 0 || delta_ofdm < 0 || delta_ht < 0 || delta_ticks < 0)
return;
/*
* delta_ticks is in ticks; we need to convert it up to milliseconds
* so we can do some useful math with it.
*/
delta_msec = ticks_to_msecs(delta_ticks);
/*
* Calculate what our threshold is given the current delta_msec.
*/
thresh = sc->base_params->plcp_err_threshold * delta_msec;
DPRINTF(sc, IWN_DEBUG_STATE,
"%s: time delta: %d; cck=%d, ofdm=%d, ht=%d, total=%d, thresh=%d\n",
__func__,
delta_msec,
delta_cck,
delta_ofdm,
delta_ht,
(delta_msec + delta_cck + delta_ofdm + delta_ht),
thresh);
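/*
 * Worked example (threshold assumed): with plcp_err_threshold = 50 and
 * 500 ms between statistics updates, thresh = 25000, so the combined
 * CCK+OFDM+HT delta must exceed 250 (50 errors per 100 ms) before the
 * condition below fires. Note that the block below currently only
 * logs the condition; no retune is actually scheduled yet.
 */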
/*
* If we need a retune, then schedule a single channel scan
* to a channel that isn't the currently active one!
*
* The math from linux iwlwifi:
*
* if ((delta * 100 / msecs) > threshold)
*/
if (thresh > 0 && (delta_cck + delta_ofdm + delta_ht) * 100 > thresh) {
DPRINTF(sc, IWN_DEBUG_ANY,
"%s: PLCP error threshold raw (%d) comparison (%d) "
"over limit (%d); retune!\n",
__func__,
(delta_cck + delta_ofdm + delta_ht),
(delta_cck + delta_ofdm + delta_ht) * 100,
thresh);
}
}
/*
* Set STA mode power saving level (between 0 and 5).
* Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
*/
static int
iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
{
struct iwn_pmgt_cmd cmd;
const struct iwn_pmgt *pmgt;
uint32_t max, skip_dtim;
uint32_t reg;
int i;
DPRINTF(sc, IWN_DEBUG_PWRSAVE,
"%s: dtim=%d, level=%d, async=%d\n",
__func__,
dtim,
level,
async);
/* Select which PS parameters to use. */
if (dtim <= 2)
pmgt = &iwn_pmgt[0][level];
else if (dtim <= 10)
pmgt = &iwn_pmgt[1][level];
else
pmgt = &iwn_pmgt[2][level];
memset(&cmd, 0, sizeof cmd);
if (level != 0) /* not CAM */
cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
if (level == 5)
cmd.flags |= htole16(IWN_PS_FAST_PD);
/* Retrieve PCIe Active State Power Management (ASPM). */
reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
if (!(reg & 0x1)) /* L0s Entry disabled. */
cmd.flags |= htole16(IWN_PS_PCI_PMGT);
cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
if (dtim == 0) {
dtim = 1;
skip_dtim = 0;
} else
skip_dtim = pmgt->skip_dtim;
if (skip_dtim != 0) {
cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
max = pmgt->intval[4];
if (max == (uint32_t)-1)
max = dtim * (skip_dtim + 1);
else if (max > dtim)
max = (max / dtim) * dtim;
} else
max = dtim;
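/*
 * When sleeping over DTIM is allowed, max is the table's fifth
 * interval rounded down to a multiple of the DTIM period (or
 * dtim * (skip_dtim + 1) when that table entry is (uint32_t)-1);
 * each of the five sleep intervals below is then capped at that value.
 */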
for (i = 0; i < 5; i++)
cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n",
level);
return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
}
static int
iwn_send_btcoex(struct iwn_softc *sc)
{
struct iwn_bluetooth cmd;
memset(&cmd, 0, sizeof cmd);
cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO;
cmd.lead_time = IWN_BT_LEAD_TIME_DEF;
cmd.max_kill = IWN_BT_MAX_KILL_DEF;
DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
__func__);
return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
}
static int
iwn_send_advanced_btcoex(struct iwn_softc *sc)
{
static const uint32_t btcoex_3wire[12] = {
0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa,
0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa,
0xc0004000, 0x00004000, 0xf0005000, 0xf0005000,
};
struct iwn6000_btcoex_config btconfig;
struct iwn2000_btcoex_config btconfig2k;
struct iwn_btcoex_priotable btprio;
struct iwn_btcoex_prot btprot;
int error, i;
uint8_t flags;
memset(&btconfig, 0, sizeof btconfig);
memset(&btconfig2k, 0, sizeof btconfig2k);
flags = IWN_BT_FLAG_COEX6000_MODE_3W <<
IWN_BT_FLAG_COEX6000_MODE_SHIFT; /* Done as in Linux kernel 3.2. */
if (sc->base_params->bt_sco_disable)
flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE;
else
flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE;
flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION;
/* The default flags value works out to 145, the same as the old value. */
/*
 * The flags value still needs review; it must change if we
 * wish to disable bluetooth coexistence.
 */
if (sc->base_params->bt_session_2) {
btconfig2k.flags = flags;
btconfig2k.max_kill = 5;
btconfig2k.bt3_t7_timer = 1;
btconfig2k.kill_ack = htole32(0xffff0000);
btconfig2k.kill_cts = htole32(0xffff0000);
btconfig2k.sample_time = 2;
btconfig2k.bt3_t2_timer = 0xc;
for (i = 0; i < 12; i++)
btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]);
btconfig2k.valid = htole16(0xff);
btconfig2k.prio_boost = htole32(0xf0);
DPRINTF(sc, IWN_DEBUG_RESET,
"%s: configuring advanced bluetooth coexistence"
" session 2, flags : 0x%x\n",
__func__,
flags);
error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k,
sizeof(btconfig2k), 1);
} else {
btconfig.flags = flags;
btconfig.max_kill = 5;
btconfig.bt3_t7_timer = 1;
btconfig.kill_ack = htole32(0xffff0000);
btconfig.kill_cts = htole32(0xffff0000);
btconfig.sample_time = 2;
btconfig.bt3_t2_timer = 0xc;
for (i = 0; i < 12; i++)
btconfig.lookup_table[i] = htole32(btcoex_3wire[i]);
btconfig.valid = htole16(0xff);
btconfig.prio_boost = 0xf0;
DPRINTF(sc, IWN_DEBUG_RESET,
"%s: configuring advanced bluetooth coexistence,"
" flags : 0x%x\n",
__func__,
flags);
error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig,
sizeof(btconfig), 1);
}
if (error != 0)
return error;
memset(&btprio, 0, sizeof btprio);
btprio.calib_init1 = 0x6;
btprio.calib_init2 = 0x7;
btprio.calib_periodic_low1 = 0x2;
btprio.calib_periodic_low2 = 0x3;
btprio.calib_periodic_high1 = 0x4;
btprio.calib_periodic_high2 = 0x5;
btprio.dtim = 0x6;
btprio.scan52 = 0x8;
btprio.scan24 = 0xa;
error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio),
1);
if (error != 0)
return error;
/* Force BT state machine change. */
memset(&btprot, 0, sizeof btprot);
btprot.open = 1;
btprot.type = 1;
error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
if (error != 0)
return error;
btprot.open = 0;
return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1);
}
static int
iwn5000_runtime_calib(struct iwn_softc *sc)
{
struct iwn5000_calib_config cmd;
memset(&cmd, 0, sizeof cmd);
cmd.ucode.once.enable = 0xffffffff;
cmd.ucode.once.start = IWN5000_CALIB_DC;
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"%s: configuring runtime calibration\n", __func__);
return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
}
static uint32_t
iwn_get_rxon_ht_flags(struct iwn_softc *sc, struct ieee80211_channel *c)
{
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t htflags = 0;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
if (! IEEE80211_IS_CHAN_HT(c))
return (0);
htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode);
if (IEEE80211_IS_CHAN_HT40(c)) {
switch (ic->ic_curhtprotmode) {
case IEEE80211_HTINFO_OPMODE_HT20PR:
htflags |= IWN_RXON_HT_MODEPURE40;
break;
default:
htflags |= IWN_RXON_HT_MODEMIXED;
break;
}
}
if (IEEE80211_IS_CHAN_HT40D(c))
htflags |= IWN_RXON_HT_HT40MINUS;
return (htflags);
}
static int
iwn_config(struct iwn_softc *sc)
{
struct iwn_ops *ops = &sc->ops;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ const uint8_t *macaddr;
uint32_t txmask;
uint16_t rxchain;
int error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET)
&& (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) {
device_printf(sc->sc_dev,"%s: temp_offset and temp_offsetv2 are"
" exclusive each together. Review NIC config file. Conf"
" : 0x%08x Flags : 0x%08x \n", __func__,
sc->base_params->calib_need,
(IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET |
IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2));
return (EINVAL);
}
/* Compute temperature calibration if needed; it will be sent by the send-calibration code. */
if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) {
error = iwn5000_temp_offset_calib(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not set temperature offset\n", __func__);
return (error);
}
} else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) {
error = iwn5000_temp_offset_calibv2(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not compute temperature offset v2\n",
__func__);
return (error);
}
}
if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
/* Configure runtime DC calibration. */
error = iwn5000_runtime_calib(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not configure runtime calibration\n",
__func__);
return error;
}
}
/* Configure valid TX chains for >=5000 Series. */
if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
IWN_UCODE_API(sc->ucode_rev) > 1) {
txmask = htole32(sc->txchainmask);
DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT,
"%s: configuring valid TX chains 0x%x\n", __func__, txmask);
error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
sizeof txmask, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not configure valid TX chains, "
"error %d\n", __func__, error);
return error;
}
}
/* Configure bluetooth coexistence if needed. */
error = 0;
if (sc->base_params->bt_mode == IWN_BT_ADVANCED)
error = iwn_send_advanced_btcoex(sc);
if (sc->base_params->bt_mode == IWN_BT_SIMPLE)
error = iwn_send_btcoex(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not configure bluetooth coexistence, error %d\n",
__func__, error);
return error;
}
/* Set mode, channel, RX filter and enable RX. */
sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
memset(sc->rxon, 0, sizeof (struct iwn_rxon));
- IEEE80211_ADDR_COPY(sc->rxon->myaddr, IF_LLADDR(ifp));
- IEEE80211_ADDR_COPY(sc->rxon->wlap, IF_LLADDR(ifp));
+ macaddr = vap ? vap->iv_myaddr : ic->ic_macaddr;
+ IEEE80211_ADDR_COPY(sc->rxon->myaddr, macaddr);
+ IEEE80211_ADDR_COPY(sc->rxon->wlap, macaddr);
sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan);
sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
switch (ic->ic_opmode) {
case IEEE80211_M_STA:
sc->rxon->mode = IWN_MODE_STA;
sc->rxon->filter = htole32(IWN_FILTER_MULTICAST);
break;
case IEEE80211_M_MONITOR:
sc->rxon->mode = IWN_MODE_MONITOR;
sc->rxon->filter = htole32(IWN_FILTER_MULTICAST |
IWN_FILTER_CTL | IWN_FILTER_PROMISC);
break;
default:
/* Should not get here. */
break;
}
sc->rxon->cck_mask = 0x0f; /* not yet negotiated */
sc->rxon->ofdm_mask = 0xff; /* not yet negotiated */
sc->rxon->ht_single_mask = 0xff;
sc->rxon->ht_dual_mask = 0xff;
sc->rxon->ht_triple_mask = 0xff;
/*
* In active association mode, ensure that
* all the receive chains are enabled.
*
* Since we're not yet doing SMPS, don't allow the
* number of idle RX chains to be less than the active
* number.
*/
rxchain =
IWN_RXCHAIN_VALID(sc->rxchainmask) |
IWN_RXCHAIN_MIMO_COUNT(sc->nrxchains) |
IWN_RXCHAIN_IDLE_COUNT(sc->nrxchains);
sc->rxon->rxchain = htole16(rxchain);
DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT,
"%s: rxchainmask=0x%x, nrxchains=%d\n",
__func__,
sc->rxchainmask,
sc->nrxchains);
sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ic->ic_curchan));
DPRINTF(sc, IWN_DEBUG_RESET,
"%s: setting configuration; flags=0x%08x\n",
__func__, le32toh(sc->rxon->flags));
if (sc->sc_is_scanning)
device_printf(sc->sc_dev,
"%s: is_scanning set, before RXON\n",
__func__);
error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 0);
if (error != 0) {
device_printf(sc->sc_dev, "%s: RXON command failed\n",
__func__);
return error;
}
if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
device_printf(sc->sc_dev, "%s: could not add broadcast node\n",
__func__);
return error;
}
/* Configuration has changed, set TX power accordingly. */
if ((error = ops->set_txpower(sc, ic->ic_curchan, 0)) != 0) {
device_printf(sc->sc_dev, "%s: could not set TX power\n",
__func__);
return error;
}
if ((error = iwn_set_critical_temp(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not set critical temperature\n", __func__);
return error;
}
/* Set power saving level to CAM during initialization. */
if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
device_printf(sc->sc_dev,
"%s: could not set power saving level\n", __func__);
return error;
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return 0;
}
static uint16_t
iwn_get_active_dwell_time(struct iwn_softc *sc,
struct ieee80211_channel *c, uint8_t n_probes)
{
/* No channel? Default to 2GHz settings */
if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
return (IWN_ACTIVE_DWELL_TIME_2GHZ +
IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
}
/* 5GHz dwell time */
return (IWN_ACTIVE_DWELL_TIME_5GHZ +
IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
}
/*
* Limit the total dwell time to 85% of the beacon interval.
*
* Returns the dwell time in milliseconds.
*/
static uint16_t
iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = NULL;
int bintval = 0;
/* bintval is in TU (1.024 ms) */
if (! TAILQ_EMPTY(&ic->ic_vaps)) {
vap = TAILQ_FIRST(&ic->ic_vaps);
bintval = vap->iv_bss->ni_intval;
}
/*
* If it's non-zero, we should calculate the minimum of
* it and the DWELL_BASE.
*
* XXX Yes, the math should take into account that bintval
* is 1.024 ms, not 1 ms.
*/
if (bintval > 0) {
DPRINTF(sc, IWN_DEBUG_SCAN,
"%s: bintval=%d\n",
__func__,
bintval);
return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100)));
}
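/*
 * Worked example (interval assumed): with a beacon interval of 100 TU
 * the clamp above yields MIN(IWN_PASSIVE_DWELL_BASE, 85), so the
 * passive dwell never consumes more than 85% of the beacon period.
 * As the XXX above notes, the value is treated as milliseconds even
 * though bintval is in TU.
 */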
/* No association context? Default */
return (IWN_PASSIVE_DWELL_BASE);
}
static uint16_t
iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c)
{
uint16_t passive;
if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ;
} else {
passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ;
}
/* Clamp to the beacon interval if we're associated */
return (iwn_limit_dwell(sc, passive));
}
static int
iwn_scan(struct iwn_softc *sc, struct ieee80211vap *vap,
struct ieee80211_scan_state *ss, struct ieee80211_channel *c)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni = vap->iv_bss;
struct iwn_scan_hdr *hdr;
struct iwn_cmd_data *tx;
struct iwn_scan_essid *essid;
struct iwn_scan_chan *chan;
struct ieee80211_frame *wh;
struct ieee80211_rateset *rs;
uint8_t *buf, *frm;
uint16_t rxchain;
uint8_t txant;
int buflen, error;
int is_active;
uint16_t dwell_active, dwell_passive;
uint32_t extra, scan_service_time;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/*
* We are absolutely not allowed to send a scan command when another
* scan command is pending.
*/
if (sc->sc_is_scanning) {
device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
__func__);
return (EAGAIN);
}
/* Assign the scan channel */
c = ic->ic_curchan;
sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
if (buf == NULL) {
device_printf(sc->sc_dev,
"%s: could not allocate buffer for scan command\n",
__func__);
return ENOMEM;
}
hdr = (struct iwn_scan_hdr *)buf;
/*
* Move to the next channel if no frames are received within 10ms
* after sending the probe request.
*/
hdr->quiet_time = htole16(10); /* timeout in milliseconds */
hdr->quiet_threshold = htole16(1); /* min # of packets */
/*
* Max needs to be greater than active and passive and quiet!
* It's also in microseconds!
*/
hdr->max_svc = htole32(250 * 1024);
/*
* Reset scan: interval=100
* Normal scan: interval=beacon interval
* suspend_time: 100 (TU)
*
*/
extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22;
//scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024);
scan_service_time = (4 << 22) | (100 * 1024); /* Hardcode for now! */
hdr->pause_svc = htole32(scan_service_time);
/* Select antennas for scanning. */
rxchain =
IWN_RXCHAIN_VALID(sc->rxchainmask) |
IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
IWN_RXCHAIN_DRIVER_FORCE;
if (IEEE80211_IS_CHAN_A(c) &&
sc->hw_type == IWN_HW_REV_TYPE_4965) {
/* Ant A must be avoided in 5GHz because of an HW bug. */
rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B);
} else /* Use all available RX antennas. */
rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
hdr->rxchain = htole16(rxchain);
hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
tx = (struct iwn_cmd_data *)(hdr + 1);
tx->flags = htole32(IWN_TX_AUTO_SEQ);
tx->id = sc->broadcast_id;
tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
if (IEEE80211_IS_CHAN_5GHZ(c)) {
/* Send probe requests at 6Mbps. */
tx->rate = htole32(0xd);
rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
} else {
hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
if (sc->hw_type == IWN_HW_REV_TYPE_4965 &&
sc->rxon->associd && sc->rxon->chan > 14)
tx->rate = htole32(0xd);
else {
/* Send probe requests at 1Mbps. */
tx->rate = htole32(10 | IWN_RFLAG_CCK);
}
rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
}
/* Use the first valid TX antenna. */
txant = IWN_LSB(sc->txchainmask);
tx->rate |= htole32(IWN_RFLAG_ANT(txant));
/*
* Only do active scanning if we're announcing a probe request
* for a given SSID (or more, if we ever add it to the driver.)
*/
is_active = 0;
/*
* If we're scanning for a specific SSID, add it to the command.
*
* XXX maybe look at adding support for scanning multiple SSIDs?
*/
essid = (struct iwn_scan_essid *)(tx + 1);
if (ss != NULL) {
if (ss->ss_ssid[0].len != 0) {
essid[0].id = IEEE80211_ELEMID_SSID;
essid[0].len = ss->ss_ssid[0].len;
memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len);
}
DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n",
__func__,
ss->ss_ssid[0].len,
ss->ss_ssid[0].len,
ss->ss_ssid[0].ssid);
if (ss->ss_nssid > 0)
is_active = 1;
}
/*
* Build a probe request frame. Most of the following code is a
* copy & paste of what is done in net80211.
*/
wh = (struct ieee80211_frame *)(essid + 20);
wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
IEEE80211_FC0_SUBTYPE_PROBE_REQ;
wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
- IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
- IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(ifp));
- IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr1, vap->iv_ifp->if_broadcastaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(vap->iv_ifp));
+ IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_ifp->if_broadcastaddr);
*(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
*(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
frm = (uint8_t *)(wh + 1);
frm = ieee80211_add_ssid(frm, NULL, 0);
frm = ieee80211_add_rates(frm, rs);
if (rs->rs_nrates > IEEE80211_RATE_SIZE)
frm = ieee80211_add_xrates(frm, rs);
if (ic->ic_htcaps & IEEE80211_HTC_HT)
frm = ieee80211_add_htcap(frm, ni);
/* Set length of probe request. */
tx->len = htole16(frm - (uint8_t *)wh);
/*
* If active scanning is requested but a certain channel is
* marked passive, we can do active scanning if we detect
* transmissions.
*
* There is an issue with some firmware versions that triggers
* a sysassert on a "good CRC threshold" of zero (== disabled),
* on a radar channel even though this means that we should NOT
* send probes.
*
* The "good CRC threshold" is the number of frames that we
* need to receive during our dwell time on a channel before
* sending out probes -- setting this to a huge value will
* mean we never reach it, but at the same time work around
* the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
* here instead of IWL_GOOD_CRC_TH_DISABLED.
*
* This was fixed in later versions along with some other
* scan changes, and the threshold behaves as a flag in those
* versions.
*/
/*
* If we're doing active scanning, set the crc_threshold
* to a suitable value. This differs for active versus
* passive scanning depending upon the channel flags; the
* firmware will obey that particular check for us.
*/
if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN)
hdr->crc_threshold = is_active ?
IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED;
else
hdr->crc_threshold = is_active ?
IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER;
chan = (struct iwn_scan_chan *)frm;
chan->chan = htole16(ieee80211_chan2ieee(ic, c));
chan->flags = 0;
if (ss->ss_nssid > 0)
chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
chan->dsp_gain = 0x6e;
/*
* Set the passive/active flag depending upon the channel mode.
* XXX TODO: take the is_active flag into account as well?
*/
if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
chan->flags |= htole32(IWN_CHAN_PASSIVE);
else
chan->flags |= htole32(IWN_CHAN_ACTIVE);
/*
* Calculate the active/passive dwell times.
*/
dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid);
dwell_passive = iwn_get_passive_dwell_time(sc, c);
/* Make sure they're valid */
if (dwell_passive <= dwell_active)
dwell_passive = dwell_active + 1;
chan->active = htole16(dwell_active);
chan->passive = htole16(dwell_passive);
if (IEEE80211_IS_CHAN_5GHZ(c))
chan->rf_gain = 0x3b;
else
chan->rf_gain = 0x28;
DPRINTF(sc, IWN_DEBUG_STATE,
"%s: chan %u flags 0x%x rf_gain 0x%x "
"dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x "
"isactive=%d numssid=%d\n", __func__,
chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain,
dwell_active, dwell_passive, scan_service_time,
hdr->crc_threshold, is_active, ss->ss_nssid);
hdr->nchan++;
chan++;
buflen = (uint8_t *)chan - buf;
hdr->len = htole16(buflen);
if (sc->sc_is_scanning) {
device_printf(sc->sc_dev,
"%s: called with is_scanning set!\n",
__func__);
}
sc->sc_is_scanning = 1;
DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n",
hdr->nchan);
error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
free(buf, M_DEVBUF);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return error;
}
static int
iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap)
{
struct iwn_ops *ops = &sc->ops;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni = vap->iv_bss;
int error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
/* Update adapter configuration. */
IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
if (ic->ic_flags & IEEE80211_F_SHSLOT)
sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
sc->rxon->cck_mask = 0;
sc->rxon->ofdm_mask = 0x15;
} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
sc->rxon->cck_mask = 0x03;
sc->rxon->ofdm_mask = 0;
} else {
/* Assume 802.11b/g. */
sc->rxon->cck_mask = 0x03;
sc->rxon->ofdm_mask = 0x15;
}
/* try HT */
sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ic->ic_curchan));
DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask,
sc->rxon->ofdm_mask);
if (sc->sc_is_scanning)
device_printf(sc->sc_dev,
"%s: is_scanning set, before RXON\n",
__func__);
error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
if (error != 0) {
device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n",
__func__, error);
return error;
}
/* Configuration has changed, set TX power accordingly. */
if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
device_printf(sc->sc_dev,
"%s: could not set TX power, error %d\n", __func__, error);
return error;
}
/*
* Reconfiguring RXON clears the firmware nodes table so we must
* add the broadcast node again.
*/
if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
device_printf(sc->sc_dev,
"%s: could not add broadcast node, error %d\n", __func__,
error);
return error;
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return 0;
}
static int
iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap)
{
struct iwn_ops *ops = &sc->ops;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni = vap->iv_bss;
struct iwn_node_info node;
int error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX];
if (ic->ic_opmode == IEEE80211_M_MONITOR) {
/* Link LED blinks while monitoring. */
iwn_set_led(sc, IWN_LED_LINK, 5, 5);
return 0;
}
if ((error = iwn_set_timing(sc, ni)) != 0) {
device_printf(sc->sc_dev,
"%s: could not set timing, error %d\n", __func__, error);
return error;
}
/* Update adapter configuration. */
IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid);
sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd));
sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan);
sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
if (ic->ic_flags & IEEE80211_F_SHSLOT)
sc->rxon->flags |= htole32(IWN_RXON_SHSLOT);
if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE);
if (IEEE80211_IS_CHAN_A(ni->ni_chan)) {
sc->rxon->cck_mask = 0;
sc->rxon->ofdm_mask = 0x15;
} else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) {
sc->rxon->cck_mask = 0x03;
sc->rxon->ofdm_mask = 0;
} else {
/* Assume 802.11b/g. */
sc->rxon->cck_mask = 0x0f;
sc->rxon->ofdm_mask = 0x15;
}
/* try HT */
sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ni->ni_chan));
sc->rxon->filter |= htole32(IWN_FILTER_BSS);
DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x, curhtprotmode=%d\n",
sc->rxon->chan, le32toh(sc->rxon->flags), ic->ic_curhtprotmode);
if (sc->sc_is_scanning)
device_printf(sc->sc_dev,
"%s: is_scanning set, before RXON\n",
__func__);
error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not update configuration, error %d\n", __func__,
error);
return error;
}
/* Configuration has changed, set TX power accordingly. */
if ((error = ops->set_txpower(sc, ni->ni_chan, 1)) != 0) {
device_printf(sc->sc_dev,
"%s: could not set TX power, error %d\n", __func__, error);
return error;
}
/* Fake a join to initialize the TX rate. */
((struct iwn_node *)ni)->id = IWN_ID_BSS;
iwn_newassoc(ni, 1);
/* Add BSS node. */
memset(&node, 0, sizeof node);
IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
node.id = IWN_ID_BSS;
if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) {
case IEEE80211_HTCAP_SMPS_ENA:
node.htflags |= htole32(IWN_SMPS_MIMO_DIS);
break;
case IEEE80211_HTCAP_SMPS_DYNAMIC:
node.htflags |= htole32(IWN_SMPS_MIMO_PROT);
break;
}
node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) |
IWN_AMDPU_DENSITY(5)); /* 4us */
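/*
 * Per the 802.11n encoding these correspond to a maximum A-MPDU length
 * exponent of 3 (2^(13+3) - 1 = 65535 bytes) and an MPDU density code
 * of 5, i.e. a minimum spacing of 4 usec, matching the comment above.
 */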
if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
node.htflags |= htole32(IWN_NODE_HT40);
}
DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__);
error = ops->add_node(sc, &node, 1);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not add BSS node, error %d\n", __func__, error);
return error;
}
DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n",
__func__, node.id);
if ((error = iwn_set_link_quality(sc, ni)) != 0) {
device_printf(sc->sc_dev,
"%s: could not setup link quality for node %d, error %d\n",
__func__, node.id, error);
return error;
}
if ((error = iwn_init_sensitivity(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not set sensitivity, error %d\n", __func__,
error);
return error;
}
/* Start periodic calibration timer. */
sc->calib.state = IWN_CALIB_STATE_ASSOC;
sc->calib_cnt = 0;
callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout,
sc);
/* Link LED always on while associated. */
iwn_set_led(sc, IWN_LED_LINK, 0, 1);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return 0;
}
/*
* This function is called by upper layer when an ADDBA request is received
* from another STA and before the ADDBA response is sent.
*/
static int
iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
int baparamset, int batimeout, int baseqctl)
{
#define MS(_v, _f) (((_v) & _f) >> _f##_S)
struct iwn_softc *sc = ni->ni_ic->ic_softc;
struct iwn_ops *ops = &sc->ops;
struct iwn_node *wn = (void *)ni;
struct iwn_node_info node;
uint16_t ssn;
uint8_t tid;
int error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID);
ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START);
memset(&node, 0, sizeof node);
node.id = wn->id;
node.control = IWN_NODE_UPDATE;
node.flags = IWN_FLAG_SET_ADDBA;
node.addba_tid = tid;
node.addba_ssn = htole16(ssn);
DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n",
wn->id, tid, ssn);
error = ops->add_node(sc, &node, 1);
if (error != 0)
return error;
return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
#undef MS
}
/*
* This function is called by upper layer on teardown of an HT-immediate
* Block Ack agreement (e.g. upon receipt of a DELBA frame).
*/
static void
iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
{
struct ieee80211com *ic = ni->ni_ic;
struct iwn_softc *sc = ic->ic_softc;
struct iwn_ops *ops = &sc->ops;
struct iwn_node *wn = (void *)ni;
struct iwn_node_info node;
uint8_t tid;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* XXX: tid as an argument */
for (tid = 0; tid < WME_NUM_TID; tid++) {
if (&ni->ni_rx_ampdu[tid] == rap)
break;
}
memset(&node, 0, sizeof node);
node.id = wn->id;
node.control = IWN_NODE_UPDATE;
node.flags = IWN_FLAG_SET_DELBA;
node.delba_tid = tid;
DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid);
(void)ops->add_node(sc, &node, 1);
sc->sc_ampdu_rx_stop(ni, rap);
}
static int
iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
int dialogtoken, int baparamset, int batimeout)
{
struct iwn_softc *sc = ni->ni_ic->ic_softc;
int qid;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) {
if (sc->qid2tap[qid] == NULL)
break;
}
if (qid == sc->ntxqs) {
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n",
__func__);
return 0;
}
tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
if (tap->txa_private == NULL) {
device_printf(sc->sc_dev,
"%s: failed to alloc TX aggregation structure\n", __func__);
return 0;
}
sc->qid2tap[qid] = tap;
*(int *)tap->txa_private = qid;
return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
batimeout);
}
static int
iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
int code, int baparamset, int batimeout)
{
struct iwn_softc *sc = ni->ni_ic->ic_softc;
int qid = *(int *)tap->txa_private;
uint8_t tid = tap->txa_tid;
int ret;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
if (code == IEEE80211_STATUS_SUCCESS) {
ni->ni_txseqs[tid] = tap->txa_start & 0xfff;
ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid);
if (ret != 1)
return ret;
} else {
sc->qid2tap[qid] = NULL;
free(tap->txa_private, M_DEVBUF);
tap->txa_private = NULL;
}
return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
/*
* This function is called by upper layer when an ADDBA response is received
* from another STA.
*/
static int
iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
uint8_t tid)
{
struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
struct iwn_softc *sc = ni->ni_ic->ic_softc;
struct iwn_ops *ops = &sc->ops;
struct iwn_node *wn = (void *)ni;
struct iwn_node_info node;
int error, qid;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Enable TX for the specified RA/TID. */
wn->disable_tid &= ~(1 << tid);
memset(&node, 0, sizeof node);
node.id = wn->id;
node.control = IWN_NODE_UPDATE;
node.flags = IWN_FLAG_SET_DISABLE_TID;
node.disable_tid = htole16(wn->disable_tid);
error = ops->add_node(sc, &node, 1);
if (error != 0)
return 0;
if ((error = iwn_nic_lock(sc)) != 0)
return 0;
qid = *(int *)tap->txa_private;
DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n",
__func__, wn->id, tid, tap->txa_start, qid);
ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff);
iwn_nic_unlock(sc);
iwn_set_link_quality(sc, ni);
return 1;
}
static void
iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
struct iwn_softc *sc = ni->ni_ic->ic_softc;
struct iwn_ops *ops = &sc->ops;
uint8_t tid = tap->txa_tid;
int qid;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
sc->sc_addba_stop(ni, tap);
if (tap->txa_private == NULL)
return;
qid = *(int *)tap->txa_private;
if (sc->txq[qid].queued != 0)
return;
if (iwn_nic_lock(sc) != 0)
return;
ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff);
iwn_nic_unlock(sc);
sc->qid2tap[qid] = NULL;
free(tap->txa_private, M_DEVBUF);
tap->txa_private = NULL;
}
static void
iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
int qid, uint8_t tid, uint16_t ssn)
{
struct iwn_node *wn = (void *)ni;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Stop TX scheduler while we're changing its configuration. */
iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
IWN4965_TXQ_STATUS_CHGACT);
/* Assign RA/TID translation to the queue. */
iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
wn->id << 4 | tid);
/* Enable chain-building mode for the queue. */
iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
/* Set starting sequence number from the ADDBA request. */
sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
/* Set scheduler window size. */
iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
IWN_SCHED_WINSZ);
/* Set scheduler frame limit. */
iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
IWN_SCHED_LIMIT << 16);
/* Enable interrupts for the queue. */
iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
/* Mark the queue as active. */
iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
iwn_tid2fifo[tid] << 1);
}
static void
iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
{
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Stop TX scheduler while we're changing its configuration. */
iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
IWN4965_TXQ_STATUS_CHGACT);
/* Set starting sequence number from the ADDBA request. */
IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
/* Disable interrupts for the queue. */
iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
/* Mark the queue as inactive. */
iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
}
static void
iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
int qid, uint8_t tid, uint16_t ssn)
{
struct iwn_node *wn = (void *)ni;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Stop TX scheduler while we're changing its configuration. */
iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
IWN5000_TXQ_STATUS_CHGACT);
/* Assign RA/TID translation to the queue. */
iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
wn->id << 4 | tid);
/* Enable chain-building mode for the queue. */
iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
/* Enable aggregation for the queue. */
iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
/* Set starting sequence number from the ADDBA request. */
sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff);
IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
/* Set scheduler window size and frame limit. */
iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
/* Enable interrupts for the queue. */
iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
/* Mark the queue as active. */
iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
}
static void
iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn)
{
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Stop TX scheduler while we're changing its configuration. */
iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
IWN5000_TXQ_STATUS_CHGACT);
/* Disable aggregation for the queue. */
iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
/* Set starting sequence number from the ADDBA request. */
IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
/* Disable interrupts for the queue. */
iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
/* Mark the queue as inactive. */
iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
}
/*
* Query calibration tables from the initialization firmware. We do this
* only once at first boot. Called from a process context.
*/
static int
iwn5000_query_calibration(struct iwn_softc *sc)
{
struct iwn5000_calib_config cmd;
int error;
memset(&cmd, 0, sizeof cmd);
cmd.ucode.once.enable = htole32(0xffffffff);
cmd.ucode.once.start = htole32(0xffffffff);
cmd.ucode.once.send = htole32(0xffffffff);
cmd.ucode.flags = htole32(0xffffffff);
DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n",
__func__);
error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
if (error != 0)
return error;
/* Wait at most two seconds for calibration to complete. */
if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz);
return error;
}
/*
* Send calibration results to the runtime firmware. These results were
* obtained on first boot from the initialization firmware.
*/
static int
iwn5000_send_calibration(struct iwn_softc *sc)
{
int idx, error;
for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) {
if (!(sc->base_params->calib_need & (1<<idx))) {
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"No need of calib %d\n",
idx);
continue; /* no need for this calib */
}
if (sc->calibcmd[idx].buf == NULL) {
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"Need calib idx : %d but no available data\n",
idx);
continue;
}
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"send calibration result idx=%d len=%d\n", idx,
sc->calibcmd[idx].len);
error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
sc->calibcmd[idx].len, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not send calibration result, error %d\n",
__func__, error);
return error;
}
}
return 0;
}
static int
iwn5000_send_wimax_coex(struct iwn_softc *sc)
{
struct iwn5000_wimax_coex wimax;
#if 0
if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
/* Enable WiMAX coexistence for combo adapters. */
wimax.flags =
IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
IWN_WIMAX_COEX_STA_TABLE_VALID |
IWN_WIMAX_COEX_ENABLE;
memcpy(wimax.events, iwn6050_wimax_events,
sizeof iwn6050_wimax_events);
} else
#endif
{
/* Disable WiMAX coexistence. */
wimax.flags = 0;
memset(wimax.events, 0, sizeof wimax.events);
}
DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n",
__func__);
return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
}
static int
iwn5000_crystal_calib(struct iwn_softc *sc)
{
struct iwn5000_phy_calib_crystal cmd;
memset(&cmd, 0, sizeof cmd);
cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
cmd.ngroups = 1;
cmd.isvalid = 1;
cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n",
cmd.cap_pin[0], cmd.cap_pin[1]);
return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}
static int
iwn5000_temp_offset_calib(struct iwn_softc *sc)
{
struct iwn5000_phy_calib_temp_offset cmd;
memset(&cmd, 0, sizeof cmd);
cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
cmd.ngroups = 1;
cmd.isvalid = 1;
if (sc->eeprom_temp != 0)
cmd.offset = htole16(sc->eeprom_temp);
else
cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET);
DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n",
le16toh(cmd.offset));
return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}
static int
iwn5000_temp_offset_calibv2(struct iwn_softc *sc)
{
struct iwn5000_phy_calib_temp_offsetv2 cmd;
memset(&cmd, 0, sizeof cmd);
cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET;
cmd.ngroups = 1;
cmd.isvalid = 1;
if (sc->eeprom_temp != 0) {
cmd.offset_low = htole16(sc->eeprom_temp);
cmd.offset_high = htole16(sc->eeprom_temp_high);
} else {
cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET);
cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET);
}
cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage);
DPRINTF(sc, IWN_DEBUG_CALIBRATE,
"setting radio sensor low offset to %d, high offset to %d, voltage to %d\n",
le16toh(cmd.offset_low),
le16toh(cmd.offset_high),
le16toh(cmd.burnt_voltage_ref));
return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
}
/*
* This function is called after the runtime firmware notifies us of its
* readiness (called in a process context).
*/
static int
iwn4965_post_alive(struct iwn_softc *sc)
{
int error, qid;
if ((error = iwn_nic_lock(sc)) != 0)
return error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Clear TX scheduler state in SRAM. */
sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
/* Set physical address of TX scheduler rings (1KB aligned). */
iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
/* Disable chain mode for all our 16 queues. */
iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
/* Set scheduler window size. */
iwn_mem_write(sc, sc->sched_base +
IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
/* Set scheduler frame limit. */
iwn_mem_write(sc, sc->sched_base +
IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
IWN_SCHED_LIMIT << 16);
}
/* Enable interrupts for all our 16 queues. */
iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
/* Identify TX FIFO rings (0-7). */
iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
for (qid = 0; qid < 7; qid++) {
static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
}
iwn_nic_unlock(sc);
return 0;
}
/*
* This function is called after the initialization or runtime firmware
* notifies us of its readiness (called in a process context).
*/
static int
iwn5000_post_alive(struct iwn_softc *sc)
{
int error, qid;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/* Switch to using ICT interrupt mode. */
iwn5000_ict_reset(sc);
if ((error = iwn_nic_lock(sc)) != 0){
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__);
return error;
}
/* Clear TX scheduler state in SRAM. */
sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
/* Set physical address of TX scheduler rings (1KB aligned). */
iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
/* Enable chain mode for all queues, except command queue. */
if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT)
iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf);
else
iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
iwn_mem_write(sc, sc->sched_base +
IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
/* Set scheduler window size and frame limit. */
iwn_mem_write(sc, sc->sched_base +
IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
}
/* Enable interrupts for all our 20 queues. */
iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
/* Identify TX FIFO rings (0-7). */
iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) {
/* Mark TX rings as active. */
for (qid = 0; qid < 11; qid++) {
static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 };
iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
}
} else {
/* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
for (qid = 0; qid < 7; qid++) {
static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
}
}
iwn_nic_unlock(sc);
/* Configure WiMAX coexistence for combo adapters. */
error = iwn5000_send_wimax_coex(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not configure WiMAX coexistence, error %d\n",
__func__, error);
return error;
}
if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
/* Perform crystal calibration. */
error = iwn5000_crystal_calib(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: crystal calibration failed, error %d\n",
__func__, error);
return error;
}
}
if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
/* Query calibration from the initialization firmware. */
if ((error = iwn5000_query_calibration(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not query calibration, error %d\n",
__func__, error);
return error;
}
/*
* We have the calibration results now; reboot with the
* runtime firmware (calling ourselves recursively!).
*/
iwn_hw_stop(sc);
error = iwn_hw_init(sc);
} else {
/* Send calibration results to runtime firmware. */
error = iwn5000_send_calibration(sc);
}
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return error;
}
/*
* The firmware boot code is small and is intended to be copied directly into
* the NIC internal memory (no DMA transfer).
*/
static int
iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
{
int error, ntries;
size /= sizeof (uint32_t);
if ((error = iwn_nic_lock(sc)) != 0)
return error;
/* Copy microcode image into NIC memory. */
iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
(const uint32_t *)ucode, size);
iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
/* Start boot load now. */
iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
/* Wait for transfer to complete. */
for (ntries = 0; ntries < 1000; ntries++) {
if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
IWN_BSM_WR_CTRL_START))
break;
DELAY(10);
}
if (ntries == 1000) {
device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
__func__);
iwn_nic_unlock(sc);
return ETIMEDOUT;
}
/* Enable boot after power up. */
iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
iwn_nic_unlock(sc);
return 0;
}
static int
iwn4965_load_firmware(struct iwn_softc *sc)
{
struct iwn_fw_info *fw = &sc->fw;
struct iwn_dma_info *dma = &sc->fw_dma;
int error;
/* Copy initialization sections into pre-allocated DMA-safe memory. */
memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
fw->init.text, fw->init.textsz);
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
/* Tell adapter where to find initialization sections. */
if ((error = iwn_nic_lock(sc)) != 0)
return error;
iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
(dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
iwn_nic_unlock(sc);
/* Load firmware boot code. */
error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
if (error != 0) {
device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
__func__);
return error;
}
/* Now press "execute". */
IWN_WRITE(sc, IWN_RESET, 0);
/* Wait at most one second for first alive notification. */
if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
device_printf(sc->sc_dev,
"%s: timeout waiting for adapter to initialize, error %d\n",
__func__, error);
return error;
}
/* Retrieve current temperature for initial TX power calibration. */
sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
sc->temp = iwn4965_get_temperature(sc);
/* Copy runtime sections into pre-allocated DMA-safe memory. */
memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ,
fw->main.text, fw->main.textsz);
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
/* Tell adapter where to find runtime sections. */
if ((error = iwn_nic_lock(sc)) != 0)
return error;
iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
(dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
IWN_FW_UPDATED | fw->main.textsz);
iwn_nic_unlock(sc);
return 0;
}
static int
iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
const uint8_t *section, int size)
{
struct iwn_dma_info *dma = &sc->fw_dma;
int error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Copy firmware section into pre-allocated DMA-safe memory. */
memcpy(dma->vaddr, section, size);
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
if ((error = iwn_nic_lock(sc)) != 0)
return error;
IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
IWN_FH_TX_CONFIG_DMA_PAUSE);
IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
IWN_LOADDR(dma->paddr));
IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
IWN_HIADDR(dma->paddr) << 28 | size);
IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
IWN_FH_TXBUF_STATUS_TBNUM(1) |
IWN_FH_TXBUF_STATUS_TBIDX(1) |
IWN_FH_TXBUF_STATUS_TFBD_VALID);
/* Kick Flow Handler to start DMA transfer. */
IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
iwn_nic_unlock(sc);
/* Wait at most five seconds for FH DMA transfer to complete. */
return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz);
}
static int
iwn5000_load_firmware(struct iwn_softc *sc)
{
struct iwn_fw_part *fw;
int error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Load the initialization firmware on first boot only. */
fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
&sc->fw.main : &sc->fw.init;
error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
fw->text, fw->textsz);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not load firmware %s section, error %d\n",
__func__, ".text", error);
return error;
}
error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
fw->data, fw->datasz);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not load firmware %s section, error %d\n",
__func__, ".data", error);
return error;
}
/* Now press "execute". */
IWN_WRITE(sc, IWN_RESET, 0);
return 0;
}
/*
* Extract text and data sections from a legacy firmware image.
*/
static int
iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
{
const uint32_t *ptr;
size_t hdrlen = 24;
uint32_t rev;
ptr = (const uint32_t *)fw->data;
rev = le32toh(*ptr++);
sc->ucode_rev = rev;
/* Check firmware API version. */
if (IWN_FW_API(rev) <= 1) {
device_printf(sc->sc_dev,
"%s: bad firmware, need API version >=2\n", __func__);
return EINVAL;
}
if (IWN_FW_API(rev) >= 3) {
/* Skip build number (version 2 header). */
hdrlen += 4;
ptr++;
}
if (fw->size < hdrlen) {
device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
__func__, fw->size);
return EINVAL;
}
fw->main.textsz = le32toh(*ptr++);
fw->main.datasz = le32toh(*ptr++);
fw->init.textsz = le32toh(*ptr++);
fw->init.datasz = le32toh(*ptr++);
fw->boot.textsz = le32toh(*ptr++);
/* Check that all firmware sections fit. */
if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
__func__, fw->size);
return EINVAL;
}
/* Get pointers to firmware sections. */
fw->main.text = (const uint8_t *)ptr;
fw->main.data = fw->main.text + fw->main.textsz;
fw->init.text = fw->main.data + fw->main.datasz;
fw->init.data = fw->init.text + fw->init.textsz;
fw->boot.text = fw->init.data + fw->init.datasz;
return 0;
}
/*
* Extract text and data sections from a TLV firmware image.
*/
static int
iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
uint16_t alt)
{
const struct iwn_fw_tlv_hdr *hdr;
const struct iwn_fw_tlv *tlv;
const uint8_t *ptr, *end;
uint64_t altmask;
uint32_t len, tmp;
if (fw->size < sizeof (*hdr)) {
device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
__func__, fw->size);
return EINVAL;
}
hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n",
__func__, le32toh(hdr->signature));
return EINVAL;
}
DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr,
le32toh(hdr->build));
sc->ucode_rev = le32toh(hdr->rev);
/*
* Select the closest supported alternative that is less than
* or equal to the specified one.
*/
altmask = le64toh(hdr->altmask);
while (alt > 0 && !(altmask & (1ULL << alt)))
alt--; /* Downgrade. */
DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt);
ptr = (const uint8_t *)(hdr + 1);
end = (const uint8_t *)(fw->data + fw->size);
/* Parse type-length-value fields. */
while (ptr + sizeof (*tlv) <= end) {
tlv = (const struct iwn_fw_tlv *)ptr;
len = le32toh(tlv->len);
ptr += sizeof (*tlv);
if (ptr + len > end) {
device_printf(sc->sc_dev,
"%s: firmware too short: %zu bytes\n", __func__,
fw->size);
return EINVAL;
}
/* Skip other alternatives. */
if (tlv->alt != 0 && tlv->alt != htole16(alt))
goto next;
switch (le16toh(tlv->type)) {
case IWN_FW_TLV_MAIN_TEXT:
fw->main.text = ptr;
fw->main.textsz = len;
break;
case IWN_FW_TLV_MAIN_DATA:
fw->main.data = ptr;
fw->main.datasz = len;
break;
case IWN_FW_TLV_INIT_TEXT:
fw->init.text = ptr;
fw->init.textsz = len;
break;
case IWN_FW_TLV_INIT_DATA:
fw->init.data = ptr;
fw->init.datasz = len;
break;
case IWN_FW_TLV_BOOT_TEXT:
fw->boot.text = ptr;
fw->boot.textsz = len;
break;
case IWN_FW_TLV_ENH_SENS:
if (!len)
sc->sc_flags |= IWN_FLAG_ENH_SENS;
break;
case IWN_FW_TLV_PHY_CALIB:
tmp = le32toh(*ptr);
if (tmp < 253) {
sc->reset_noise_gain = tmp;
sc->noise_gain = tmp + 1;
}
break;
case IWN_FW_TLV_PAN:
sc->sc_flags |= IWN_FLAG_PAN_SUPPORT;
DPRINTF(sc, IWN_DEBUG_RESET,
"PAN Support found: %d\n", 1);
break;
case IWN_FW_TLV_FLAGS:
if (len < sizeof(uint32_t))
break;
if (len % sizeof(uint32_t))
break;
sc->tlv_feature_flags = le32toh(*ptr);
DPRINTF(sc, IWN_DEBUG_RESET,
"%s: feature: 0x%08x\n",
__func__,
sc->tlv_feature_flags);
break;
case IWN_FW_TLV_PBREQ_MAXLEN:
case IWN_FW_TLV_RUNT_EVTLOG_PTR:
case IWN_FW_TLV_RUNT_EVTLOG_SIZE:
case IWN_FW_TLV_RUNT_ERRLOG_PTR:
case IWN_FW_TLV_INIT_EVTLOG_PTR:
case IWN_FW_TLV_INIT_EVTLOG_SIZE:
case IWN_FW_TLV_INIT_ERRLOG_PTR:
case IWN_FW_TLV_WOWLAN_INST:
case IWN_FW_TLV_WOWLAN_DATA:
DPRINTF(sc, IWN_DEBUG_RESET,
"TLV type %d recognized but not handled\n",
le16toh(tlv->type));
break;
default:
DPRINTF(sc, IWN_DEBUG_RESET,
"TLV type %d not handled\n", le16toh(tlv->type));
break;
}
next: /* TLV fields are 32-bit aligned. */
ptr += (len + 3) & ~3;
}
return 0;
}
static int
iwn_read_firmware(struct iwn_softc *sc)
{
struct iwn_fw_info *fw = &sc->fw;
int error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
IWN_UNLOCK(sc);
memset(fw, 0, sizeof (*fw));
/* Read firmware image from filesystem. */
sc->fw_fp = firmware_get(sc->fwname);
if (sc->fw_fp == NULL) {
device_printf(sc->sc_dev, "%s: could not read firmware %s\n",
__func__, sc->fwname);
IWN_LOCK(sc);
return EINVAL;
}
IWN_LOCK(sc);
fw->size = sc->fw_fp->datasize;
fw->data = (const uint8_t *)sc->fw_fp->data;
if (fw->size < sizeof (uint32_t)) {
device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n",
__func__, fw->size);
firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
sc->fw_fp = NULL;
return EINVAL;
}
/* Retrieve text and data sections. */
if (*(const uint32_t *)fw->data != 0) /* Legacy image. */
error = iwn_read_firmware_leg(sc, fw);
else
error = iwn_read_firmware_tlv(sc, fw, 1);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not read firmware sections, error %d\n",
__func__, error);
firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
sc->fw_fp = NULL;
return error;
}
device_printf(sc->sc_dev, "%s: ucode rev=0x%08x\n", __func__, sc->ucode_rev);
/* Make sure text and data sections fit in hardware memory. */
if (fw->main.textsz > sc->fw_text_maxsz ||
fw->main.datasz > sc->fw_data_maxsz ||
fw->init.textsz > sc->fw_text_maxsz ||
fw->init.datasz > sc->fw_data_maxsz ||
fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
(fw->boot.textsz & 3) != 0) {
device_printf(sc->sc_dev, "%s: firmware sections too large\n",
__func__);
firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
sc->fw_fp = NULL;
return EINVAL;
}
/* We can proceed with loading the firmware. */
return 0;
}
static int
iwn_clock_wait(struct iwn_softc *sc)
{
int ntries;
/* Set "initialization complete" bit. */
IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
/* Wait for clock stabilization. */
for (ntries = 0; ntries < 2500; ntries++) {
if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
return 0;
DELAY(10);
}
device_printf(sc->sc_dev,
"%s: timeout waiting for clock stabilization\n", __func__);
return ETIMEDOUT;
}
static int
iwn_apm_init(struct iwn_softc *sc)
{
uint32_t reg;
int error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Disable L0s exit timer (NMI bug workaround). */
IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
/* Don't wait for ICH L0s (ICH bug workaround). */
IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
/* Set FH wait threshold to max (HW bug under stress workaround). */
IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
/* Enable HAP INTA to move adapter from L1a to L0s. */
IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
/* Retrieve PCIe Active State Power Management (ASPM). */
reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
if (reg & 0x02) /* L1 Entry enabled. */
IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
else
IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
if (sc->base_params->pll_cfg_val)
IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val);
/* Wait for clock stabilization before accessing prph. */
if ((error = iwn_clock_wait(sc)) != 0)
return error;
if ((error = iwn_nic_lock(sc)) != 0)
return error;
if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
/* Enable DMA and BSM (Bootstrap State Machine). */
iwn_prph_write(sc, IWN_APMG_CLK_EN,
IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
} else {
/* Enable DMA. */
iwn_prph_write(sc, IWN_APMG_CLK_EN,
IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
}
DELAY(20);
/* Disable L1-Active. */
iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
iwn_nic_unlock(sc);
return 0;
}
static void
iwn_apm_stop_master(struct iwn_softc *sc)
{
int ntries;
/* Stop busmaster DMA activity. */
IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
for (ntries = 0; ntries < 100; ntries++) {
if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
return;
DELAY(10);
}
device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__);
}
static void
iwn_apm_stop(struct iwn_softc *sc)
{
iwn_apm_stop_master(sc);
/* Reset the entire device. */
IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
DELAY(10);
/* Clear "initialization complete" bit. */
IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
}
static int
iwn4965_nic_config(struct iwn_softc *sc)
{
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
/*
* I don't believe this to be correct but this is what the
* vendor driver is doing. Probably the bits should not be
* shifted in IWN_RFCFG_*.
*/
IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
IWN_RFCFG_TYPE(sc->rfcfg) |
IWN_RFCFG_STEP(sc->rfcfg) |
IWN_RFCFG_DASH(sc->rfcfg));
}
IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
return 0;
}
static int
iwn5000_nic_config(struct iwn_softc *sc)
{
uint32_t tmp;
int error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
IWN_RFCFG_TYPE(sc->rfcfg) |
IWN_RFCFG_STEP(sc->rfcfg) |
IWN_RFCFG_DASH(sc->rfcfg));
}
IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
if ((error = iwn_nic_lock(sc)) != 0)
return error;
iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
/*
* Select first Switching Voltage Regulator (1.32V) to
* solve a stability issue related to noisy DC2DC line
* in the silicon of 1000 Series.
*/
tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
}
iwn_nic_unlock(sc);
if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
/* Use internal power amplifier only. */
IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
}
if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) {
/* Indicate that ROM calibration version is >=6. */
IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
}
if (sc->base_params->additional_gp_drv_bit)
IWN_SETBITS(sc, IWN_GP_DRIVER,
sc->base_params->additional_gp_drv_bit);
return 0;
}
/*
* Take NIC ownership over Intel Active Management Technology (AMT).
*/
static int
iwn_hw_prepare(struct iwn_softc *sc)
{
int ntries;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
/* Check if hardware is ready. */
IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
for (ntries = 0; ntries < 5; ntries++) {
if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
IWN_HW_IF_CONFIG_NIC_READY)
return 0;
DELAY(10);
}
/* Hardware not ready, force into ready state. */
IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
for (ntries = 0; ntries < 15000; ntries++) {
if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
IWN_HW_IF_CONFIG_PREPARE_DONE))
break;
DELAY(10);
}
if (ntries == 15000)
return ETIMEDOUT;
/* Hardware should be ready now. */
IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
for (ntries = 0; ntries < 5; ntries++) {
if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
IWN_HW_IF_CONFIG_NIC_READY)
return 0;
DELAY(10);
}
return ETIMEDOUT;
}
static int
iwn_hw_init(struct iwn_softc *sc)
{
struct iwn_ops *ops = &sc->ops;
int error, chnl, qid;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
/* Clear pending interrupts. */
IWN_WRITE(sc, IWN_INT, 0xffffffff);
if ((error = iwn_apm_init(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not power ON adapter, error %d\n", __func__,
error);
return error;
}
/* Select VMAIN power source. */
if ((error = iwn_nic_lock(sc)) != 0)
return error;
iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
iwn_nic_unlock(sc);
/* Perform adapter-specific initialization. */
if ((error = ops->nic_config(sc)) != 0)
return error;
/* Initialize RX ring. */
if ((error = iwn_nic_lock(sc)) != 0)
return error;
IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
/* Set physical address of RX ring (256-byte aligned). */
IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
/* Set physical address of RX status (16-byte aligned). */
IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
/* Enable RX. */
IWN_WRITE(sc, IWN_FH_RX_CONFIG,
IWN_FH_RX_CONFIG_ENA |
IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */
IWN_FH_RX_CONFIG_IRQ_DST_HOST |
IWN_FH_RX_CONFIG_SINGLE_FRAME |
IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
iwn_nic_unlock(sc);
IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
if ((error = iwn_nic_lock(sc)) != 0)
return error;
/* Initialize TX scheduler. */
iwn_prph_write(sc, sc->sched_txfact_addr, 0);
/* Set physical address of "keep warm" page (16-byte aligned). */
IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
/* Initialize TX rings. */
for (qid = 0; qid < sc->ntxqs; qid++) {
struct iwn_tx_ring *txq = &sc->txq[qid];
/* Set physical address of TX ring (256-byte aligned). */
IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
txq->desc_dma.paddr >> 8);
}
iwn_nic_unlock(sc);
/* Enable DMA channels. */
for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
IWN_FH_TX_CONFIG_DMA_ENA |
IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
}
/* Clear "radio off" and "commands blocked" bits. */
IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
/* Clear pending interrupts. */
IWN_WRITE(sc, IWN_INT, 0xffffffff);
/* Enable interrupt coalescing. */
IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
/* Enable interrupts. */
IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
/* _Really_ make sure "radio off" bit is cleared! */
IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
/* Enable shadow registers. */
if (sc->base_params->shadow_reg_enable)
IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
if ((error = ops->load_firmware(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not load firmware, error %d\n", __func__,
error);
return error;
}
/* Wait at most one second for firmware alive notification. */
if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) {
device_printf(sc->sc_dev,
"%s: timeout waiting for adapter to initialize, error %d\n",
__func__, error);
return error;
}
/* Do post-firmware initialization. */
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return ops->post_alive(sc);
}
static void
iwn_hw_stop(struct iwn_softc *sc)
{
int chnl, qid, ntries;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
/* Disable interrupts. */
IWN_WRITE(sc, IWN_INT_MASK, 0);
IWN_WRITE(sc, IWN_INT, 0xffffffff);
IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
sc->sc_flags &= ~IWN_FLAG_USE_ICT;
/* Make sure we no longer hold the NIC lock. */
iwn_nic_unlock(sc);
/* Stop TX scheduler. */
iwn_prph_write(sc, sc->sched_txfact_addr, 0);
/* Stop all DMA channels. */
if (iwn_nic_lock(sc) == 0) {
for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
for (ntries = 0; ntries < 200; ntries++) {
if (IWN_READ(sc, IWN_FH_TX_STATUS) &
IWN_FH_TX_STATUS_IDLE(chnl))
break;
DELAY(10);
}
}
iwn_nic_unlock(sc);
}
/* Stop RX ring. */
iwn_reset_rx_ring(sc, &sc->rxq);
/* Reset all TX rings. */
for (qid = 0; qid < sc->ntxqs; qid++)
iwn_reset_tx_ring(sc, &sc->txq[qid]);
if (iwn_nic_lock(sc) == 0) {
iwn_prph_write(sc, IWN_APMG_CLK_DIS,
IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
iwn_nic_unlock(sc);
}
DELAY(5);
/* Power OFF adapter. */
iwn_apm_stop(sc);
}
static void
iwn_radio_on(void *arg0, int pending)
{
struct iwn_softc *sc = arg0;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
if (vap != NULL) {
iwn_init(sc);
ieee80211_init(vap);
}
}
static void
iwn_radio_off(void *arg0, int pending)
{
struct iwn_softc *sc = arg0;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
iwn_stop(sc);
if (vap != NULL)
ieee80211_stop(vap);
/* Enable interrupts to get RF toggle notification. */
IWN_LOCK(sc);
IWN_WRITE(sc, IWN_INT, 0xffffffff);
IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
IWN_UNLOCK(sc);
}
static void
iwn_panicked(void *arg0, int pending)
{
struct iwn_softc *sc = arg0;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
int error;
if (vap == NULL) {
printf("%s: null vap\n", __func__);
return;
}
device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
"resetting...\n", __func__, vap->iv_state);
IWN_LOCK(sc);
iwn_stop_locked(sc);
iwn_init_locked(sc);
if (vap->iv_state >= IEEE80211_S_AUTH &&
(error = iwn_auth(sc, vap)) != 0) {
device_printf(sc->sc_dev,
"%s: could not move to auth state\n", __func__);
}
if (vap->iv_state >= IEEE80211_S_RUN &&
(error = iwn_run(sc, vap)) != 0) {
device_printf(sc->sc_dev,
"%s: could not move to run state\n", __func__);
}
/* Only run start once the NIC is in a useful state, like associated */
- iwn_start_locked(sc->sc_ifp);
+ iwn_start_locked(sc);
IWN_UNLOCK(sc);
}
static void
iwn_init_locked(struct iwn_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
int error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__);
IWN_LOCK_ASSERT(sc);
+ sc->sc_flags |= IWN_FLAG_RUNNING;
+
if ((error = iwn_hw_prepare(sc)) != 0) {
device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n",
__func__, error);
goto fail;
}
/* Initialize interrupt mask to default value. */
sc->int_mask = IWN_INT_MASK_DEF;
sc->sc_flags &= ~IWN_FLAG_USE_ICT;
/* Check that the radio is not disabled by hardware switch. */
if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
device_printf(sc->sc_dev,
"radio is disabled by hardware switch\n");
/* Enable interrupts to get RF toggle notifications. */
IWN_WRITE(sc, IWN_INT, 0xffffffff);
IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
return;
}
/* Read firmware images from the filesystem. */
if ((error = iwn_read_firmware(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not read firmware, error %d\n", __func__,
error);
goto fail;
}
/* Initialize hardware and upload firmware. */
error = iwn_hw_init(sc);
firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
sc->fw_fp = NULL;
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not initialize hardware, error %d\n", __func__,
error);
goto fail;
}
/* Configure adapter now that it is ready. */
if ((error = iwn_config(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not configure device, error %d\n", __func__,
error);
goto fail;
}
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
-
callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__);
return;
-fail: iwn_stop_locked(sc);
+fail:
+ sc->sc_flags &= ~IWN_FLAG_RUNNING;
+ iwn_stop_locked(sc);
DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__);
}
static void
-iwn_init(void *arg)
+iwn_init(struct iwn_softc *sc)
{
- struct iwn_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
IWN_LOCK(sc);
iwn_init_locked(sc);
IWN_UNLOCK(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ieee80211_start_all(ic);
+ if (sc->sc_flags & IWN_FLAG_RUNNING)
+ ieee80211_start_all(&sc->sc_ic);
}
static void
iwn_stop_locked(struct iwn_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
IWN_LOCK_ASSERT(sc);
sc->sc_is_scanning = 0;
sc->sc_tx_timer = 0;
callout_stop(&sc->watchdog_to);
callout_stop(&sc->calib_to);
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->sc_flags &= ~IWN_FLAG_RUNNING;
/* Power OFF hardware. */
iwn_hw_stop(sc);
}
static void
iwn_stop(struct iwn_softc *sc)
{
IWN_LOCK(sc);
iwn_stop_locked(sc);
IWN_UNLOCK(sc);
}
/*
* Callback from net80211 to start a scan.
*/
static void
iwn_scan_start(struct ieee80211com *ic)
{
struct iwn_softc *sc = ic->ic_softc;
IWN_LOCK(sc);
/* make the link LED blink while we're scanning */
iwn_set_led(sc, IWN_LED_LINK, 20, 2);
IWN_UNLOCK(sc);
}
/*
* Callback from net80211 to terminate a scan.
*/
static void
iwn_scan_end(struct ieee80211com *ic)
{
struct iwn_softc *sc = ic->ic_softc;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
IWN_LOCK(sc);
if (vap->iv_state == IEEE80211_S_RUN) {
/* Set link LED to ON status if we are associated */
iwn_set_led(sc, IWN_LED_LINK, 0, 1);
}
IWN_UNLOCK(sc);
}
/*
* Callback from net80211 to force a channel change.
*/
static void
iwn_set_channel(struct ieee80211com *ic)
{
const struct ieee80211_channel *c = ic->ic_curchan;
struct iwn_softc *sc = ic->ic_softc;
int error;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
IWN_LOCK(sc);
sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
/*
* Only need to set the channel in Monitor mode. AP scanning and auth
* are already taken care of by their respective firmware commands.
*/
if (ic->ic_opmode == IEEE80211_M_MONITOR) {
error = iwn_config(sc);
if (error != 0)
device_printf(sc->sc_dev,
"%s: error %d settting channel\n", __func__, error);
}
IWN_UNLOCK(sc);
}
/*
* Callback from net80211 to start scanning of the current channel.
*/
static void
iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
struct ieee80211vap *vap = ss->ss_vap;
struct ieee80211com *ic = vap->iv_ic;
struct iwn_softc *sc = ic->ic_softc;
int error;
IWN_LOCK(sc);
error = iwn_scan(sc, vap, ss, ic->ic_curchan);
IWN_UNLOCK(sc);
if (error != 0)
ieee80211_cancel_scan(vap);
}
/*
* Callback from net80211 to handle the minimum dwell time being met.
* The intent is to terminate the scan but we just let the firmware
* notify us when it's finished as we have no safe way to abort it.
*/
static void
iwn_scan_mindwell(struct ieee80211_scan_state *ss)
{
/* NB: don't try to abort scan; wait for firmware to finish */
}
static void
iwn_hw_reset(void *arg0, int pending)
{
struct iwn_softc *sc = arg0;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__);
iwn_stop(sc);
iwn_init(sc);
ieee80211_notify_radio(ic, 1);
}
#ifdef IWN_DEBUG
#define IWN_DESC(x) case x: return #x
/*
* Translate CSR code to string
*/
static char *iwn_get_csr_string(int csr)
{
switch (csr) {
IWN_DESC(IWN_HW_IF_CONFIG);
IWN_DESC(IWN_INT_COALESCING);
IWN_DESC(IWN_INT);
IWN_DESC(IWN_INT_MASK);
IWN_DESC(IWN_FH_INT);
IWN_DESC(IWN_GPIO_IN);
IWN_DESC(IWN_RESET);
IWN_DESC(IWN_GP_CNTRL);
IWN_DESC(IWN_HW_REV);
IWN_DESC(IWN_EEPROM);
IWN_DESC(IWN_EEPROM_GP);
IWN_DESC(IWN_OTP_GP);
IWN_DESC(IWN_GIO);
IWN_DESC(IWN_GP_UCODE);
IWN_DESC(IWN_GP_DRIVER);
IWN_DESC(IWN_UCODE_GP1);
IWN_DESC(IWN_UCODE_GP2);
IWN_DESC(IWN_LED);
IWN_DESC(IWN_DRAM_INT_TBL);
IWN_DESC(IWN_GIO_CHICKEN);
IWN_DESC(IWN_ANA_PLL);
IWN_DESC(IWN_HW_REV_WA);
IWN_DESC(IWN_DBG_HPET_MEM);
default:
return "UNKNOWN CSR";
}
}
/*
* This function prints the firmware registers.
*/
static void
iwn_debug_register(struct iwn_softc *sc)
{
int i;
static const uint32_t csr_tbl[] = {
IWN_HW_IF_CONFIG,
IWN_INT_COALESCING,
IWN_INT,
IWN_INT_MASK,
IWN_FH_INT,
IWN_GPIO_IN,
IWN_RESET,
IWN_GP_CNTRL,
IWN_HW_REV,
IWN_EEPROM,
IWN_EEPROM_GP,
IWN_OTP_GP,
IWN_GIO,
IWN_GP_UCODE,
IWN_GP_DRIVER,
IWN_UCODE_GP1,
IWN_UCODE_GP2,
IWN_LED,
IWN_DRAM_INT_TBL,
IWN_GIO_CHICKEN,
IWN_ANA_PLL,
IWN_HW_REV_WA,
IWN_DBG_HPET_MEM,
};
DPRINTF(sc, IWN_DEBUG_REGISTER,
"CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s",
"\n");
for (i = 0; i < nitems(csr_tbl); i++){
DPRINTF(sc, IWN_DEBUG_REGISTER," %10s: 0x%08x ",
iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i]));
if ((i+1) % 3 == 0)
DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
}
DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n");
}
#endif
Index: head/sys/dev/iwn/if_iwnvar.h
===================================================================
--- head/sys/dev/iwn/if_iwnvar.h (revision 287196)
+++ head/sys/dev/iwn/if_iwnvar.h (revision 287197)
@@ -1,439 +1,437 @@
/* $FreeBSD$ */
/* $OpenBSD: if_iwnvar.h,v 1.18 2010/04/30 16:06:46 damien Exp $ */
/*-
* Copyright (c) 2013 Cedric GROSS <cg@cgross.info>
* Copyright (c) 2011 Intel Corporation
* Copyright (c) 2007, 2008
* Damien Bergamini <damien.bergamini@free.fr>
* Copyright (c) 2008 Sam Leffler, Errno Consulting
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
enum iwn_rxon_ctx_id {
IWN_RXON_BSS_CTX,
IWN_RXON_PAN_CTX,
IWN_NUM_RXON_CTX
};
struct iwn_pan_slot {
uint16_t time;
uint8_t type;
uint8_t reserved;
} __packed;
struct iwn_pan_params_cmd {
uint16_t flags;
#define IWN_PAN_PARAMS_FLG_SLOTTED_MODE (1 << 3)
uint8_t reserved;
uint8_t num_slots;
struct iwn_pan_slot slots[10];
} __packed;
struct iwn_led_mode
{
uint8_t led_cur_mode;
uint64_t led_cur_bt;
uint64_t led_last_bt;
uint64_t led_cur_tpt;
uint64_t led_last_tpt;
uint64_t led_bt_diff;
int led_cur_time;
int led_last_time;
};
struct iwn_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint64_t wr_tsft;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_dbm_antsignal;
int8_t wr_dbm_antnoise;
} __packed;
#define IWN_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_TSFT) | \
(1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE))
struct iwn_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint8_t wt_rate;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
} __packed;
#define IWN_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL))
struct iwn_dma_info {
bus_dma_tag_t tag;
bus_dmamap_t map;
bus_dma_segment_t seg;
bus_addr_t paddr;
caddr_t vaddr;
bus_size_t size;
};
struct iwn_tx_data {
bus_dmamap_t map;
bus_addr_t cmd_paddr;
bus_addr_t scratch_paddr;
struct mbuf *m;
struct ieee80211_node *ni;
};
struct iwn_tx_ring {
struct iwn_dma_info desc_dma;
struct iwn_dma_info cmd_dma;
struct iwn_tx_desc *desc;
struct iwn_tx_cmd *cmd;
struct iwn_tx_data data[IWN_TX_RING_COUNT];
bus_dma_tag_t data_dmat;
int qid;
int queued;
int cur;
int read;
};
struct iwn_softc;
struct iwn_rx_data {
struct mbuf *m;
bus_dmamap_t map;
};
struct iwn_rx_ring {
struct iwn_dma_info desc_dma;
struct iwn_dma_info stat_dma;
uint32_t *desc;
struct iwn_rx_status *stat;
struct iwn_rx_data data[IWN_RX_RING_COUNT];
bus_dma_tag_t data_dmat;
int cur;
};
struct iwn_node {
struct ieee80211_node ni; /* must be the first */
uint16_t disable_tid;
uint8_t id;
struct {
uint64_t bitmap;
int startidx;
int nframes;
} agg[IEEE80211_TID_SIZE];
};
struct iwn_calib_state {
uint8_t state;
#define IWN_CALIB_STATE_INIT 0
#define IWN_CALIB_STATE_ASSOC 1
#define IWN_CALIB_STATE_RUN 2
u_int nbeacons;
uint32_t noise[3];
uint32_t rssi[3];
uint32_t ofdm_x1;
uint32_t ofdm_mrc_x1;
uint32_t ofdm_x4;
uint32_t ofdm_mrc_x4;
uint32_t cck_x4;
uint32_t cck_mrc_x4;
uint32_t bad_plcp_ofdm;
uint32_t fa_ofdm;
uint32_t bad_plcp_cck;
uint32_t fa_cck;
uint32_t low_fa;
uint32_t bad_plcp_ht;
uint8_t cck_state;
#define IWN_CCK_STATE_INIT 0
#define IWN_CCK_STATE_LOFA 1
#define IWN_CCK_STATE_HIFA 2
uint8_t noise_samples[20];
u_int cur_noise_sample;
uint8_t noise_ref;
uint32_t energy_samples[10];
u_int cur_energy_sample;
uint32_t energy_cck;
};
struct iwn_calib_info {
uint8_t *buf;
u_int len;
};
struct iwn_fw_part {
const uint8_t *text;
uint32_t textsz;
const uint8_t *data;
uint32_t datasz;
};
struct iwn_fw_info {
const uint8_t *data;
size_t size;
struct iwn_fw_part init;
struct iwn_fw_part main;
struct iwn_fw_part boot;
};
struct iwn_ops {
int (*load_firmware)(struct iwn_softc *);
void (*read_eeprom)(struct iwn_softc *);
int (*post_alive)(struct iwn_softc *);
int (*nic_config)(struct iwn_softc *);
void (*update_sched)(struct iwn_softc *, int, int, uint8_t,
uint16_t);
int (*get_temperature)(struct iwn_softc *);
int (*get_rssi)(struct iwn_softc *, struct iwn_rx_stat *);
int (*set_txpower)(struct iwn_softc *,
struct ieee80211_channel *, int);
int (*init_gains)(struct iwn_softc *);
int (*set_gains)(struct iwn_softc *);
int (*add_node)(struct iwn_softc *, struct iwn_node_info *,
int);
void (*tx_done)(struct iwn_softc *, struct iwn_rx_desc *,
struct iwn_rx_data *);
void (*ampdu_tx_start)(struct iwn_softc *,
struct ieee80211_node *, int, uint8_t, uint16_t);
void (*ampdu_tx_stop)(struct iwn_softc *, int, uint8_t,
uint16_t);
};
struct iwn_vap {
struct ieee80211vap iv_vap;
uint8_t iv_ridx;
int (*iv_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
int ctx;
int beacon_int;
- uint8_t macaddr[IEEE80211_ADDR_LEN];
};
#define IWN_VAP(_vap) ((struct iwn_vap *)(_vap))
struct iwn_softc {
device_t sc_dev;
-
- struct ifnet *sc_ifp;
int sc_debug;
-
struct mtx sc_mtx;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
u_int sc_flags;
#define IWN_FLAG_HAS_OTPROM (1 << 1)
#define IWN_FLAG_CALIB_DONE (1 << 2)
#define IWN_FLAG_USE_ICT (1 << 3)
#define IWN_FLAG_INTERNAL_PA (1 << 4)
#define IWN_FLAG_HAS_11N (1 << 6)
#define IWN_FLAG_ENH_SENS (1 << 7)
#define IWN_FLAG_ADV_BTCOEX (1 << 8)
#define IWN_FLAG_PAN_SUPPORT (1 << 9)
#define IWN_FLAG_BTCOEX (1 << 10)
+#define IWN_FLAG_RUNNING (1 << 11)
uint8_t hw_type;
/* subdevice_id used to adjust configuration */
uint16_t subdevice_id;
struct iwn_ops ops;
const char *fwname;
const struct iwn_sensitivity_limits
*limits;
int ntxqs;
int firstaggqueue;
int ndmachnls;
uint8_t broadcast_id;
int rxonsz;
int schedsz;
uint32_t fw_text_maxsz;
uint32_t fw_data_maxsz;
uint32_t fwsz;
bus_size_t sched_txfact_addr;
uint32_t reset_noise_gain;
uint32_t noise_gain;
/* TX scheduler rings. */
struct iwn_dma_info sched_dma;
uint16_t *sched;
uint32_t sched_base;
/* "Keep Warm" page. */
struct iwn_dma_info kw_dma;
/* Firmware image. */
const struct firmware *fw_fp;
/* Firmware DMA transfer. */
struct iwn_dma_info fw_dma;
/* ICT table. */
struct iwn_dma_info ict_dma;
uint32_t *ict;
int ict_cur;
/* TX/RX rings. */
struct iwn_tx_ring txq[IWN5000_NTXQUEUES];
struct iwn_rx_ring rxq;
struct resource *mem;
bus_space_tag_t sc_st;
bus_space_handle_t sc_sh;
struct resource *irq;
void *sc_ih;
bus_size_t sc_sz;
int sc_cap_off; /* PCIe Capabilities. */
/* Tasks used by the driver */
struct task sc_reinit_task;
struct task sc_radioon_task;
struct task sc_radiooff_task;
struct task sc_panic_task;
struct task sc_xmit_task;
/* Taskqueue */
struct taskqueue *sc_tq;
/* Calibration information */
struct callout calib_to;
int calib_cnt;
struct iwn_calib_state calib;
int last_calib_ticks;
struct callout watchdog_to;
- struct callout ct_kill_exit_to;
struct iwn_fw_info fw;
struct iwn_calib_info calibcmd[IWN5000_PHY_CALIB_MAX_RESULT];
uint32_t errptr;
struct iwn_rx_stat last_rx_stat;
int last_rx_valid;
struct iwn_ucode_info ucode_info;
struct iwn_rxon rx_on[IWN_NUM_RXON_CTX];
struct iwn_rxon *rxon;
int ctx;
struct ieee80211vap *ivap[IWN_NUM_RXON_CTX];
/* General statistics */
/*
* The statistics are reset after each channel
* change. So it may be zeroed after things like
* a background scan.
*
* So for now, this is just a cheap hack to
* expose the last received statistics dump
* via an ioctl(). Later versions of this
* could expose the last 'n' messages, or just
* provide a pipeline for the firmware responses
* via something like BPF.
*/
struct iwn_stats last_stat;
int last_stat_valid;
uint8_t uc_scan_progress;
uint32_t rawtemp;
int temp;
int noise;
uint32_t qfullmsk;
uint32_t prom_base;
struct iwn4965_eeprom_band
bands[IWN_NBANDS];
struct iwn_eeprom_chan eeprom_channels[IWN_NBANDS][IWN_MAX_CHAN_PER_BAND];
uint16_t rfcfg;
uint8_t calib_ver;
char eeprom_domain[4];
uint32_t eeprom_crystal;
int16_t eeprom_temp;
int16_t eeprom_temp_high;
int16_t eeprom_voltage;
int8_t maxpwr2GHz;
int8_t maxpwr5GHz;
int8_t maxpwr[IEEE80211_CHAN_MAX];
uint32_t tlv_feature_flags;
int32_t temp_off;
uint32_t int_mask;
uint8_t ntxchains;
uint8_t nrxchains;
uint8_t txchainmask;
uint8_t rxchainmask;
uint8_t chainmask;
int sc_tx_timer;
int sc_scan_timer;
/* Are we doing a scan? */
int sc_is_scanning;
/* Are we waiting for a beacon before xmit? */
int sc_beacon_wait;
struct ieee80211_tx_ampdu *qid2tap[IWN5000_NTXQUEUES];
int (*sc_ampdu_rx_start)(struct ieee80211_node *,
struct ieee80211_rx_ampdu *, int, int, int);
void (*sc_ampdu_rx_stop)(struct ieee80211_node *,
struct ieee80211_rx_ampdu *);
int (*sc_addba_request)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *, int, int, int);
int (*sc_addba_response)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *, int, int, int);
void (*sc_addba_stop)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *);
struct iwn_led_mode sc_led;
struct iwn_rx_radiotap_header sc_rxtap;
struct iwn_tx_radiotap_header sc_txtap;
/* The power save level originally configured by user */
int desired_pwrsave_level;
/*
* The current power save level, this may differ from the
* configured value due to thermal throttling etc.
*/
int current_pwrsave_level;
/* For specific params */
const struct iwn_base_params *base_params;
#define IWN_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
uint32_t ucode_rev;
/*
* Global queue for queuing xmit frames
* when we can't yet transmit (e.g. raw
* frames whilst waiting for beacons).
*/
struct mbufq sc_xmit_queue;
};
#define IWN_LOCK_INIT(_sc) \
mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
MTX_NETWORK_LOCK, MTX_DEF)
#define IWN_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define IWN_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define IWN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define IWN_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx)
Index: head/sys/dev/malo/if_malo.c
===================================================================
--- head/sys/dev/malo/if_malo.c (revision 287196)
+++ head/sys/dev/malo/if_malo.c (revision 287197)
@@ -1,2277 +1,2194 @@
/*-
* Copyright (c) 2008 Weongyo Jeong <weongyo@freebsd.org>
* Copyright (c) 2007 Marvell Semiconductor, Inc.
* Copyright (c) 2007 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
#ifdef __FreeBSD__
__FBSDID("$FreeBSD$");
#endif
#include "opt_malo.h"
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <machine/bus.h>
#include <sys/bus.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ethernet.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net/bpf.h>
#include <dev/malo/if_malo.h>
SYSCTL_NODE(_hw, OID_AUTO, malo, CTLFLAG_RD, 0,
"Marvell 88w8335 driver parameters");
static int malo_txcoalesce = 8; /* # tx pkts to q before poking f/w*/
SYSCTL_INT(_hw_malo, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &malo_txcoalesce,
0, "tx buffers to send at once");
static int malo_rxbuf = MALO_RXBUF; /* # rx buffers to allocate */
SYSCTL_INT(_hw_malo, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &malo_rxbuf,
0, "rx buffers allocated");
static int malo_rxquota = MALO_RXBUF; /* # max buffers to process */
SYSCTL_INT(_hw_malo, OID_AUTO, rxquota, CTLFLAG_RWTUN, &malo_rxquota,
0, "max rx buffers to process per interrupt");
static int malo_txbuf = MALO_TXBUF; /* # tx buffers to allocate */
SYSCTL_INT(_hw_malo, OID_AUTO, txbuf, CTLFLAG_RWTUN, &malo_txbuf,
0, "tx buffers allocated");
#ifdef MALO_DEBUG
static int malo_debug = 0;
SYSCTL_INT(_hw_malo, OID_AUTO, debug, CTLFLAG_RWTUN, &malo_debug,
0, "control debugging printfs");
enum {
MALO_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
MALO_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
MALO_DEBUG_RECV = 0x00000004, /* basic recv operation */
MALO_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
MALO_DEBUG_RESET = 0x00000010, /* reset processing */
MALO_DEBUG_INTR = 0x00000040, /* ISR */
MALO_DEBUG_TX_PROC = 0x00000080, /* tx ISR proc */
MALO_DEBUG_RX_PROC = 0x00000100, /* rx ISR proc */
MALO_DEBUG_STATE = 0x00000400, /* 802.11 state transitions */
MALO_DEBUG_NODE = 0x00000800, /* node management */
MALO_DEBUG_RECV_ALL = 0x00001000, /* trace all frames (beacons) */
MALO_DEBUG_FW = 0x00008000, /* firmware */
MALO_DEBUG_ANY = 0xffffffff
};
#define IS_BEACON(wh) \
((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | \
IEEE80211_FC0_SUBTYPE_MASK)) == \
(IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
#define IFF_DUMPPKTS_RECV(sc, wh) \
(((sc->malo_debug & MALO_DEBUG_RECV) && \
- ((sc->malo_debug & MALO_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
- (sc->malo_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == \
- (IFF_DEBUG|IFF_LINK2))
+ ((sc->malo_debug & MALO_DEBUG_RECV_ALL) || !IS_BEACON(wh))))
#define IFF_DUMPPKTS_XMIT(sc) \
- ((sc->malo_debug & MALO_DEBUG_XMIT) || \
- (sc->malo_ifp->if_flags & (IFF_DEBUG | IFF_LINK2)) == \
- (IFF_DEBUG | IFF_LINK2))
+ (sc->malo_debug & MALO_DEBUG_XMIT)
#define DPRINTF(sc, m, fmt, ...) do { \
if (sc->malo_debug & (m)) \
printf(fmt, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(sc, m, fmt, ...) do { \
(void) sc; \
} while (0)
#endif
static MALLOC_DEFINE(M_MALODEV, "malodev", "malo driver dma buffers");
static struct ieee80211vap *malo_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void malo_vap_delete(struct ieee80211vap *);
static int malo_dma_setup(struct malo_softc *);
static int malo_setup_hwdma(struct malo_softc *);
static void malo_txq_init(struct malo_softc *, struct malo_txq *, int);
static void malo_tx_cleanupq(struct malo_softc *, struct malo_txq *);
-static void malo_start(struct ifnet *);
+static void malo_parent(struct ieee80211com *);
+static int malo_transmit(struct ieee80211com *, struct mbuf *);
+static void malo_start(struct malo_softc *);
static void malo_watchdog(void *);
-static int malo_ioctl(struct ifnet *, u_long, caddr_t);
static void malo_updateslot(struct ieee80211com *);
static int malo_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void malo_scan_start(struct ieee80211com *);
static void malo_scan_end(struct ieee80211com *);
static void malo_set_channel(struct ieee80211com *);
static int malo_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void malo_sysctlattach(struct malo_softc *);
static void malo_announce(struct malo_softc *);
static void malo_dma_cleanup(struct malo_softc *);
-static void malo_stop_locked(struct ifnet *, int);
+static void malo_stop(struct malo_softc *);
static int malo_chan_set(struct malo_softc *, struct ieee80211_channel *);
static int malo_mode_init(struct malo_softc *);
static void malo_tx_proc(void *, int);
static void malo_rx_proc(void *, int);
static void malo_init(void *);
/*
* Read/Write shorthands for accesses to BAR 0. Note that all BAR 1
* operations are done in the "hal" except for fetching the H/W MAC address
* in malo_attach; there should be no other references to them here.
*/
static uint32_t
malo_bar0_read4(struct malo_softc *sc, bus_size_t off)
{
return bus_space_read_4(sc->malo_io0t, sc->malo_io0h, off);
}
static void
malo_bar0_write4(struct malo_softc *sc, bus_size_t off, uint32_t val)
{
DPRINTF(sc, MALO_DEBUG_FW, "%s: off 0x%jx val 0x%x\n",
__func__, (uintmax_t)off, val);
bus_space_write_4(sc->malo_io0t, sc->malo_io0h, off, val);
}
int
malo_attach(uint16_t devid, struct malo_softc *sc)
{
- int error;
- struct ieee80211com *ic;
- struct ifnet *ifp;
+ struct ieee80211com *ic = &sc->malo_ic;
struct malo_hal *mh;
+ int error;
uint8_t bands;
- ifp = sc->malo_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->malo_dev, "can not if_alloc()\n");
- return ENOSPC;
- }
- ic = ifp->if_l2com;
-
MALO_LOCK_INIT(sc);
callout_init_mtx(&sc->malo_watchdog_timer, &sc->malo_mtx, 0);
+ mbufq_init(&sc->malo_snd, ifqmaxlen);
- /* set these up early for if_printf use */
- if_initname(ifp, device_get_name(sc->malo_dev),
- device_get_unit(sc->malo_dev));
-
mh = malo_hal_attach(sc->malo_dev, devid,
sc->malo_io1h, sc->malo_io1t, sc->malo_dmat);
if (mh == NULL) {
- if_printf(ifp, "unable to attach HAL\n");
+ device_printf(sc->malo_dev, "unable to attach HAL\n");
error = EIO;
goto bad;
}
sc->malo_mh = mh;
/*
* Load firmware so we can get setup. We arbitrarily pick station
* firmware; we'll re-load firmware as needed so setting up
* the wrong mode isn't a big deal.
*/
error = malo_hal_fwload(mh, "malo8335-h", "malo8335-m");
if (error != 0) {
- if_printf(ifp, "unable to setup firmware\n");
+ device_printf(sc->malo_dev, "unable to setup firmware\n");
goto bad1;
}
/* XXX does gethwspecs() extract correct information? maybe not! */
error = malo_hal_gethwspecs(mh, &sc->malo_hwspecs);
if (error != 0) {
- if_printf(ifp, "unable to fetch h/w specs\n");
+ device_printf(sc->malo_dev, "unable to fetch h/w specs\n");
goto bad1;
}
DPRINTF(sc, MALO_DEBUG_FW,
"malo_hal_gethwspecs: hwversion 0x%x hostif 0x%x"
"maxnum_wcb 0x%x maxnum_mcaddr 0x%x maxnum_tx_wcb 0x%x"
"regioncode 0x%x num_antenna 0x%x fw_releasenum 0x%x"
"wcbbase0 0x%x rxdesc_read 0x%x rxdesc_write 0x%x"
"ul_fw_awakecookie 0x%x w[4] = %x %x %x %x",
sc->malo_hwspecs.hwversion,
sc->malo_hwspecs.hostinterface, sc->malo_hwspecs.maxnum_wcb,
sc->malo_hwspecs.maxnum_mcaddr, sc->malo_hwspecs.maxnum_tx_wcb,
sc->malo_hwspecs.regioncode, sc->malo_hwspecs.num_antenna,
sc->malo_hwspecs.fw_releasenum, sc->malo_hwspecs.wcbbase0,
sc->malo_hwspecs.rxdesc_read, sc->malo_hwspecs.rxdesc_write,
sc->malo_hwspecs.ul_fw_awakecookie,
sc->malo_hwspecs.wcbbase[0], sc->malo_hwspecs.wcbbase[1],
sc->malo_hwspecs.wcbbase[2], sc->malo_hwspecs.wcbbase[3]);
/* NB: the firmware does not appear to export a regdomain info API. */
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
ieee80211_init_channels(ic, NULL, &bands);
sc->malo_txantenna = 0x2; /* h/w default */
sc->malo_rxantenna = 0xffff; /* h/w default */
/*
* Allocate tx + rx descriptors and populate the lists.
* We immediately push the information to the firmware
* as otherwise it gets upset.
*/
error = malo_dma_setup(sc);
if (error != 0) {
- if_printf(ifp, "failed to setup descriptors: %d\n", error);
+ device_printf(sc->malo_dev,
+ "failed to setup descriptors: %d\n", error);
goto bad1;
}
error = malo_setup_hwdma(sc); /* push to firmware */
if (error != 0) /* NB: malo_setupdma prints msg */
goto bad2;
sc->malo_tq = taskqueue_create_fast("malo_taskq", M_NOWAIT,
taskqueue_thread_enqueue, &sc->malo_tq);
taskqueue_start_threads(&sc->malo_tq, 1, PI_NET,
- "%s taskq", ifp->if_xname);
+ "%s taskq", device_get_nameunit(sc->malo_dev));
TASK_INIT(&sc->malo_rxtask, 0, malo_rx_proc, sc);
TASK_INIT(&sc->malo_txtask, 0, malo_tx_proc, sc);
- ifp->if_softc = sc;
- ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
- ifp->if_start = malo_start;
- ifp->if_ioctl = malo_ioctl;
- ifp->if_init = malo_init;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(sc->malo_dev);
/* XXX not right but it's not used anywhere important */
ic->ic_phytype = IEEE80211_T_OFDM;
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_caps =
IEEE80211_C_STA /* station mode supported */
| IEEE80211_C_BGSCAN /* capable of bg scanning */
| IEEE80211_C_MONITOR /* monitor mode */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_TXPMGT /* capable of txpow mgt */
| IEEE80211_C_WPA /* capable of WPA1+WPA2 */
;
+ IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->malo_hwspecs.macaddr);
/*
* Transmit requires space in the packet for a special format transmit
* record and optional padding between this record and the payload.
* Ask the net80211 layer to arrange this when encapsulating
* packets so we can add it efficiently.
*/
ic->ic_headroom = sizeof(struct malo_txrec) -
sizeof(struct ieee80211_frame);
/* call MI attach routine. */
- ieee80211_ifattach(ic, sc->malo_hwspecs.macaddr);
+ ieee80211_ifattach(ic);
/* override default methods */
ic->ic_vap_create = malo_vap_create;
ic->ic_vap_delete = malo_vap_delete;
ic->ic_raw_xmit = malo_raw_xmit;
ic->ic_updateslot = malo_updateslot;
-
ic->ic_scan_start = malo_scan_start;
ic->ic_scan_end = malo_scan_end;
ic->ic_set_channel = malo_set_channel;
+ ic->ic_parent = malo_parent;
+ ic->ic_transmit = malo_transmit;
sc->malo_invalid = 0; /* ready to go, enable int handling */
ieee80211_radiotap_attach(ic,
&sc->malo_tx_th.wt_ihdr, sizeof(sc->malo_tx_th),
MALO_TX_RADIOTAP_PRESENT,
&sc->malo_rx_th.wr_ihdr, sizeof(sc->malo_rx_th),
MALO_RX_RADIOTAP_PRESENT);
/*
* Setup dynamic sysctl's.
*/
malo_sysctlattach(sc);
if (bootverbose)
ieee80211_announce(ic);
malo_announce(sc);
return 0;
bad2:
malo_dma_cleanup(sc);
bad1:
malo_hal_detach(mh);
bad:
- if_free(ifp);
sc->malo_invalid = 1;
return error;
}
static struct ieee80211vap *
malo_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
- struct ifnet *ifp = ic->ic_ifp;
+ struct malo_softc *sc = ic->ic_softc;
struct malo_vap *mvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) {
- if_printf(ifp, "multiple vaps not supported\n");
+ device_printf(sc->malo_dev, "multiple vaps not supported\n");
return NULL;
}
switch (opmode) {
case IEEE80211_M_STA:
if (opmode == IEEE80211_M_STA)
flags |= IEEE80211_CLONE_NOBEACONS;
/* fall thru... */
case IEEE80211_M_MONITOR:
break;
default:
- if_printf(ifp, "%s mode not supported\n",
+ device_printf(sc->malo_dev, "%s mode not supported\n",
ieee80211_opmode_name[opmode]);
return NULL; /* unsupported */
}
- mvp = (struct malo_vap *) malloc(sizeof(struct malo_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (mvp == NULL) {
- if_printf(ifp, "cannot allocate vap state block\n");
- return NULL;
- }
+ mvp = malloc(sizeof(struct malo_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &mvp->malo_vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
/* override state transition machine */
mvp->malo_newstate = vap->iv_newstate;
vap->iv_newstate = malo_newstate;
/* complete setup */
ieee80211_vap_attach(vap,
- ieee80211_media_change, ieee80211_media_status);
+ ieee80211_media_change, ieee80211_media_status, mac);
ic->ic_opmode = opmode;
return vap;
}
static void
malo_vap_delete(struct ieee80211vap *vap)
{
struct malo_vap *mvp = MALO_VAP(vap);
ieee80211_vap_detach(vap);
free(mvp, M_80211_VAP);
}
int
malo_intr(void *arg)
{
struct malo_softc *sc = arg;
struct malo_hal *mh = sc->malo_mh;
uint32_t status;
if (sc->malo_invalid) {
/*
* The hardware is not ready/present, don't touch anything.
* Note this can happen early on if the IRQ is shared.
*/
DPRINTF(sc, MALO_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
return (FILTER_STRAY);
}
/*
* Figure out the reason(s) for the interrupt.
*/
malo_hal_getisr(mh, &status); /* NB: clears ISR too */
if (status == 0) /* must be a shared irq */
return (FILTER_STRAY);
DPRINTF(sc, MALO_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
__func__, status, sc->malo_imask);
if (status & MALO_A2HRIC_BIT_RX_RDY)
taskqueue_enqueue_fast(sc->malo_tq, &sc->malo_rxtask);
if (status & MALO_A2HRIC_BIT_TX_DONE)
taskqueue_enqueue_fast(sc->malo_tq, &sc->malo_txtask);
if (status & MALO_A2HRIC_BIT_OPC_DONE)
malo_hal_cmddone(mh);
if (status & MALO_A2HRIC_BIT_MAC_EVENT)
;
if (status & MALO_A2HRIC_BIT_RX_PROBLEM)
;
if (status & MALO_A2HRIC_BIT_ICV_ERROR) {
/* TKIP ICV error */
sc->malo_stats.mst_rx_badtkipicv++;
}
#ifdef MALO_DEBUG
if (((status | sc->malo_imask) ^ sc->malo_imask) != 0)
DPRINTF(sc, MALO_DEBUG_INTR,
"%s: can't handle interrupt status 0x%x\n",
__func__, status);
#endif
return (FILTER_HANDLED);
}
static void
malo_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
bus_addr_t *paddr = (bus_addr_t*) arg;
KASSERT(error == 0, ("error %u on bus_dma callback", error));
*paddr = segs->ds_addr;
}
static int
malo_desc_setup(struct malo_softc *sc, const char *name,
struct malo_descdma *dd,
int nbuf, size_t bufsize, int ndesc, size_t descsize)
{
int error;
- struct ifnet *ifp = sc->malo_ifp;
uint8_t *ds;
DPRINTF(sc, MALO_DEBUG_RESET,
"%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
__func__, name, nbuf, (uintmax_t) bufsize,
ndesc, (uintmax_t) descsize);
dd->dd_name = name;
dd->dd_desc_len = nbuf * ndesc * descsize;
/*
* Setup DMA descriptor area.
*/
error = bus_dma_tag_create(bus_get_dma_tag(sc->malo_dev),/* parent */
PAGE_SIZE, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
dd->dd_desc_len, /* maxsize */
1, /* nsegments */
dd->dd_desc_len, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&dd->dd_dmat);
if (error != 0) {
- if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
+ device_printf(sc->malo_dev, "cannot allocate %s DMA tag\n",
+ dd->dd_name);
return error;
}
/* allocate descriptors */
error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dd->dd_dmamap);
if (error != 0) {
- if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
+ device_printf(sc->malo_dev,
+ "unable to alloc memory for %u %s descriptors, "
"error %u\n", nbuf * ndesc, dd->dd_name, error);
goto fail1;
}
error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
dd->dd_desc, dd->dd_desc_len,
malo_load_cb, &dd->dd_desc_paddr, BUS_DMA_NOWAIT);
if (error != 0) {
- if_printf(ifp, "unable to map %s descriptors, error %u\n",
+ device_printf(sc->malo_dev,
+ "unable to map %s descriptors, error %u\n",
dd->dd_name, error);
goto fail2;
}
ds = dd->dd_desc;
memset(ds, 0, dd->dd_desc_len);
DPRINTF(sc, MALO_DEBUG_RESET,
"%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
__func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
(uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
return 0;
fail2:
bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
fail1:
bus_dma_tag_destroy(dd->dd_dmat);
memset(dd, 0, sizeof(*dd));
return error;
}
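/*
 * DS2PHYS maps a descriptor's kernel virtual address to its bus address:
 * take the descriptor's byte offset from the start of the dd_desc area
 * and add it to the dd_desc_paddr obtained from bus_dmamap_load above.
 */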
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
static int
malo_rxdma_setup(struct malo_softc *sc)
{
- struct ifnet *ifp = sc->malo_ifp;
int error, bsize, i;
struct malo_rxbuf *bf;
struct malo_rxdesc *ds;
error = malo_desc_setup(sc, "rx", &sc->malo_rxdma,
malo_rxbuf, sizeof(struct malo_rxbuf),
1, sizeof(struct malo_rxdesc));
if (error != 0)
return error;
/*
* Allocate rx buffers and set them up.
*/
bsize = malo_rxbuf * sizeof(struct malo_rxbuf);
bf = malloc(bsize, M_MALODEV, M_NOWAIT | M_ZERO);
if (bf == NULL) {
- if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
+ device_printf(sc->malo_dev,
+ "malloc of %u rx buffers failed\n", bsize);
return error;
}
sc->malo_rxdma.dd_bufptr = bf;
STAILQ_INIT(&sc->malo_rxbuf);
ds = sc->malo_rxdma.dd_desc;
for (i = 0; i < malo_rxbuf; i++, bf++, ds++) {
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(&sc->malo_rxdma, ds);
error = bus_dmamap_create(sc->malo_dmat, BUS_DMA_NOWAIT,
&bf->bf_dmamap);
if (error != 0) {
- if_printf(ifp, "%s: unable to dmamap for rx buffer, "
- "error %d\n", __func__, error);
+ device_printf(sc->malo_dev,
+ "%s: unable to dmamap for rx buffer, error %d\n",
+ __func__, error);
return error;
}
/* NB: tail is intentional to preserve descriptor order */
STAILQ_INSERT_TAIL(&sc->malo_rxbuf, bf, bf_list);
}
return 0;
}
static int
malo_txdma_setup(struct malo_softc *sc, struct malo_txq *txq)
{
- struct ifnet *ifp = sc->malo_ifp;
int error, bsize, i;
struct malo_txbuf *bf;
struct malo_txdesc *ds;
error = malo_desc_setup(sc, "tx", &txq->dma,
malo_txbuf, sizeof(struct malo_txbuf),
MALO_TXDESC, sizeof(struct malo_txdesc));
if (error != 0)
return error;
/* allocate and setup tx buffers */
bsize = malo_txbuf * sizeof(struct malo_txbuf);
bf = malloc(bsize, M_MALODEV, M_NOWAIT | M_ZERO);
if (bf == NULL) {
- if_printf(ifp, "malloc of %u tx buffers failed\n",
+ device_printf(sc->malo_dev, "malloc of %u tx buffers failed\n",
malo_txbuf);
return ENOMEM;
}
txq->dma.dd_bufptr = bf;
STAILQ_INIT(&txq->free);
txq->nfree = 0;
ds = txq->dma.dd_desc;
for (i = 0; i < malo_txbuf; i++, bf++, ds += MALO_TXDESC) {
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(&txq->dma, ds);
error = bus_dmamap_create(sc->malo_dmat, BUS_DMA_NOWAIT,
&bf->bf_dmamap);
if (error != 0) {
- if_printf(ifp, "unable to create dmamap for tx "
+ device_printf(sc->malo_dev,
+ "unable to create dmamap for tx "
"buffer %u, error %u\n", i, error);
return error;
}
STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
txq->nfree++;
}
return 0;
}
static void
malo_desc_cleanup(struct malo_softc *sc, struct malo_descdma *dd)
{
bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
bus_dma_tag_destroy(dd->dd_dmat);
memset(dd, 0, sizeof(*dd));
}
static void
malo_rxdma_cleanup(struct malo_softc *sc)
{
struct malo_rxbuf *bf;
STAILQ_FOREACH(bf, &sc->malo_rxbuf, bf_list) {
if (bf->bf_m != NULL) {
m_freem(bf->bf_m);
bf->bf_m = NULL;
}
if (bf->bf_dmamap != NULL) {
bus_dmamap_destroy(sc->malo_dmat, bf->bf_dmamap);
bf->bf_dmamap = NULL;
}
}
STAILQ_INIT(&sc->malo_rxbuf);
if (sc->malo_rxdma.dd_bufptr != NULL) {
free(sc->malo_rxdma.dd_bufptr, M_MALODEV);
sc->malo_rxdma.dd_bufptr = NULL;
}
if (sc->malo_rxdma.dd_desc_len != 0)
malo_desc_cleanup(sc, &sc->malo_rxdma);
}
static void
malo_txdma_cleanup(struct malo_softc *sc, struct malo_txq *txq)
{
struct malo_txbuf *bf;
struct ieee80211_node *ni;
STAILQ_FOREACH(bf, &txq->free, bf_list) {
if (bf->bf_m != NULL) {
m_freem(bf->bf_m);
bf->bf_m = NULL;
}
ni = bf->bf_node;
bf->bf_node = NULL;
if (ni != NULL) {
/*
* Reclaim node reference.
*/
ieee80211_free_node(ni);
}
if (bf->bf_dmamap != NULL) {
bus_dmamap_destroy(sc->malo_dmat, bf->bf_dmamap);
bf->bf_dmamap = NULL;
}
}
STAILQ_INIT(&txq->free);
txq->nfree = 0;
if (txq->dma.dd_bufptr != NULL) {
free(txq->dma.dd_bufptr, M_MALODEV);
txq->dma.dd_bufptr = NULL;
}
if (txq->dma.dd_desc_len != 0)
malo_desc_cleanup(sc, &txq->dma);
}
static void
malo_dma_cleanup(struct malo_softc *sc)
{
int i;
for (i = 0; i < MALO_NUM_TX_QUEUES; i++)
malo_txdma_cleanup(sc, &sc->malo_txq[i]);
malo_rxdma_cleanup(sc);
}
static int
malo_dma_setup(struct malo_softc *sc)
{
int error, i;
/* rxdma initializing. */
error = malo_rxdma_setup(sc);
if (error != 0)
return error;
/* NB: we just have 1 tx queue now. */
for (i = 0; i < MALO_NUM_TX_QUEUES; i++) {
error = malo_txdma_setup(sc, &sc->malo_txq[i]);
if (error != 0) {
malo_dma_cleanup(sc);
return error;
}
malo_txq_init(sc, &sc->malo_txq[i], i);
}
return 0;
}
static void
malo_hal_set_rxtxdma(struct malo_softc *sc)
{
int i;
malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_read,
sc->malo_hwdma.rxdesc_read);
malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_write,
sc->malo_hwdma.rxdesc_read);
for (i = 0; i < MALO_NUM_TX_QUEUES; i++) {
malo_bar0_write4(sc,
sc->malo_hwspecs.wcbbase[i], sc->malo_hwdma.wcbbase[i]);
}
}
/*
* Inform firmware of our tx/rx dma setup. The BAR 0 writes below are
* for compatibility with older firmware. For current firmware we send
* this information with a cmd block via malo_hal_sethwdma.
*/
static int
malo_setup_hwdma(struct malo_softc *sc)
{
int i;
struct malo_txq *txq;
sc->malo_hwdma.rxdesc_read = sc->malo_rxdma.dd_desc_paddr;
for (i = 0; i < MALO_NUM_TX_QUEUES; i++) {
txq = &sc->malo_txq[i];
sc->malo_hwdma.wcbbase[i] = txq->dma.dd_desc_paddr;
}
sc->malo_hwdma.maxnum_txwcb = malo_txbuf;
sc->malo_hwdma.maxnum_wcb = MALO_NUM_TX_QUEUES;
malo_hal_set_rxtxdma(sc);
return 0;
}
static void
malo_txq_init(struct malo_softc *sc, struct malo_txq *txq, int qnum)
{
struct malo_txbuf *bf, *bn;
struct malo_txdesc *ds;
MALO_TXQ_LOCK_INIT(sc, txq);
txq->qnum = qnum;
txq->txpri = 0; /* XXX */
STAILQ_FOREACH(bf, &txq->free, bf_list) {
bf->bf_txq = txq;
ds = bf->bf_desc;
bn = STAILQ_NEXT(bf, bf_list);
if (bn == NULL)
bn = STAILQ_FIRST(&txq->free);
ds->physnext = htole32(bn->bf_daddr);
}
STAILQ_INIT(&txq->active);
}
/*
* Reclaim resources for a setup queue.
*/
static void
malo_tx_cleanupq(struct malo_softc *sc, struct malo_txq *txq)
{
/* XXX hal work? */
MALO_TXQ_LOCK_DESTROY(txq);
}
/*
* Allocate a tx buffer for sending a frame.
*/
static struct malo_txbuf *
malo_getbuf(struct malo_softc *sc, struct malo_txq *txq)
{
struct malo_txbuf *bf;
MALO_TXQ_LOCK(txq);
bf = STAILQ_FIRST(&txq->free);
if (bf != NULL) {
STAILQ_REMOVE_HEAD(&txq->free, bf_list);
txq->nfree--;
}
MALO_TXQ_UNLOCK(txq);
if (bf == NULL) {
DPRINTF(sc, MALO_DEBUG_XMIT,
"%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
sc->malo_stats.mst_tx_qstop++;
}
return bf;
}
static int
malo_tx_dmasetup(struct malo_softc *sc, struct malo_txbuf *bf, struct mbuf *m0)
{
struct mbuf *m;
int error;
/*
* Load the DMA map so any coalescing is done. This also calculates
* the number of descriptors we need.
*/
error = bus_dmamap_load_mbuf_sg(sc->malo_dmat, bf->bf_dmamap, m0,
bf->bf_segs, &bf->bf_nseg,
BUS_DMA_NOWAIT);
if (error == EFBIG) {
/* XXX packet requires too many descriptors */
bf->bf_nseg = MALO_TXDESC + 1;
} else if (error != 0) {
sc->malo_stats.mst_tx_busdma++;
m_freem(m0);
return error;
}
/*
* Discard null packets and check for packets that require too many
* TX descriptors. We try to convert the latter to a cluster.
*/
if (error == EFBIG) { /* too many desc's, linearize */
sc->malo_stats.mst_tx_linear++;
m = m_defrag(m0, M_NOWAIT);
if (m == NULL) {
m_freem(m0);
sc->malo_stats.mst_tx_nombuf++;
return ENOMEM;
}
m0 = m;
error = bus_dmamap_load_mbuf_sg(sc->malo_dmat, bf->bf_dmamap, m0,
bf->bf_segs, &bf->bf_nseg,
BUS_DMA_NOWAIT);
if (error != 0) {
sc->malo_stats.mst_tx_busdma++;
m_freem(m0);
return error;
}
KASSERT(bf->bf_nseg <= MALO_TXDESC,
("too many segments after defrag; nseg %u", bf->bf_nseg));
} else if (bf->bf_nseg == 0) { /* null packet, discard */
sc->malo_stats.mst_tx_nodata++;
m_freem(m0);
return EIO;
}
DPRINTF(sc, MALO_DEBUG_XMIT, "%s: m %p len %u\n",
__func__, m0, m0->m_pkthdr.len);
bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
bf->bf_m = m0;
return 0;
}
#ifdef MALO_DEBUG
static void
malo_printrxbuf(const struct malo_rxbuf *bf, u_int ix)
{
const struct malo_rxdesc *ds = bf->bf_desc;
uint32_t status = le32toh(ds->status);
printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
" STAT:%02x LEN:%04x SNR:%02x NF:%02x CHAN:%02x"
" RATE:%02x QOS:%04x\n", ix, ds, (uintmax_t)bf->bf_daddr,
le32toh(ds->physnext), le32toh(ds->physbuffdata),
ds->rxcontrol,
ds->rxcontrol != MALO_RXD_CTRL_DRIVER_OWN ?
"" : (status & MALO_RXD_STATUS_OK) ? " *" : " !",
ds->status, le16toh(ds->pktlen), ds->snr, ds->nf, ds->channel,
ds->rate, le16toh(ds->qosctrl));
}
static void
malo_printtxbuf(const struct malo_txbuf *bf, u_int qnum, u_int ix)
{
const struct malo_txdesc *ds = bf->bf_desc;
uint32_t status = le32toh(ds->status);
printf("Q%u[%3u]", qnum, ix);
printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
le32toh(ds->physnext),
le32toh(ds->pktptr), le16toh(ds->pktlen), status,
status & MALO_TXD_STATUS_USED ?
"" : (status & 3) != 0 ? " *" : " !");
printf(" RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
ds->datarate, ds->txpriority, le16toh(ds->qosctrl),
le32toh(ds->sap_pktinfo), le16toh(ds->format));
#if 0
{
const uint8_t *cp = (const uint8_t *) ds;
int i;
for (i = 0; i < sizeof(struct malo_txdesc); i++) {
printf("%02x ", cp[i]);
if (((i+1) % 16) == 0)
printf("\n");
}
printf("\n");
}
#endif
}
#endif /* MALO_DEBUG */
static __inline void
malo_updatetxrate(struct ieee80211_node *ni, int rix)
{
#define N(x) (sizeof(x)/sizeof(x[0]))
static const int ieeerates[] =
{ 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 96, 108 };
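/*
 * NB: entries are net80211 rate codes in 0.5 Mb/s units (2 = 1 Mb/s,
 * 108 = 54 Mb/s), indexed by the h/w rate index from the tx descriptor.
 */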
if (rix < N(ieeerates))
ni->ni_txrate = ieeerates[rix];
#undef N
}
static int
malo_fix2rate(int fix_rate)
{
#define N(x) (sizeof(x)/sizeof(x[0]))
static const int rates[] =
{ 2, 4, 11, 22, 12, 18, 24, 36, 48, 96, 108 };
return (fix_rate < N(rates) ? rates[fix_rate] : 0);
#undef N
}
/* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
#define MS(v,x) (((v) & x) >> x##_S)
#define SM(v,x) (((v) << x##_S) & x)
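/*
 * e.g. MS(format, MALO_TXD_ANTENNA) extracts the tx antenna field from a
 * descriptor's format word using the MALO_TXD_ANTENNA mask and its _S
 * shift; SM() is the inverse, building a field value for insertion.
 */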
/*
* Process completed xmit descriptors from the specified queue.
*/
static int
malo_tx_processq(struct malo_softc *sc, struct malo_txq *txq)
{
struct malo_txbuf *bf;
struct malo_txdesc *ds;
struct ieee80211_node *ni;
int nreaped;
uint32_t status;
DPRINTF(sc, MALO_DEBUG_TX_PROC, "%s: tx queue %u\n",
__func__, txq->qnum);
for (nreaped = 0;; nreaped++) {
MALO_TXQ_LOCK(txq);
bf = STAILQ_FIRST(&txq->active);
if (bf == NULL) {
MALO_TXQ_UNLOCK(txq);
break;
}
ds = bf->bf_desc;
MALO_TXDESC_SYNC(txq, ds,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
if (ds->status & htole32(MALO_TXD_STATUS_FW_OWNED)) {
MALO_TXQ_UNLOCK(txq);
break;
}
STAILQ_REMOVE_HEAD(&txq->active, bf_list);
MALO_TXQ_UNLOCK(txq);
#ifdef MALO_DEBUG
if (sc->malo_debug & MALO_DEBUG_XMIT_DESC)
malo_printtxbuf(bf, txq->qnum, nreaped);
#endif
ni = bf->bf_node;
if (ni != NULL) {
status = le32toh(ds->status);
if (status & MALO_TXD_STATUS_OK) {
uint16_t format = le16toh(ds->format);
uint8_t txant = MS(format, MALO_TXD_ANTENNA);
sc->malo_stats.mst_ant_tx[txant]++;
if (status & MALO_TXD_STATUS_OK_RETRY)
sc->malo_stats.mst_tx_retries++;
if (status & MALO_TXD_STATUS_OK_MORE_RETRY)
sc->malo_stats.mst_tx_mretries++;
malo_updatetxrate(ni, ds->datarate);
sc->malo_stats.mst_tx_rate = ds->datarate;
} else {
if (status & MALO_TXD_STATUS_FAILED_LINK_ERROR)
sc->malo_stats.mst_tx_linkerror++;
if (status & MALO_TXD_STATUS_FAILED_XRETRY)
sc->malo_stats.mst_tx_xretries++;
if (status & MALO_TXD_STATUS_FAILED_AGING)
sc->malo_stats.mst_tx_aging++;
}
- /*
- * Do any tx complete callback. Note this must
- * be done before releasing the node reference.
- * XXX no way to figure out if frame was ACK'd
- */
- if (bf->bf_m->m_flags & M_TXCB) {
- /* XXX strip fw len in case header inspected */
- m_adj(bf->bf_m, sizeof(uint16_t));
- ieee80211_process_callback(ni, bf->bf_m,
- (status & MALO_TXD_STATUS_OK) == 0);
- }
- /*
- * Reclaim reference to node.
- *
- * NB: the node may be reclaimed here if, for example
- * this is a DEAUTH message that was sent and the
- * node was timed out due to inactivity.
- */
- ieee80211_free_node(ni);
- }
+ /* XXX strip fw len in case header inspected */
+ m_adj(bf->bf_m, sizeof(uint16_t));
+ ieee80211_tx_complete(ni, bf->bf_m,
+ (status & MALO_TXD_STATUS_OK) == 0);
+ } else
+ m_freem(bf->bf_m);
+
ds->status = htole32(MALO_TXD_STATUS_IDLE);
ds->pktlen = htole32(0);
bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->malo_dmat, bf->bf_dmamap);
- m_freem(bf->bf_m);
bf->bf_m = NULL;
bf->bf_node = NULL;
MALO_TXQ_LOCK(txq);
STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
txq->nfree++;
MALO_TXQ_UNLOCK(txq);
}
return nreaped;
}
/*
* Deferred processing of transmit interrupt.
*/
static void
malo_tx_proc(void *arg, int npending)
{
struct malo_softc *sc = arg;
- struct ifnet *ifp = sc->malo_ifp;
int i, nreaped;
/*
* Process each active queue.
*/
nreaped = 0;
+ MALO_LOCK(sc);
for (i = 0; i < MALO_NUM_TX_QUEUES; i++) {
if (!STAILQ_EMPTY(&sc->malo_txq[i].active))
nreaped += malo_tx_processq(sc, &sc->malo_txq[i]);
}
if (nreaped != 0) {
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
sc->malo_timer = 0;
- malo_start(ifp);
+ malo_start(sc);
}
+ MALO_UNLOCK(sc);
}
static int
malo_tx_start(struct malo_softc *sc, struct ieee80211_node *ni,
struct malo_txbuf *bf, struct mbuf *m0)
{
#define IEEE80211_DIR_DSTODS(wh) \
((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
#define IS_DATA_FRAME(wh) \
((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK)) == IEEE80211_FC0_TYPE_DATA)
int error, ismcast, iswep;
int copyhdrlen, hdrlen, pktlen;
struct ieee80211_frame *wh;
- struct ifnet *ifp = sc->malo_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->malo_ic;
struct ieee80211vap *vap = ni->ni_vap;
struct malo_txdesc *ds;
struct malo_txrec *tr;
struct malo_txq *txq;
uint16_t qos;
wh = mtod(m0, struct ieee80211_frame *);
iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
copyhdrlen = hdrlen = ieee80211_anyhdrsize(wh);
pktlen = m0->m_pkthdr.len;
if (IEEE80211_QOS_HAS_SEQ(wh)) {
if (IEEE80211_DIR_DSTODS(wh)) {
qos = *(uint16_t *)
(((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
copyhdrlen -= sizeof(qos);
} else
qos = *(uint16_t *)
(((struct ieee80211_qosframe *) wh)->i_qos);
} else
qos = 0;
if (iswep) {
struct ieee80211_key *k;
/*
* Construct the 802.11 header+trailer for an encrypted
* frame. The only reason this can fail is because of an
* unknown or unsupported cipher/key type.
*
* NB: we do this even though the firmware will ignore
* what we've done for WEP and TKIP as we need the
* ExtIV filled in for CCMP and this also adjusts
* the headers which simplifies our work below.
*/
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
/*
* This can happen when the key is yanked after the
* frame was queued. Just discard the frame; the
* 802.11 layer counts failures and provides
* debugging/diagnostics.
*/
m_freem(m0);
return EIO;
}
/*
* Adjust the packet length for the crypto additions
* done during encap and any other bits that the f/w
* will add later on.
*/
pktlen = m0->m_pkthdr.len;
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
if (ieee80211_radiotap_active_vap(vap)) {
sc->malo_tx_th.wt_flags = 0; /* XXX */
if (iswep)
sc->malo_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
sc->malo_tx_th.wt_txpower = ni->ni_txpower;
sc->malo_tx_th.wt_antenna = sc->malo_txantenna;
ieee80211_radiotap_tx(vap, m0);
}
/*
* Copy up/down the 802.11 header; the firmware requires
* we present a 2-byte payload length followed by a
* 4-address header (w/o QoS), followed (optionally) by
* any WEP/ExtIV header (but only filled in for CCMP).
* We are assured the mbuf has sufficient headroom to
* prepend in-place by the setup of ic_headroom in
* malo_attach.
*/
if (hdrlen < sizeof(struct malo_txrec)) {
const int space = sizeof(struct malo_txrec) - hdrlen;
if (M_LEADINGSPACE(m0) < space) {
/* NB: should never happen */
device_printf(sc->malo_dev,
"not enough headroom, need %d found %zd, "
"m_flags 0x%x m_len %d\n",
space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
ieee80211_dump_pkt(ic,
mtod(m0, const uint8_t *), m0->m_len, 0, -1);
m_freem(m0);
/* XXX stat */
return EIO;
}
M_PREPEND(m0, space, M_NOWAIT);
}
tr = mtod(m0, struct malo_txrec *);
if (wh != (struct ieee80211_frame *) &tr->wh)
ovbcopy(wh, &tr->wh, hdrlen);
/*
* Note: the "firmware length" is actually the length of the fully
* formed "802.11 payload". That is, it's everything except for
* the 802.11 header. In particular this includes all crypto
* material including the MIC!
*/
tr->fwlen = htole16(pktlen - hdrlen);
/*
* Load the DMA map so any coalescing is done. This
* also calculates the number of descriptors we need.
*/
error = malo_tx_dmasetup(sc, bf, m0);
if (error != 0)
return error;
bf->bf_node = ni; /* NB: held reference */
m0 = bf->bf_m; /* NB: may have changed */
tr = mtod(m0, struct malo_txrec *);
wh = (struct ieee80211_frame *)&tr->wh;
/*
* Formulate tx descriptor.
*/
ds = bf->bf_desc;
txq = bf->bf_txq;
ds->qosctrl = qos; /* NB: already little-endian */
ds->pktptr = htole32(bf->bf_segs[0].ds_addr);
ds->pktlen = htole16(bf->bf_segs[0].ds_len);
/* NB: pPhysNext setup once, don't touch */
ds->datarate = IS_DATA_FRAME(wh) ? 1 : 0;
ds->sap_pktinfo = 0;
ds->format = 0;
/*
* Select transmit rate.
*/
switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
case IEEE80211_FC0_TYPE_MGT:
sc->malo_stats.mst_tx_mgmt++;
/* fall thru... */
case IEEE80211_FC0_TYPE_CTL:
ds->txpriority = 1;
break;
case IEEE80211_FC0_TYPE_DATA:
ds->txpriority = txq->qnum;
break;
default:
- if_printf(ifp, "bogus frame type 0x%x (%s)\n",
+ device_printf(sc->malo_dev, "bogus frame type 0x%x (%s)\n",
wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
/* XXX statistic */
m_freem(m0);
return EIO;
}
#ifdef MALO_DEBUG
if (IFF_DUMPPKTS_XMIT(sc))
ieee80211_dump_pkt(ic,
mtod(m0, const uint8_t *)+sizeof(uint16_t),
m0->m_len - sizeof(uint16_t), ds->datarate, -1);
#endif
MALO_TXQ_LOCK(txq);
if (!IS_DATA_FRAME(wh))
ds->status |= htole32(1);
ds->status |= htole32(MALO_TXD_STATUS_FW_OWNED);
STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
MALO_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
sc->malo_timer = 5;
MALO_TXQ_UNLOCK(txq);
return 0;
#undef IEEE80211_DIR_DSTODS
}
+static int
+malo_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct malo_softc *sc = ic->ic_softc;
+ int error;
+
+ MALO_LOCK(sc);
+ if (!sc->malo_running) {
+ MALO_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->malo_snd, m);
+ if (error) {
+ MALO_UNLOCK(sc);
+ return (error);
+ }
+ malo_start(sc);
+ MALO_UNLOCK(sc);
+ return (0);
+}
+
static void
-malo_start(struct ifnet *ifp)
+malo_start(struct malo_softc *sc)
{
- struct malo_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct malo_txq *txq = &sc->malo_txq[0];
struct malo_txbuf *bf = NULL;
struct mbuf *m;
int nqueued = 0;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->malo_invalid)
+ MALO_LOCK_ASSERT(sc);
+
+ if (!sc->malo_running || sc->malo_invalid)
return;
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
+ while ((m = mbufq_dequeue(&sc->malo_snd)) != NULL) {
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
bf = malo_getbuf(sc, txq);
if (bf == NULL) {
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
-
- /* XXX blocks other traffic */
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ mbufq_prepend(&sc->malo_snd, m);
sc->malo_stats.mst_tx_qstop++;
break;
}
/*
* Pass the frame to the h/w for transmission.
*/
if (malo_tx_start(sc, ni, bf, m)) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
if (bf != NULL) {
bf->bf_m = NULL;
bf->bf_node = NULL;
MALO_TXQ_LOCK(txq);
STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
MALO_TXQ_UNLOCK(txq);
}
ieee80211_free_node(ni);
continue;
}
nqueued++;
if (nqueued >= malo_txcoalesce) {
/*
* Poke the firmware to process queued frames;
* see below about (lack of) locking.
*/
nqueued = 0;
malo_hal_txstart(sc->malo_mh, 0/*XXX*/);
}
}
if (nqueued) {
/*
* NB: We don't need to lock against tx done because
* this just prods the firmware to check the transmit
* descriptors. The firmware will also start fetching
* descriptors by itself if it notices new ones are
* present when it goes to deliver a tx done interrupt
* to the host. So if we race with tx done processing
* it's ok. Delivering the kick here rather than in
* malo_tx_start is an optimization to avoid poking the
* firmware for each packet.
*
* NB: the queue id isn't used so 0 is ok.
*/
malo_hal_txstart(sc->malo_mh, 0/*XXX*/);
}
}
static void
malo_watchdog(void *arg)
{
- struct malo_softc *sc;
- struct ifnet *ifp;
+ struct malo_softc *sc = arg;
- sc = arg;
callout_reset(&sc->malo_watchdog_timer, hz, malo_watchdog, sc);
if (sc->malo_timer == 0 || --sc->malo_timer > 0)
return;
- ifp = sc->malo_ifp;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->malo_invalid) {
- if_printf(ifp, "watchdog timeout\n");
+ if (sc->malo_running && !sc->malo_invalid) {
+ device_printf(sc->malo_dev, "watchdog timeout\n");
/* XXX no way to reset the h/w for now */
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(sc->malo_ic.ic_oerrors, 1);
sc->malo_stats.mst_watchdog++;
}
}
static int
malo_hal_reset(struct malo_softc *sc)
{
static int first = 0;
- struct ifnet *ifp = sc->malo_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->malo_ic;
struct malo_hal *mh = sc->malo_mh;
if (first == 0) {
/*
* NB: when the device is first initialized, the firmware can
* sometimes override the rx/tx dma registers, so we re-set
* these values once.
*/
malo_hal_set_rxtxdma(sc);
first = 1;
}
malo_hal_setantenna(mh, MHA_ANTENNATYPE_RX, sc->malo_rxantenna);
malo_hal_setantenna(mh, MHA_ANTENNATYPE_TX, sc->malo_txantenna);
malo_hal_setradio(mh, 1, MHP_AUTO_PREAMBLE);
malo_chan_set(sc, ic->ic_curchan);
/* XXX needs other stuffs? */
return 1;
}
static __inline struct mbuf *
malo_getrxmbuf(struct malo_softc *sc, struct malo_rxbuf *bf)
{
struct mbuf *m;
bus_addr_t paddr;
int error;
/* XXX don't need mbuf, just dma buffer */
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
if (m == NULL) {
sc->malo_stats.mst_rx_nombuf++; /* XXX */
return NULL;
}
error = bus_dmamap_load(sc->malo_dmat, bf->bf_dmamap,
mtod(m, caddr_t), MJUMPAGESIZE,
malo_load_cb, &paddr, BUS_DMA_NOWAIT);
if (error != 0) {
- if_printf(sc->malo_ifp,
+ device_printf(sc->malo_dev,
"%s: bus_dmamap_load failed, error %d\n", __func__, error);
m_freem(m);
return NULL;
}
bf->bf_data = paddr;
bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
return m;
}
static int
malo_rxbuf_init(struct malo_softc *sc, struct malo_rxbuf *bf)
{
struct malo_rxdesc *ds;
ds = bf->bf_desc;
if (bf->bf_m == NULL) {
bf->bf_m = malo_getrxmbuf(sc, bf);
if (bf->bf_m == NULL) {
/* mark descriptor to be skipped */
ds->rxcontrol = MALO_RXD_CTRL_OS_OWN;
/* NB: don't need PREREAD */
MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
return ENOMEM;
}
}
/*
* Setup descriptor.
*/
ds->qosctrl = 0;
ds->snr = 0;
ds->status = MALO_RXD_STATUS_IDLE;
ds->channel = 0;
ds->pktlen = htole16(MALO_RXSIZE);
ds->nf = 0;
ds->physbuffdata = htole32(bf->bf_data);
/* NB: don't touch pPhysNext, set once */
ds->rxcontrol = MALO_RXD_CTRL_DRIVER_OWN;
MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return 0;
}
/*
* Setup the rx data structures. This should only be done once or we may get
* out of sync with the firmware.
*/
static int
malo_startrecv(struct malo_softc *sc)
{
struct malo_rxbuf *bf, *prev;
struct malo_rxdesc *ds;
if (sc->malo_recvsetup == 1) {
malo_mode_init(sc); /* set filters, etc. */
return 0;
}
prev = NULL;
STAILQ_FOREACH(bf, &sc->malo_rxbuf, bf_list) {
int error = malo_rxbuf_init(sc, bf);
if (error != 0) {
DPRINTF(sc, MALO_DEBUG_RECV,
"%s: malo_rxbuf_init failed %d\n",
__func__, error);
return error;
}
if (prev != NULL) {
ds = prev->bf_desc;
ds->physnext = htole32(bf->bf_daddr);
}
prev = bf;
}
if (prev != NULL) {
ds = prev->bf_desc;
ds->physnext =
htole32(STAILQ_FIRST(&sc->malo_rxbuf)->bf_daddr);
}
sc->malo_recvsetup = 1;
malo_mode_init(sc); /* set filters, etc. */
return 0;
}
static void
malo_init_locked(struct malo_softc *sc)
{
- struct ifnet *ifp = sc->malo_ifp;
struct malo_hal *mh = sc->malo_mh;
int error;
- DPRINTF(sc, MALO_DEBUG_ANY, "%s: if_flags 0x%x\n",
- __func__, ifp->if_flags);
-
MALO_LOCK_ASSERT(sc);
/*
* Stop anything previously setup. This is safe whether this is
* the first time through or not.
*/
- malo_stop_locked(ifp, 0);
+ malo_stop(sc);
/*
* Push state to the firmware.
*/
if (!malo_hal_reset(sc)) {
- if_printf(ifp, "%s: unable to reset hardware\n", __func__);
+ device_printf(sc->malo_dev,
+ "%s: unable to reset hardware\n", __func__);
return;
}
/*
* Setup recv (once); transmit is already good to go.
*/
error = malo_startrecv(sc);
if (error != 0) {
- if_printf(ifp, "%s: unable to start recv logic, error %d\n",
+ device_printf(sc->malo_dev,
+ "%s: unable to start recv logic, error %d\n",
__func__, error);
return;
}
/*
* Enable interrupts.
*/
sc->malo_imask = MALO_A2HRIC_BIT_RX_RDY
| MALO_A2HRIC_BIT_TX_DONE
| MALO_A2HRIC_BIT_OPC_DONE
| MALO_A2HRIC_BIT_MAC_EVENT
| MALO_A2HRIC_BIT_RX_PROBLEM
| MALO_A2HRIC_BIT_ICV_ERROR
| MALO_A2HRIC_BIT_RADAR_DETECT
| MALO_A2HRIC_BIT_CHAN_SWITCH;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->malo_running = 1;
malo_hal_intrset(mh, sc->malo_imask);
callout_reset(&sc->malo_watchdog_timer, hz, malo_watchdog, sc);
}
static void
malo_init(void *arg)
{
struct malo_softc *sc = (struct malo_softc *) arg;
- struct ifnet *ifp = sc->malo_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->malo_ic;
- DPRINTF(sc, MALO_DEBUG_ANY, "%s: if_flags 0x%x\n",
- __func__, ifp->if_flags);
-
MALO_LOCK(sc);
malo_init_locked(sc);
-
MALO_UNLOCK(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->malo_running)
ieee80211_start_all(ic); /* start all vap's */
}
/*
* Set the multicast filter contents into the hardware.
*/
static void
malo_setmcastfilter(struct malo_softc *sc)
{
- struct ifnet *ifp = sc->malo_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifmultiaddr *ifma;
+ struct ieee80211com *ic = &sc->malo_ic;
+ struct ieee80211vap *vap;
uint8_t macs[IEEE80211_ADDR_LEN * MALO_HAL_MCAST_MAX];
uint8_t *mp;
int nmc;
mp = macs;
nmc = 0;
- if (ic->ic_opmode == IEEE80211_M_MONITOR ||
- (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)))
+ if (ic->ic_opmode == IEEE80211_M_MONITOR || ic->ic_allmulti > 0 ||
+ ic->ic_promisc > 0)
goto all;
-
- if_maddr_rlock(ifp);
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- if (nmc == MALO_HAL_MCAST_MAX) {
- ifp->if_flags |= IFF_ALLMULTI;
- if_maddr_runlock(ifp);
- goto all;
- }
- IEEE80211_ADDR_COPY(mp,
- LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ struct ifnet *ifp;
+ struct ifmultiaddr *ifma;
- mp += IEEE80211_ADDR_LEN, nmc++;
+ ifp = vap->iv_ifp;
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+
+ if (nmc == MALO_HAL_MCAST_MAX) {
+ ifp->if_flags |= IFF_ALLMULTI;
+ if_maddr_runlock(ifp);
+ goto all;
+ }
+ IEEE80211_ADDR_COPY(mp,
+ LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
+
+ mp += IEEE80211_ADDR_LEN, nmc++;
+ }
+ if_maddr_runlock(ifp);
}
- if_maddr_runlock(ifp);
malo_hal_setmcast(sc->malo_mh, nmc, macs);
all:
/*
* XXX we don't know how to set the f/w for supporting
* IFF_ALLMULTI | IFF_PROMISC cases
*/
return;
}
static int
malo_mode_init(struct malo_softc *sc)
{
- struct ifnet *ifp = sc->malo_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->malo_ic;
struct malo_hal *mh = sc->malo_mh;
/*
* NB: Ignore promisc in hostap mode; it's set by the
* bridge. This is wrong but we have no way to
* identify internal requests (from the bridge)
* versus external requests such as for tcpdump.
*/
- malo_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
+ malo_hal_setpromisc(mh, ic->ic_promisc > 0 &&
ic->ic_opmode != IEEE80211_M_HOSTAP);
malo_setmcastfilter(sc);
return ENXIO;
}
static void
malo_tx_draintxq(struct malo_softc *sc, struct malo_txq *txq)
{
struct ieee80211_node *ni;
struct malo_txbuf *bf;
u_int ix;
/*
* NB: this assumes output has been stopped and
* we do not need to block malo_tx_tasklet
*/
for (ix = 0;; ix++) {
MALO_TXQ_LOCK(txq);
bf = STAILQ_FIRST(&txq->active);
if (bf == NULL) {
MALO_TXQ_UNLOCK(txq);
break;
}
STAILQ_REMOVE_HEAD(&txq->active, bf_list);
MALO_TXQ_UNLOCK(txq);
#ifdef MALO_DEBUG
if (sc->malo_debug & MALO_DEBUG_RESET) {
- struct ifnet *ifp = sc->malo_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->malo_ic;
const struct malo_txrec *tr =
mtod(bf->bf_m, const struct malo_txrec *);
malo_printtxbuf(bf, txq->qnum, ix);
ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
}
#endif /* MALO_DEBUG */
bus_dmamap_unload(sc->malo_dmat, bf->bf_dmamap);
ni = bf->bf_node;
bf->bf_node = NULL;
if (ni != NULL) {
/*
* Reclaim node reference.
*/
ieee80211_free_node(ni);
}
m_freem(bf->bf_m);
bf->bf_m = NULL;
MALO_TXQ_LOCK(txq);
STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
txq->nfree++;
MALO_TXQ_UNLOCK(txq);
}
}
static void
-malo_stop_locked(struct ifnet *ifp, int disable)
+malo_stop(struct malo_softc *sc)
{
- struct malo_softc *sc = ifp->if_softc;
struct malo_hal *mh = sc->malo_mh;
int i;
- DPRINTF(sc, MALO_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
- __func__, sc->malo_invalid, ifp->if_flags);
+ DPRINTF(sc, MALO_DEBUG_ANY, "%s: invalid %u running %u\n",
+ __func__, sc->malo_invalid, sc->malo_running);
MALO_LOCK_ASSERT(sc);
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ if (!sc->malo_running)
return;
/*
* Shutdown the hardware and driver:
* disable interrupts
* turn off the radio
* drain and release tx queues
*
* Note that some of this work is not possible if the hardware
* is gone (invalid).
*/
- ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ sc->malo_running = 0;
callout_stop(&sc->malo_watchdog_timer);
sc->malo_timer = 0;
- /* diable interrupt. */
+ /* disable interrupt. */
malo_hal_intrset(mh, 0);
/* turn off the radio. */
malo_hal_setradio(mh, 0, MHP_AUTO_PREAMBLE);
/* drain and release tx queues. */
for (i = 0; i < MALO_NUM_TX_QUEUES; i++)
malo_tx_draintxq(sc, &sc->malo_txq[i]);
}
-static int
-malo_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+malo_parent(struct ieee80211com *ic)
{
-#define MALO_IS_RUNNING(ifp) \
- ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
- struct malo_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *) data;
- int error = 0, startall = 0;
+ struct malo_softc *sc = ic->ic_softc;
+ int startall = 0;
MALO_LOCK(sc);
- switch (cmd) {
- case SIOCSIFFLAGS:
- if (MALO_IS_RUNNING(ifp)) {
- /*
- * To avoid rescanning another access point,
- * do not call malo_init() here. Instead,
- * only reflect promisc mode settings.
- */
- malo_mode_init(sc);
- } else if (ifp->if_flags & IFF_UP) {
- /*
- * Beware of being called during attach/detach
- * to reset promiscuous mode. In that case we
- * will still be marked UP but not RUNNING.
- * However trying to re-init the interface
- * is the wrong thing to do as we've already
- * torn down much of our state. There's
- * probably a better way to deal with this.
- */
- if (!sc->malo_invalid) {
- malo_init_locked(sc);
- startall = 1;
- }
- } else
- malo_stop_locked(ifp, 1);
- break;
- case SIOCGIFMEDIA:
- case SIOCSIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- default:
- error = ether_ioctl(ifp, cmd, data);
- break;
- }
+ if (ic->ic_nrunning > 0) {
+ /*
+ * Beware of being called during attach/detach
+ * to reset promiscuous mode. In that case we
+ * will still be marked UP but not RUNNING.
+ * However trying to re-init the interface
+ * is the wrong thing to do as we've already
+ * torn down much of our state. There's
+ * probably a better way to deal with this.
+ */
+ if (!sc->malo_running && !sc->malo_invalid) {
+ malo_init(sc);
+ startall = 1;
+ }
+ /*
+ * To avoid rescanning another access point,
+ * do not call malo_init() here. Instead,
+ * only reflect promisc mode settings.
+ */
+ malo_mode_init(sc);
+ } else if (sc->malo_running)
+ malo_stop(sc);
MALO_UNLOCK(sc);
-
if (startall)
ieee80211_start_all(ic);
- return error;
-#undef MALO_IS_RUNNING
}
/*
* Callback from the 802.11 layer to update the slot time
* based on the current setting. We use it to notify the
* firmware of ERP changes and the f/w takes care of things
* like slot time and preamble.
*/
static void
malo_updateslot(struct ieee80211com *ic)
{
struct malo_softc *sc = ic->ic_softc;
struct malo_hal *mh = sc->malo_mh;
int error;
/* NB: can be called early; suppress needless cmds */
- if ((ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ if (!sc->malo_running)
return;
DPRINTF(sc, MALO_DEBUG_RESET,
"%s: chan %u MHz/flags 0x%x %s slot, (ic_flags 0x%x)\n",
__func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", ic->ic_flags);
if (ic->ic_flags & IEEE80211_F_SHSLOT)
error = malo_hal_set_slot(mh, 1);
else
error = malo_hal_set_slot(mh, 0);
if (error != 0)
device_printf(sc->malo_dev, "setting %s slot failed\n",
ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long");
}
static int
malo_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct ieee80211com *ic = vap->iv_ic;
- struct malo_softc *sc = ic->ic_ifp->if_softc;
+ struct malo_softc *sc = ic->ic_softc;
struct malo_hal *mh = sc->malo_mh;
int error;
DPRINTF(sc, MALO_DEBUG_STATE, "%s: %s -> %s\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate]);
/*
* Invoke the net80211 layer first so iv_bss is setup.
*/
error = MALO_VAP(vap)->malo_newstate(vap, nstate, arg);
if (error != 0)
return error;
if (nstate == IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_RUN) {
struct ieee80211_node *ni = vap->iv_bss;
enum ieee80211_phymode mode = ieee80211_chan2mode(ni->ni_chan);
const struct ieee80211_txparam *tp = &vap->iv_txparms[mode];
DPRINTF(sc, MALO_DEBUG_STATE,
"%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
"capinfo 0x%04x chan %d associd 0x%x mode %d rate %d\n",
vap->iv_ifp->if_xname, __func__, vap->iv_flags,
ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
ieee80211_chan2ieee(ic, ic->ic_curchan),
ni->ni_associd, mode, tp->ucastrate);
malo_hal_setradio(mh, 1,
(ic->ic_flags & IEEE80211_F_SHPREAMBLE) ?
MHP_SHORT_PREAMBLE : MHP_LONG_PREAMBLE);
malo_hal_setassocid(sc->malo_mh, ni->ni_bssid, ni->ni_associd);
malo_hal_set_rate(mh, mode,
tp->ucastrate == IEEE80211_FIXED_RATE_NONE ?
0 : malo_fix2rate(tp->ucastrate));
}
return 0;
}
static int
malo_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
- struct malo_softc *sc = ifp->if_softc;
+ struct malo_softc *sc = ic->ic_softc;
struct malo_txbuf *bf;
struct malo_txq *txq;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->malo_invalid) {
+ if (!sc->malo_running || sc->malo_invalid) {
ieee80211_free_node(ni);
m_freem(m);
return ENETDOWN;
}
/*
* Grab a TX buffer and associated resources. Note that we depend
* on the classification by the 802.11 layer to get to the right h/w
* queue. Management frames must ALWAYS go on queue 1 but we
* cannot just force that here because we may receive non-mgt frames.
*/
txq = &sc->malo_txq[0];
bf = malo_getbuf(sc, txq);
if (bf == NULL) {
- /* XXX blocks other traffic */
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
ieee80211_free_node(ni);
m_freem(m);
return ENOBUFS;
}
/*
* Pass the frame to the h/w for transmission.
*/
if (malo_tx_start(sc, ni, bf, m) != 0) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
bf->bf_m = NULL;
bf->bf_node = NULL;
MALO_TXQ_LOCK(txq);
STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
txq->nfree++;
MALO_TXQ_UNLOCK(txq);
ieee80211_free_node(ni);
return EIO; /* XXX */
}
/*
* NB: We don't need to lock against tx done because this just
* prods the firmware to check the transmit descriptors. The firmware
* will also start fetching descriptors by itself if it notices
* new ones are present when it goes to deliver a tx done interrupt
* to the host. So if we race with tx done processing it's ok.
* Delivering the kick here rather than in malo_tx_start is
* an optimization to avoid poking the firmware for each packet.
*
* NB: the queue id isn't used so 0 is ok.
*/
malo_hal_txstart(sc->malo_mh, 0/*XXX*/);
return 0;
}
static void
malo_sysctlattach(struct malo_softc *sc)
{
#ifdef MALO_DEBUG
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->malo_dev);
struct sysctl_oid *tree = device_get_sysctl_tree(sc->malo_dev);
sc->malo_debug = malo_debug;
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"debug", CTLFLAG_RW, &sc->malo_debug, 0,
"control debugging printfs");
#endif
}
static void
malo_announce(struct malo_softc *sc)
{
- struct ifnet *ifp = sc->malo_ifp;
- if_printf(ifp, "versions [hw %d fw %d.%d.%d.%d] (regioncode %d)\n",
+ device_printf(sc->malo_dev,
+ "versions [hw %d fw %d.%d.%d.%d] (regioncode %d)\n",
sc->malo_hwspecs.hwversion,
(sc->malo_hwspecs.fw_releasenum >> 24) & 0xff,
(sc->malo_hwspecs.fw_releasenum >> 16) & 0xff,
(sc->malo_hwspecs.fw_releasenum >> 8) & 0xff,
(sc->malo_hwspecs.fw_releasenum >> 0) & 0xff,
sc->malo_hwspecs.regioncode);
if (bootverbose || malo_rxbuf != MALO_RXBUF)
- if_printf(ifp, "using %u rx buffers\n", malo_rxbuf);
+ device_printf(sc->malo_dev,
+ "using %u rx buffers\n", malo_rxbuf);
if (bootverbose || malo_txbuf != MALO_TXBUF)
- if_printf(ifp, "using %u tx buffers\n", malo_txbuf);
+ device_printf(sc->malo_dev,
+ "using %u tx buffers\n", malo_txbuf);
}
/*
* Convert net80211 channel to a HAL channel.
*/
static void
malo_mapchan(struct malo_hal_channel *hc, const struct ieee80211_channel *chan)
{
hc->channel = chan->ic_ieee;
*(uint32_t *)&hc->flags = 0;
if (IEEE80211_IS_CHAN_2GHZ(chan))
hc->flags.freqband = MALO_FREQ_BAND_2DOT4GHZ;
}
/*
* Set/change channels. If the channel is really being changed,
* it's done by resetting the chip. To accomplish this we must
* first clean up any pending DMA, then restart things a la
* malo_init.
*/
static int
malo_chan_set(struct malo_softc *sc, struct ieee80211_channel *chan)
{
struct malo_hal *mh = sc->malo_mh;
struct malo_hal_channel hchan;
DPRINTF(sc, MALO_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
__func__, chan->ic_freq, chan->ic_flags);
/*
* Convert to a HAL channel description with the flags constrained
* to reflect the current operating mode.
*/
malo_mapchan(&hchan, chan);
malo_hal_intrset(mh, 0); /* disable interrupts */
malo_hal_setchannel(mh, &hchan);
malo_hal_settxpower(mh, &hchan);
/*
* Update internal state.
*/
sc->malo_tx_th.wt_chan_freq = htole16(chan->ic_freq);
sc->malo_rx_th.wr_chan_freq = htole16(chan->ic_freq);
if (IEEE80211_IS_CHAN_ANYG(chan)) {
sc->malo_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
sc->malo_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
} else {
sc->malo_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
sc->malo_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
}
sc->malo_curchan = hchan;
malo_hal_intrset(mh, sc->malo_imask);
return 0;
}
static void
malo_scan_start(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct malo_softc *sc = ifp->if_softc;
+ struct malo_softc *sc = ic->ic_softc;
DPRINTF(sc, MALO_DEBUG_STATE, "%s\n", __func__);
}
static void
malo_scan_end(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct malo_softc *sc = ifp->if_softc;
+ struct malo_softc *sc = ic->ic_softc;
DPRINTF(sc, MALO_DEBUG_STATE, "%s\n", __func__);
}
static void
malo_set_channel(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct malo_softc *sc = ifp->if_softc;
+ struct malo_softc *sc = ic->ic_softc;
(void) malo_chan_set(sc, ic->ic_curchan);
}
static void
malo_rx_proc(void *arg, int npending)
{
#define IEEE80211_DIR_DSTODS(wh) \
((((const struct ieee80211_frame *)wh)->i_fc[1] & \
IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
struct malo_softc *sc = arg;
- struct ifnet *ifp = sc->malo_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->malo_ic;
struct malo_rxbuf *bf;
struct malo_rxdesc *ds;
struct mbuf *m, *mnew;
struct ieee80211_qosframe *wh;
struct ieee80211_qosframe_addr4 *wh4;
struct ieee80211_node *ni;
int off, len, hdrlen, pktlen, rssi, ntodo;
uint8_t *data, status;
uint32_t readptr, writeptr;
DPRINTF(sc, MALO_DEBUG_RX_PROC,
"%s: pending %u rdptr(0x%x) 0x%x wrptr(0x%x) 0x%x\n",
__func__, npending,
sc->malo_hwspecs.rxdesc_read,
malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_read),
sc->malo_hwspecs.rxdesc_write,
malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_write));
readptr = malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_read);
writeptr = malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_write);
if (readptr == writeptr)
return;
bf = sc->malo_rxnext;
for (ntodo = malo_rxquota; ntodo > 0 && readptr != writeptr; ntodo--) {
if (bf == NULL) {
bf = STAILQ_FIRST(&sc->malo_rxbuf);
break;
}
ds = bf->bf_desc;
if (bf->bf_m == NULL) {
/*
* If data allocation failed previously there
* will be no buffer; try again to re-populate it.
* Note the firmware will not advance to the next
* descriptor with a dma buffer so we must mimic
* this or we'll get out of sync.
*/
DPRINTF(sc, MALO_DEBUG_ANY,
"%s: rx buf w/o dma memory\n", __func__);
(void)malo_rxbuf_init(sc, bf);
break;
}
MALO_RXDESC_SYNC(sc, ds,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
if (ds->rxcontrol != MALO_RXD_CTRL_DMA_OWN)
break;
readptr = le32toh(ds->physnext);
#ifdef MALO_DEBUG
if (sc->malo_debug & MALO_DEBUG_RECV_DESC)
malo_printrxbuf(bf, 0);
#endif
status = ds->status;
if (status & MALO_RXD_STATUS_DECRYPT_ERR_MASK) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto rx_next;
}
/*
* Sync the data buffer.
*/
len = le16toh(ds->pktlen);
bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap,
BUS_DMASYNC_POSTREAD);
/*
* The 802.11 header is provided all or in part at the front;
* use it to calculate the true size of the header that we'll
* construct below. We use this to figure out where to copy
* payload prior to constructing the header.
*/
m = bf->bf_m;
data = mtod(m, uint8_t *);
hdrlen = ieee80211_anyhdrsize(data + sizeof(uint16_t));
off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);
/*
* Calculate RSSI. XXX wrong
*/
rssi = 2 * ((int) ds->snr - ds->nf); /* NB: .5 dBm */
if (rssi > 100)
rssi = 100;
pktlen = hdrlen + (len - off);
/*
* NB: we know our frame is at least as large as
* IEEE80211_MIN_LEN because there is a 4-address frame at
* the front. Hence there's no need to vet the packet length.
* If the frame in fact is too small it should be discarded
* at the net80211 layer.
*/
/* XXX don't need mbuf, just dma buffer */
mnew = malo_getrxmbuf(sc, bf);
if (mnew == NULL) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto rx_next;
}
/*
* Attach the dma buffer to the mbuf; malo_rxbuf_init will
* re-setup the rx descriptor using the replacement dma
* buffer we just installed above.
*/
bf->bf_m = mnew;
m->m_data += off - hdrlen;
m->m_pkthdr.len = m->m_len = pktlen;
- m->m_pkthdr.rcvif = ifp;
/*
* Piece 802.11 header together.
*/
wh = mtod(m, struct ieee80211_qosframe *);
/* NB: don't need to do this sometimes but ... */
/* XXX special case so we can memcpy after m_devget? */
ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
if (IEEE80211_QOS_HAS_SEQ(wh)) {
if (IEEE80211_DIR_DSTODS(wh)) {
wh4 = mtod(m,
struct ieee80211_qosframe_addr4*);
*(uint16_t *)wh4->i_qos = ds->qosctrl;
} else {
*(uint16_t *)wh->i_qos = ds->qosctrl;
}
}
if (ieee80211_radiotap_active(ic)) {
sc->malo_rx_th.wr_flags = 0;
sc->malo_rx_th.wr_rate = ds->rate;
sc->malo_rx_th.wr_antsignal = rssi;
sc->malo_rx_th.wr_antnoise = ds->nf;
}
#ifdef MALO_DEBUG
if (IFF_DUMPPKTS_RECV(sc, wh)) {
ieee80211_dump_pkt(ic, mtod(m, caddr_t),
len, ds->rate, rssi);
}
#endif
- if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
-
/* dispatch */
ni = ieee80211_find_rxnode(ic,
(struct ieee80211_frame_min *)wh);
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi, ds->nf);
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi, ds->nf);
rx_next:
/* NB: ignore ENOMEM so we process more descriptors */
(void) malo_rxbuf_init(sc, bf);
bf = STAILQ_NEXT(bf, bf_list);
}
malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_read, readptr);
sc->malo_rxnext = bf;
- if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
- !IFQ_IS_EMPTY(&ifp->if_snd))
- malo_start(ifp);
+ if (mbufq_first(&sc->malo_snd) != NULL)
+ malo_start(sc);
#undef IEEE80211_DIR_DSTODS
}
-static void
-malo_stop(struct ifnet *ifp, int disable)
-{
- struct malo_softc *sc = ifp->if_softc;
-
- MALO_LOCK(sc);
- malo_stop_locked(ifp, disable);
- MALO_UNLOCK(sc);
-}
-
/*
* Reclaim all tx queue resources.
*/
static void
malo_tx_cleanup(struct malo_softc *sc)
{
int i;
for (i = 0; i < MALO_NUM_TX_QUEUES; i++)
malo_tx_cleanupq(sc, &sc->malo_txq[i]);
}
int
malo_detach(struct malo_softc *sc)
{
- struct ifnet *ifp = sc->malo_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->malo_ic;
- DPRINTF(sc, MALO_DEBUG_ANY, "%s: if_flags %x\n",
- __func__, ifp->if_flags);
+ malo_stop(sc);
- malo_stop(ifp, 1);
-
if (sc->malo_tq != NULL) {
taskqueue_drain(sc->malo_tq, &sc->malo_rxtask);
taskqueue_drain(sc->malo_tq, &sc->malo_txtask);
taskqueue_free(sc->malo_tq);
sc->malo_tq = NULL;
}
/*
* NB: the order of these is important:
* o call the 802.11 layer before detaching the hal to
* insure callbacks into the driver to delete global
* key cache entries can be handled
* o reclaim the tx queue data structures after calling
* the 802.11 layer as we'll get called back to reclaim
* node state and potentially want to use them
* o to cleanup the tx queues the hal is called, so detach
* it last
* Other than that, it's straightforward...
*/
ieee80211_ifdetach(ic);
callout_drain(&sc->malo_watchdog_timer);
malo_dma_cleanup(sc);
malo_tx_cleanup(sc);
malo_hal_detach(sc->malo_mh);
- if_free(ifp);
-
+ mbufq_drain(&sc->malo_snd);
MALO_LOCK_DESTROY(sc);
return 0;
}
void
malo_shutdown(struct malo_softc *sc)
{
- malo_stop(sc->malo_ifp, 1);
+
+ malo_stop(sc);
}
void
malo_suspend(struct malo_softc *sc)
{
- struct ifnet *ifp = sc->malo_ifp;
- DPRINTF(sc, MALO_DEBUG_ANY, "%s: if_flags %x\n",
- __func__, ifp->if_flags);
-
- malo_stop(ifp, 1);
+ malo_stop(sc);
}
void
malo_resume(struct malo_softc *sc)
{
- struct ifnet *ifp = sc->malo_ifp;
- DPRINTF(sc, MALO_DEBUG_ANY, "%s: if_flags %x\n",
- __func__, ifp->if_flags);
-
- if (ifp->if_flags & IFF_UP)
+ if (sc->malo_ic.ic_nrunning > 0)
malo_init(sc);
}
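malo_resume() now keys off ic_nrunning instead of IFF_UP. For context, here is a hypothetical ic_parent-style callback showing how that counter normally drives init/stop in converted drivers; the name malo_parent and the body are illustrative only.

static void
malo_parent(struct ieee80211com *ic)
{
	struct malo_softc *sc = ic->ic_softc;

	MALO_LOCK(sc);
	if (ic->ic_nrunning > 0) {
		if (!sc->malo_running)		/* first vap marked running */
			malo_init(sc);
	} else if (sc->malo_running)		/* last vap went down */
		malo_stop(sc);
	MALO_UNLOCK(sc);
}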
Index: head/sys/dev/malo/if_malo.h
===================================================================
--- head/sys/dev/malo/if_malo.h (revision 287196)
+++ head/sys/dev/malo/if_malo.h (revision 287197)
@@ -1,587 +1,589 @@
/*-
* Copyright (c) 2007 Marvell Semiconductor, Inc.
* Copyright (c) 2007 Sam Leffler, Errno Consulting
* Copyright (c) 2008 Weongyo Jeong <weongyo@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
* $FreeBSD$
*/
/*
* Definitions for the Marvell 88W8335 Wireless LAN controller.
*/
#ifndef _DEV_MALO_H
#define _DEV_MALO_H
#include <net80211/ieee80211_radiotap.h>
#include <dev/malo/if_malohal.h>
#include <dev/malo/if_maloioctl.h>
#ifndef MALO_TXBUF
#define MALO_TXBUF 256 /* number of TX descriptors/buffers */
#endif
#ifndef MALO_RXBUF
#define MALO_RXBUF 256 /* number of RX descriptors/buffers */
#endif
#define MALO_TXDESC 1 /* max tx descriptors/segments */
#define MALO_RXSIZE PAGE_SIZE
#define MALO_RSSI_DUMMY_MARKER 127
#define MALO_RSSI_EP_MULTIPLIER (1<<7) /* pow2 to optimize out * and / */
#define MALO_REG_INT_CODE 0x00000C14
/* From host to ARM */
#define MALO_REG_H2A_INTERRUPT_EVENTS 0x00000C18
/* bit definitions for MALO_REG_H2A_INTERRUPT_CAUSE */
#define MALO_H2ARIC_BIT_PPA_READY 0x00000001
#define MALO_H2ARIC_BIT_DOOR_BELL 0x00000002 /* bit 1 */
#define MALO_H2ARIC_BIT_PS 0x00000004
#define MALO_H2ARIC_BIT_PSPOLL 0x00000008 /* bit 3 */
/* From ARM to host */
#define MALO_REG_A2H_INTERRUPT_CAUSE 0x00000C30
#define MALO_REG_A2H_INTERRUPT_MASK 0x00000C34
#define MALO_REG_A2H_INTERRUPT_CLEAR_SEL 0x00000C38
#define MALO_REG_A2H_INTERRUPT_STATUS_MASK 0x00000C3C
/* bit definitions for MALO_REG_A2H_INTERRUPT_CAUSE */
#define MALO_A2HRIC_BIT_TX_DONE 0x00000001 /* bit 0 */
#define MALO_A2HRIC_BIT_RX_RDY 0x00000002 /* bit 1 */
#define MALO_A2HRIC_BIT_OPC_DONE 0x00000004
#define MALO_A2HRIC_BIT_MAC_EVENT 0x00000008
#define MALO_A2HRIC_BIT_RX_PROBLEM 0x00000010
#define MALO_A2HRIC_BIT_RADIO_OFF 0x00000020 /* bit 5 */
#define MALO_A2HRIC_BIT_RADIO_ON 0x00000040
#define MALO_A2HRIC_BIT_RADAR_DETECT 0x00000080
#define MALO_A2HRIC_BIT_ICV_ERROR 0x00000100
#define MALO_A2HRIC_BIT_MIC_ERROR 0x00000200 /* bit 9 */
#define MALO_A2HRIC_BIT_QUEUE_EMPTY 0x00000400
#define MALO_A2HRIC_BIT_QUEUE_FULL 0x00000800
#define MALO_A2HRIC_BIT_CHAN_SWITCH 0x00001000
#define MALO_A2HRIC_BIT_TX_WATCHDOG 0x00002000
#define MALO_A2HRIC_BIT_BA_WATCHDOG 0x00004000
#define MALO_ISR_SRC_BITS \
(MALO_A2HRIC_BIT_RX_RDY | \
MALO_A2HRIC_BIT_TX_DONE | \
MALO_A2HRIC_BIT_OPC_DONE | \
MALO_A2HRIC_BIT_MAC_EVENT | \
MALO_A2HRIC_BIT_MIC_ERROR | \
MALO_A2HRIC_BIT_ICV_ERROR | \
MALO_A2HRIC_BIT_RADAR_DETECT | \
MALO_A2HRIC_BIT_CHAN_SWITCH | \
MALO_A2HRIC_BIT_TX_WATCHDOG | \
MALO_A2HRIC_BIT_QUEUE_EMPTY)
#define MALO_ISR_RESET (1<<15)
#define MALO_A2HRIC_BIT_MASK MALO_ISR_SRC_BITS
/* map to 0x80000000 on BAR1 */
#define MALO_REG_GEN_PTR 0x00000C10
#define MALO_REG_INT_CODE 0x00000C14
#define MALO_REG_SCRATCH 0x00000C40
/*
* define OpMode for SoftAP/Station mode
*
* the following mode signature has to be written to PCI scratch register#0
* right after successfully downloading the last block of firmware and
* before waiting for firmware ready signature
*/
#define MALO_HOSTCMD_STA_MODE 0x5A
#define MALO_HOSTCMD_STA_FWRDY_SIGNATURE 0xF0F1F2F4
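The comment above describes the post-download handshake only in prose. The fragment below is an illustrative-only sketch of that sequence; the register used for the ready poll, the helper names (malo_hal_write4/malo_hal_read4), and the timeout are all assumptions rather than code from if_malohal.c.

static int
malo_fw_wait_ready(struct malo_hal *mh)
{
	int i;

	/* announce station mode right after the last firmware block */
	malo_hal_write4(mh, MALO_REG_GEN_PTR, MALO_HOSTCMD_STA_MODE);
	for (i = 0; i < 5000; i++) {
		/* firmware posts its ready signature once it has booted */
		if (malo_hal_read4(mh, MALO_REG_INT_CODE) ==
		    MALO_HOSTCMD_STA_FWRDY_SIGNATURE)
			return (0);
		DELAY(1000);			/* poll every millisecond */
	}
	return (ETIMEDOUT);
}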
/*
* 16 bit host command code
*/
#define MALO_HOSTCMD_NONE 0x0000
#define MALO_HOSTCMD_CODE_DNLD 0x0001
#define MALO_HOSTCMD_GET_HW_SPEC 0x0003
#define MALO_HOSTCMD_SET_HW_SPEC 0x0004
#define MALO_HOSTCMD_MAC_MULTICAST_ADR 0x0010
#define MALO_HOSTCMD_SET_WEPKEY 0x0013
#define MALO_HOSTCMD_802_11_RADIO_CONTROL 0x001c
#define MALO_HOSTCMD_802_11_RF_TX_POWER 0x001e
#define MALO_HOSTCMD_802_11_RF_ANTENNA 0x0020
#define MALO_HOSTCMD_SET_PRE_SCAN 0x0107
#define MALO_HOSTCMD_SET_POST_SCAN 0x0108
#define MALO_HOSTCMD_SET_RF_CHANNEL 0x010a
#define MALO_HOSTCMD_SET_AID 0x010d
#define MALO_HOSTCMD_SET_RATE 0x0110
#define MALO_HOSTCMD_SET_SLOT 0x0114
/* define DFS lab commands */
#define MALO_HOSTCMD_SET_FIXED_RATE 0x0126
#define MALO_HOSTCMD_SET_REGION_POWER 0x0128
#define MALO_HOSTCMD_GET_CALTABLE 0x1134
/*
* definition of action or option for each command.
*/
/* define general purpose action */
#define MALO_HOSTCMD_ACT_GEN_GET 0x0000
#define MALO_HOSTCMD_ACT_GEN_SET 0x0001
#define MALO_HOSTCMD_ACT_GEN_SET_LIST 0x0002
/* define action or option for HostCmd_FW_USE_FIXED_RATE */
#define MALO_HOSTCMD_ACT_USE_FIXED_RATE 0x0001
#define MALO_HOSTCMD_ACT_NOT_USE_FIXED_RATE 0x0002
/* INT code register event definition */
#define MALO_INT_CODE_CMD_FINISHED 0x00000005
struct malo_cmd_header {
uint16_t cmd;
uint16_t length;
uint16_t seqnum;
uint16_t result;
} __packed;
struct malo_cmd_caltable {
struct malo_cmd_header cmdhdr;
uint8_t annex;
uint8_t index;
uint8_t len;
uint8_t reserverd;
#define MALO_CAL_TBL_SIZE 160
uint8_t caltbl[MALO_CAL_TBL_SIZE];
} __packed;
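As a worked example of the command header above, the helper below frames a GET_CALTABLE request; the helper name is made up, and the little-endian byte order mirrors the convention the rest of the driver appears to use.

static void
malo_fill_caltable_req(struct malo_cmd_caltable *cal, uint8_t annex,
    uint8_t idx)
{
	memset(cal, 0, sizeof(*cal));
	cal->cmdhdr.cmd = htole16(MALO_HOSTCMD_GET_CALTABLE);
	cal->cmdhdr.length = htole16(sizeof(*cal));
	cal->annex = annex;		/* which calibration annex to fetch */
	cal->index = idx;		/* entry within that annex */
}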
struct malo_cmd_get_hwspec {
struct malo_cmd_header cmdhdr;
u_int8_t version; /* version of the HW */
u_int8_t hostif; /* host interface */
/* Max. number of WCB FW can handle */
u_int16_t num_wcb;
/* MaxNbr of MC addresses FW can handle */
u_int16_t num_mcastaddr;
/* MAC address programmed in HW */
u_int8_t permaddr[6];
u_int16_t regioncode;
/* Number of antenna used */
u_int16_t num_antenna;
/* 4 byte of FW release number */
u_int32_t fw_releasenum;
u_int32_t wcbbase0;
u_int32_t rxpdwr_ptr;
u_int32_t rxpdrd_ptr;
u_int32_t ul_fw_awakecookie;
u_int32_t wcbbase1;
u_int32_t wcbbase2;
u_int32_t wcbbase3;
} __packed;
struct malo_cmd_set_hwspec {
struct malo_cmd_header cmdhdr;
uint8_t version; /* HW revision */
uint8_t hostif; /* Host interface */
/* Max. number of Multicast address FW can handle */
uint16_t num_mcastaddr;
uint8_t permaddr[6]; /* MAC address */
uint16_t regioncode; /* Region Code */
/* 4 byte of FW release number */
uint32_t fwreleasenum;
/* Firmware awake cookie */
uint32_t ul_fw_awakecookie;
/* Device capabilities (see above) */
uint32_t devicecaps;
uint32_t rxpdwrptr; /* Rx shared memory queue */
/* # TX queues in WcbBase array */
uint32_t num_txqueues;
/* TX WCB Rings */
uint32_t wcbbase[MALO_MAX_TXWCB_QUEUES];
uint32_t flags;
uint32_t txwcbnum_per_queue;
uint32_t total_rxwcb;
} __packed;
/* DS 802.11 */
struct malo_cmd_rf_antenna {
struct malo_cmd_header cmdhdr;
uint16_t action;
/* Number of antennas or 0xffff (diversity) */
uint16_t mode;
} __packed;
struct malo_cmd_radio_control {
struct malo_cmd_header cmdhdr;
uint16_t action;
/*
* bit 0 : 1 = on, 0 = off
* bit 1 : 1 = long, 0 = short
* bit 2 : 1 = auto, 0 = fix
*/
uint16_t control;
uint16_t radio_on;
} __packed;
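The bit layout documented in the comment is easy to get backwards, so a hedged example of composing the control word follows; the helper name and the byte-order choice are illustrative assumptions, not driver code.

static void
malo_fill_radio_control(struct malo_cmd_radio_control *cmd, int on,
    int shortpreamble)
{
	cmd->action = htole16(MALO_HOSTCMD_ACT_GEN_SET);
	/* bit 0: 1 = radio on; bit 1: 1 = long preamble, 0 = short */
	cmd->control = htole16((on ? 0x1 : 0) | (shortpreamble ? 0 : 0x2));
	cmd->radio_on = htole16(on ? 1 : 0);
}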
struct malo_cmd_fw_set_wmmmode {
struct malo_cmd_header cmdhdr;
uint16_t action; /* 0 -> unset, 1 -> set */
} __packed;
struct malo_cmd_fw_set_rf_channel {
struct malo_cmd_header cmdhdr;
uint16_t action;
uint8_t cur_channel; /* channel # */
} __packed;
#define MALO_TX_POWER_LEVEL_TOTAL 8
struct malo_cmd_rf_tx_power {
struct malo_cmd_header cmdhdr;
uint16_t action;
uint16_t support_txpower_level;
uint16_t current_txpower_level;
uint16_t reserved;
uint16_t power_levellist[MALO_TX_POWER_LEVEL_TOTAL];
} __packed;
struct malo_fixrate_flag {
/* lower rate after the retry count. 0 = legacy, 1 = HT */
uint32_t type;
/* 0: retry count is not valid, 1: use retry count specified */
uint32_t retrycount_valid;
} __packed;
struct malo_fixed_rate_entry {
struct malo_fixrate_flag typeflags;
/* legacy rate(not index) or an MCS code. */
uint32_t fixedrate;
uint32_t retrycount;
} __packed;
struct malo_cmd_fw_use_fixed_rate {
struct malo_cmd_header cmdhdr;
/*
* MALO_HOSTCMD_ACT_GEN_GET 0x0000
* MALO_HOSTCMD_ACT_GEN_SET 0x0001
* MALO_HOSTCMD_ACT_NOT_USE_FIXED_RATE 0x0002
*/
uint32_t action;
/* use fixed rate specified but firmware can drop to */
uint32_t allowratedrop;
uint32_t entrycount;
struct malo_fixed_rate_entry fixedrate_table[4];
uint8_t multicast_rate;
uint8_t multirate_txtype;
uint8_t management_rate;
} __packed;
#define MALO_RATE_INDEX_MAX_ARRAY 14
struct malo_cmd_fw_set_aid {
struct malo_cmd_header cmdhdr;
uint16_t associd;
uint8_t macaddr[6]; /* AP's Mac Address(BSSID) */
uint32_t gprotection;
uint8_t aprates[MALO_RATE_INDEX_MAX_ARRAY];
} __packed;
struct malo_cmd_prescan {
struct malo_cmd_header cmdhdr;
} __packed;
struct malo_cmd_postscan {
struct malo_cmd_header cmdhdr;
uint32_t isibss;
uint8_t bssid[6];
} __packed;
struct malo_cmd_fw_setslot {
struct malo_cmd_header cmdhdr;
uint16_t action;
/* slot = 0 if regular, slot = 1 if short. */
uint8_t slot;
};
struct malo_cmd_set_rate {
struct malo_cmd_header cmdhdr;
uint8_t dataratetype;
uint8_t rateindex;
uint8_t aprates[14];
} __packed;
struct malo_cmd_wepkey {
struct malo_cmd_header cmdhdr;
uint16_t action;
uint8_t len;
uint8_t flags;
uint16_t index;
uint8_t value[IEEE80211_KEYBUF_SIZE];
uint8_t txmickey[IEEE80211_WEP_MICLEN];
uint8_t rxmickey[IEEE80211_WEP_MICLEN];
uint64_t rxseqctr;
uint64_t txseqctr;
} __packed;
struct malo_cmd_mcast {
struct malo_cmd_header cmdhdr;
uint16_t action;
uint16_t numaddr;
#define MALO_HAL_MCAST_MAX 32
uint8_t maclist[6*32];
} __packed;
/*
* DMA state for tx/rx descriptors.
*/
/*
* Common "base class" for tx/rx descriptor resources
* allocated using the bus dma api.
*/
struct malo_descdma {
const char* dd_name;
void *dd_desc; /* descriptors */
bus_addr_t dd_desc_paddr; /* physical addr of dd_desc */
bus_size_t dd_desc_len; /* size of dd_desc */
bus_dma_segment_t dd_dseg;
int dd_dnseg; /* number of segments */
bus_dma_tag_t dd_dmat; /* bus DMA tag */
bus_dmamap_t dd_dmamap; /* DMA map for descriptors */
void *dd_bufptr; /* associated buffers */
};
/*
* Hardware tx/rx descriptors.
*
* NB: tx descriptor size must match f/w expected size
* because f/w prefetch's the next descriptor linearly
* and doesn't chase the next pointer.
*/
struct malo_txdesc {
uint32_t status;
#define MALO_TXD_STATUS_IDLE 0x00000000
#define MALO_TXD_STATUS_USED 0x00000001
#define MALO_TXD_STATUS_OK 0x00000001
#define MALO_TXD_STATUS_OK_RETRY 0x00000002
#define MALO_TXD_STATUS_OK_MORE_RETRY 0x00000004
#define MALO_TXD_STATUS_MULTICAST_TX 0x00000008
#define MALO_TXD_STATUS_BROADCAST_TX 0x00000010
#define MALO_TXD_STATUS_FAILED_LINK_ERROR 0x00000020
#define MALO_TXD_STATUS_FAILED_EXCEED_LIMIT 0x00000040
#define MALO_TXD_STATUS_FAILED_XRETRY MALO_TXD_STATUS_FAILED_EXCEED_LIMIT
#define MALO_TXD_STATUS_FAILED_AGING 0x00000080
#define MALO_TXD_STATUS_FW_OWNED 0x80000000
uint8_t datarate;
uint8_t txpriority;
uint16_t qosctrl;
uint32_t pktptr;
uint16_t pktlen;
uint8_t destaddr[6];
uint32_t physnext;
uint32_t sap_pktinfo;
uint16_t format;
#define MALO_TXD_FORMAT 0x0001 /* frame format/rate */
#define MALO_TXD_FORMAT_LEGACY 0x0000 /* legacy rate frame */
#define MALO_TXD_RATE 0x01f8 /* tx rate (legacy)/ MCS */
#define MALO_TXD_RATE_S 3
/* NB: 3 is reserved */
#define MALO_TXD_ANTENNA 0x1800 /* antenna select */
#define MALO_TXD_ANTENNA_S 11
uint16_t pad; /* align to 4-byte boundary */
} __packed;
#define MALO_TXDESC_SYNC(txq, ds, how) do { \
bus_dmamap_sync((txq)->dma.dd_dmat, (txq)->dma.dd_dmamap, how); \
} while(0)
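A small sketch of packing the descriptor's format word from the masks and shifts above; the helper itself, the legacy-only choice, and the htole16 byte-order assumption are illustrative rather than taken from the tx path.

static __inline uint16_t
malo_txd_format(uint8_t rate, int antenna)
{
	uint16_t fmt;

	fmt = MALO_TXD_FORMAT_LEGACY;			/* legacy-rate frame */
	fmt |= (rate << MALO_TXD_RATE_S) & MALO_TXD_RATE;
	fmt |= (antenna << MALO_TXD_ANTENNA_S) & MALO_TXD_ANTENNA;
	return (htole16(fmt));
}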
struct malo_rxdesc {
uint8_t rxcontrol; /* control element */
#define MALO_RXD_CTRL_DRIVER_OWN 0x00
#define MALO_RXD_CTRL_OS_OWN 0x04
#define MALO_RXD_CTRL_DMA_OWN 0x80
uint8_t snr; /* signal to noise ratio */
uint8_t status; /* status field w/ USED bit */
#define MALO_RXD_STATUS_IDLE 0x00
#define MALO_RXD_STATUS_OK 0x01
#define MALO_RXD_STATUS_MULTICAST_RX 0x02
#define MALO_RXD_STATUS_BROADCAST_RX 0x04
#define MALO_RXD_STATUS_FRAGMENT_RX 0x08
#define MALO_RXD_STATUS_GENERAL_DECRYPT_ERR 0xff
#define MALO_RXD_STATUS_DECRYPT_ERR_MASK 0x80
#define MALO_RXD_STATUS_TKIP_MIC_DECRYPT_ERR 0x02
#define MALO_RXD_STATUS_WEP_ICV_DECRYPT_ERR 0x04
#define MALO_RXD_STATUS_TKIP_ICV_DECRYPT_ERR 0x08
uint8_t channel; /* channel # pkt received on */
uint16_t pktlen; /* total length of received data */
uint8_t nf; /* noise floor */
uint8_t rate; /* received data rate */
uint32_t physbuffdata; /* physical address of payload data */
uint32_t physnext; /* physical address of next RX desc */
uint16_t qosctrl; /* received QosCtrl field variable */
uint16_t htsig2; /* as the name states */
} __packed;
#define MALO_RXDESC_SYNC(sc, ds, how) do { \
bus_dmamap_sync((sc)->malo_rxdma.dd_dmat, \
(sc)->malo_rxdma.dd_dmamap, how); \
} while (0)
struct malo_rxbuf {
STAILQ_ENTRY(malo_rxbuf) bf_list;
void *bf_desc; /* h/w descriptor */
bus_addr_t bf_daddr; /* physical addr of desc */
bus_dmamap_t bf_dmamap;
bus_addr_t bf_data; /* physical addr of rx data */
struct mbuf *bf_m; /* jumbo mbuf */
};
typedef STAILQ_HEAD(, malo_rxbuf) malo_rxbufhead;
/*
* Software backed version of tx/rx descriptors. We keep
* the software state out of the h/w descriptor structure
* so that it may be allocated in uncached memory w/o paying a
* performance hit.
*/
struct malo_txbuf {
STAILQ_ENTRY(malo_txbuf) bf_list;
void *bf_desc; /* h/w descriptor */
bus_addr_t bf_daddr; /* physical addr of desc */
bus_dmamap_t bf_dmamap; /* DMA map for descriptors */
int bf_nseg;
bus_dma_segment_t bf_segs[MALO_TXDESC];
struct mbuf *bf_m;
struct ieee80211_node *bf_node;
struct malo_txq *bf_txq; /* backpointer to tx q/ring */
};
typedef STAILQ_HEAD(, malo_txbuf) malo_txbufhead;
/*
* TX/RX ring definitions. There are 4 tx rings, one
* per AC, and 1 rx ring. Note carefully that transmit
* descriptors are treated as a contiguous chunk and the
* firmware pre-fetches descriptors. This means that we
* must preserve order when moving descriptors between
* the active+free lists; otherwise we may stall transmit.
*/
struct malo_txq {
struct malo_descdma dma; /* bus dma resources */
struct mtx lock; /* tx q lock */
char name[12]; /* e.g. "malo0_txq4" */
int qnum; /* f/w q number */
int txpri; /* f/w tx priority */
int nfree; /* # buffers on free list */
malo_txbufhead free; /* queue of free buffers */
malo_txbufhead active; /* queue of active buffers */
};
#define MALO_TXQ_LOCK_INIT(_sc, _tq) do { \
snprintf((_tq)->name, sizeof((_tq)->name), "%s_txq%u", \
device_get_nameunit((_sc)->malo_dev), (_tq)->qnum); \
mtx_init(&(_tq)->lock, (_tq)->name, NULL, MTX_DEF); \
} while (0)
#define MALO_TXQ_LOCK_DESTROY(_tq) mtx_destroy(&(_tq)->lock)
#define MALO_TXQ_LOCK(_tq) mtx_lock(&(_tq)->lock)
#define MALO_TXQ_UNLOCK(_tq) mtx_unlock(&(_tq)->lock)
#define MALO_TXQ_LOCK_ASSERT(_tq) mtx_assert(&(_tq)->lock, MA_OWNED)
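To make the ordering constraint in the ring comment concrete, here is a minimal, assumed-name helper that recycles a tx buffer head-to-tail so descriptor order is never disturbed; the real driver splits this work across its tx start/processing paths.

static struct malo_txbuf *
malo_txq_claim_buf(struct malo_txq *txq)
{
	struct malo_txbuf *bf;

	MALO_TXQ_LOCK(txq);
	bf = STAILQ_FIRST(&txq->free);
	if (bf != NULL) {
		STAILQ_REMOVE_HEAD(&txq->free, bf_list);
		txq->nfree--;
		/* keep FIFO order: always append to the active list */
		STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
	}
	MALO_TXQ_UNLOCK(txq);
	return (bf);
}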
/*
* Each packet has fixed front matter: a 2-byte length
* of the payload, followed by a 4-address 802.11 header
* (regardless of the actual header and always w/o any
* QoS header). The payload then follows.
*/
struct malo_txrec {
uint16_t fwlen;
struct ieee80211_frame_addr4 wh;
} __packed;
struct malo_vap {
struct ieee80211vap malo_vap;
int (*malo_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define MALO_VAP(vap) ((struct malo_vap *)(vap))
struct malo_softc {
+ struct ieee80211com malo_ic;
+ struct mbufq malo_snd;
device_t malo_dev;
- struct ifnet *malo_ifp; /* interface common */
struct mtx malo_mtx; /* master lock (recursive) */
struct taskqueue *malo_tq; /* private task queue */
bus_dma_tag_t malo_dmat; /* bus DMA tag */
bus_space_handle_t malo_io0h; /* BAR 0 */
bus_space_tag_t malo_io0t;
bus_space_handle_t malo_io1h; /* BAR 1 */
bus_space_tag_t malo_io1t;
- unsigned int malo_invalid : 1,/* disable hardware accesses */
- malo_recvsetup : 1, /* recv setup */
- malo_fixedrate: 1; /* use fixed tx rate */
+ unsigned int malo_invalid: 1,/* disable hardware accesses */
+ malo_recvsetup: 1, /* recv setup */
+ malo_fixedrate: 1, /* use fixed tx rate */
+ malo_running: 1;
struct malo_hal *malo_mh; /* h/w access layer */
struct malo_hal_hwspec malo_hwspecs; /* h/w capabilities */
struct malo_hal_txrxdma malo_hwdma; /* h/w dma setup */
uint32_t malo_imask; /* interrupt mask copy */
struct malo_hal_channel malo_curchan;
u_int16_t malo_rxantenna; /* rx antenna */
u_int16_t malo_txantenna; /* tx antenna */
struct malo_descdma malo_rxdma; /* rx bus dma resources */
malo_rxbufhead malo_rxbuf; /* rx buffers */
struct malo_rxbuf *malo_rxnext; /* next rx buffer to process */
struct task malo_rxtask; /* rx int processing */
struct malo_txq malo_txq[MALO_NUM_TX_QUEUES];
struct task malo_txtask; /* tx int processing */
struct callout malo_watchdog_timer;
int malo_timer;
struct malo_tx_radiotap_header malo_tx_th;
struct malo_rx_radiotap_header malo_rx_th;
struct malo_stats malo_stats; /* interface statistics */
int malo_debug;
};
#define MALO_LOCK_INIT(_sc) \
mtx_init(&(_sc)->malo_mtx, device_get_nameunit((_sc)->malo_dev), \
NULL, MTX_DEF | MTX_RECURSE)
#define MALO_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->malo_mtx)
#define MALO_LOCK(_sc) mtx_lock(&(_sc)->malo_mtx)
#define MALO_UNLOCK(_sc) mtx_unlock(&(_sc)->malo_mtx)
#define MALO_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->malo_mtx, MA_OWNED)
#define MALO_RXFREE_INIT(_sc) \
mtx_init(&(_sc)->malo_rxlock, device_get_nameunit((_sc)->malo_dev), \
NULL, MTX_DEF)
#define MALO_RXFREE_DESTROY(_sc) mtx_destroy(&(_sc)->malo_rxlock)
#define MALO_RXFREE_LOCK(_sc) mtx_lock(&(_sc)->malo_rxlock)
#define MALO_RXFREE_UNLOCK(_sc) mtx_unlock(&(_sc)->malo_rxlock)
#define MALO_RXFREE_ASSERT(_sc) mtx_assert(&(_sc)->malo_rxlock, \
MA_OWNED)
int malo_attach(uint16_t, struct malo_softc *);
int malo_intr(void *);
int malo_detach(struct malo_softc *);
void malo_shutdown(struct malo_softc *);
void malo_suspend(struct malo_softc *);
void malo_resume(struct malo_softc *);
#endif
Index: head/sys/dev/mwl/if_mwl.c
===================================================================
--- head/sys/dev/mwl/if_mwl.c (revision 287196)
+++ head/sys/dev/mwl/if_mwl.c (revision 287197)
@@ -1,5037 +1,4926 @@
/*-
* Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
* Copyright (c) 2007-2008 Marvell Semiconductor, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for the Marvell 88W8363 Wireless LAN controller.
*/
#include "opt_inet.h"
#include "opt_mwl.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net/bpf.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif /* INET */
#include <dev/mwl/if_mwlvar.h>
#include <dev/mwl/mwldiag.h>
/* idiomatic shorthands: MS = mask+shift, SM = shift+mask */
#define MS(v,x) (((v) & x) >> x##_S)
#define SM(v,x) (((v) << x##_S) & x)
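A quick, purely illustrative use of these shorthands with a made-up field definition (nothing named EXAMPLE_* exists in the driver):

#define	EXAMPLE_FIELD	0x01f8		/* 6-bit field occupying bits 3..8 */
#define	EXAMPLE_FIELD_S	3
/*
 * MS(v, EXAMPLE_FIELD) extracts the field: ((v) & 0x01f8) >> 3
 * SM(5, EXAMPLE_FIELD) builds it:          ((5) << 3) & 0x01f8 == 0x28
 */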
static struct ieee80211vap *mwl_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void mwl_vap_delete(struct ieee80211vap *);
static int mwl_setupdma(struct mwl_softc *);
static int mwl_hal_reset(struct mwl_softc *sc);
-static int mwl_init_locked(struct mwl_softc *);
-static void mwl_init(void *);
-static void mwl_stop_locked(struct ifnet *, int);
+static int mwl_init(struct mwl_softc *);
+static void mwl_parent(struct ieee80211com *);
static int mwl_reset(struct ieee80211vap *, u_long);
-static void mwl_stop(struct ifnet *, int);
-static void mwl_start(struct ifnet *);
+static void mwl_stop(struct mwl_softc *);
+static void mwl_start(struct mwl_softc *);
+static int mwl_transmit(struct ieee80211com *, struct mbuf *);
static int mwl_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static int mwl_media_change(struct ifnet *);
static void mwl_watchdog(void *);
-static int mwl_ioctl(struct ifnet *, u_long, caddr_t);
+static int mwl_ioctl(struct ieee80211com *, u_long, void *);
static void mwl_radar_proc(void *, int);
static void mwl_chanswitch_proc(void *, int);
static void mwl_bawatchdog_proc(void *, int);
static int mwl_key_alloc(struct ieee80211vap *,
struct ieee80211_key *,
ieee80211_keyix *, ieee80211_keyix *);
static int mwl_key_delete(struct ieee80211vap *,
const struct ieee80211_key *);
static int mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *,
const uint8_t mac[IEEE80211_ADDR_LEN]);
static int mwl_mode_init(struct mwl_softc *);
static void mwl_update_mcast(struct ieee80211com *);
static void mwl_update_promisc(struct ieee80211com *);
static void mwl_updateslot(struct ieee80211com *);
static int mwl_beacon_setup(struct ieee80211vap *);
static void mwl_beacon_update(struct ieee80211vap *, int);
#ifdef MWL_HOST_PS_SUPPORT
static void mwl_update_ps(struct ieee80211vap *, int);
static int mwl_set_tim(struct ieee80211_node *, int);
#endif
static int mwl_dma_setup(struct mwl_softc *);
static void mwl_dma_cleanup(struct mwl_softc *);
static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *,
const uint8_t [IEEE80211_ADDR_LEN]);
static void mwl_node_cleanup(struct ieee80211_node *);
static void mwl_node_drain(struct ieee80211_node *);
static void mwl_node_getsignal(const struct ieee80211_node *,
int8_t *, int8_t *);
static void mwl_node_getmimoinfo(const struct ieee80211_node *,
struct ieee80211_mimo_info *);
static int mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *);
static void mwl_rx_proc(void *, int);
static void mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int);
static int mwl_tx_setup(struct mwl_softc *, int, int);
static int mwl_wme_update(struct ieee80211com *);
static void mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *);
static void mwl_tx_cleanup(struct mwl_softc *);
static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *);
static int mwl_tx_start(struct mwl_softc *, struct ieee80211_node *,
struct mwl_txbuf *, struct mbuf *);
static void mwl_tx_proc(void *, int);
static int mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *);
static void mwl_draintxq(struct mwl_softc *);
static void mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *);
static int mwl_recv_action(struct ieee80211_node *,
const struct ieee80211_frame *,
const uint8_t *, const uint8_t *);
static int mwl_addba_request(struct ieee80211_node *,
struct ieee80211_tx_ampdu *, int dialogtoken,
int baparamset, int batimeout);
static int mwl_addba_response(struct ieee80211_node *,
struct ieee80211_tx_ampdu *, int status,
int baparamset, int batimeout);
static void mwl_addba_stop(struct ieee80211_node *,
struct ieee80211_tx_ampdu *);
static int mwl_startrecv(struct mwl_softc *);
static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *,
struct ieee80211_channel *);
static int mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*);
static void mwl_scan_start(struct ieee80211com *);
static void mwl_scan_end(struct ieee80211com *);
static void mwl_set_channel(struct ieee80211com *);
static int mwl_peerstadb(struct ieee80211_node *,
int aid, int staid, MWL_HAL_PEERINFO *pi);
static int mwl_localstadb(struct ieee80211vap *);
static int mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static int allocstaid(struct mwl_softc *sc, int aid);
static void delstaid(struct mwl_softc *sc, int staid);
static void mwl_newassoc(struct ieee80211_node *, int);
static void mwl_agestations(void *);
static int mwl_setregdomain(struct ieee80211com *,
struct ieee80211_regdomain *, int,
struct ieee80211_channel []);
static void mwl_getradiocaps(struct ieee80211com *, int, int *,
struct ieee80211_channel []);
static int mwl_getchannels(struct mwl_softc *);
static void mwl_sysctlattach(struct mwl_softc *);
static void mwl_announce(struct mwl_softc *);
SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters");
static int mwl_rxdesc = MWL_RXDESC; /* # rx desc's to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc,
0, "rx descriptors allocated");
static int mwl_rxbuf = MWL_RXBUF; /* # rx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf,
0, "rx buffers allocated");
static int mwl_txbuf = MWL_TXBUF; /* # tx buffers to allocate */
SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf,
0, "tx buffers allocated");
static int mwl_txcoalesce = 8; /* # tx packets to q before poking f/w*/
SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce,
0, "tx buffers to send at once");
static int mwl_rxquota = MWL_RXBUF; /* # max buffers to process */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota,
0, "max rx buffers to process per interrupt");
static int mwl_rxdmalow = 3; /* # min buffers for wakeup */
SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow,
0, "min free rx buffers before restarting traffic");
#ifdef MWL_DEBUG
static int mwl_debug = 0;
SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug,
0, "control debugging printfs");
enum {
MWL_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
MWL_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
MWL_DEBUG_RECV = 0x00000004, /* basic recv operation */
MWL_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
MWL_DEBUG_RESET = 0x00000010, /* reset processing */
MWL_DEBUG_BEACON = 0x00000020, /* beacon handling */
MWL_DEBUG_INTR = 0x00000040, /* ISR */
MWL_DEBUG_TX_PROC = 0x00000080, /* tx ISR proc */
MWL_DEBUG_RX_PROC = 0x00000100, /* rx ISR proc */
MWL_DEBUG_KEYCACHE = 0x00000200, /* key cache management */
MWL_DEBUG_STATE = 0x00000400, /* 802.11 state transitions */
MWL_DEBUG_NODE = 0x00000800, /* node management */
MWL_DEBUG_RECV_ALL = 0x00001000, /* trace all frames (beacons) */
MWL_DEBUG_TSO = 0x00002000, /* TSO processing */
MWL_DEBUG_AMPDU = 0x00004000, /* BA stream handling */
MWL_DEBUG_ANY = 0xffffffff
};
#define IS_BEACON(wh) \
((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
(IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
#define IFF_DUMPPKTS_RECV(sc, wh) \
- (((sc->sc_debug & MWL_DEBUG_RECV) && \
- ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) || \
- (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
+ ((sc->sc_debug & MWL_DEBUG_RECV) && \
+ ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh)))
#define IFF_DUMPPKTS_XMIT(sc) \
- ((sc->sc_debug & MWL_DEBUG_XMIT) || \
- (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
+ (sc->sc_debug & MWL_DEBUG_XMIT)
+
#define DPRINTF(sc, m, fmt, ...) do { \
if (sc->sc_debug & (m)) \
printf(fmt, __VA_ARGS__); \
} while (0)
#define KEYPRINTF(sc, hk, mac) do { \
if (sc->sc_debug & MWL_DEBUG_KEYCACHE) \
mwl_keyprint(sc, __func__, hk, mac); \
} while (0)
static void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix);
static void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix);
#else
-#define IFF_DUMPPKTS_RECV(sc, wh) \
- ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
-#define IFF_DUMPPKTS_XMIT(sc) \
- ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
-#define DPRINTF(sc, m, fmt, ...) do { \
- (void) sc; \
-} while (0)
-#define KEYPRINTF(sc, k, mac) do { \
- (void) sc; \
-} while (0)
+#define IFF_DUMPPKTS_RECV(sc, wh) 0
+#define IFF_DUMPPKTS_XMIT(sc) 0
+#define DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
+#define KEYPRINTF(sc, k, mac) do { (void) sc; } while (0)
#endif
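Whichever branch of the #ifdef is active, call sites look the same; one assumed example (not a line from this driver):

	DPRINTF(sc, MWL_DEBUG_XMIT, "%s: tx queue %u stalled\n", __func__, qnum);

prints when MWL_DEBUG is compiled in and hw.mwl.debug enables MWL_DEBUG_XMIT, and otherwise collapses to a statement that merely references sc.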
static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers");
/*
* Each packet has fixed front matter: a 2-byte length
* of the payload, followed by a 4-address 802.11 header
* (regardless of the actual header and always w/o any
* QoS header). The payload then follows.
*/
struct mwltxrec {
uint16_t fwlen;
struct ieee80211_frame_addr4 wh;
} __packed;
/*
* Read/Write shorthands for accesses to BAR 0. Note
* that all BAR 1 operations are done in the "hal" and
* there should be no reference to them here.
*/
#ifdef MWL_DEBUG
static __inline uint32_t
RD4(struct mwl_softc *sc, bus_size_t off)
{
return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off);
}
#endif
static __inline void
WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val)
{
bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val);
}
int
mwl_attach(uint16_t devid, struct mwl_softc *sc)
{
- struct ifnet *ifp;
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
struct mwl_hal *mh;
int error = 0;
DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "cannot if_alloc()\n");
- return ENOSPC;
- }
- ic = ifp->if_l2com;
-
/*
* Setup the RX free list lock early, so it can be consistently
* removed.
*/
MWL_RXFREE_INIT(sc);
- /* set these up early for if_printf use */
- if_initname(ifp, device_get_name(sc->sc_dev),
- device_get_unit(sc->sc_dev));
-
mh = mwl_hal_attach(sc->sc_dev, devid,
sc->sc_io1h, sc->sc_io1t, sc->sc_dmat);
if (mh == NULL) {
- if_printf(ifp, "unable to attach HAL\n");
+ device_printf(sc->sc_dev, "unable to attach HAL\n");
error = EIO;
goto bad;
}
sc->sc_mh = mh;
/*
* Load firmware so we can get setup. We arbitrarily
* pick station firmware; we'll re-load firmware as
* needed so setting up the wrong mode isn't a big deal.
*/
if (mwl_hal_fwload(mh, NULL) != 0) {
- if_printf(ifp, "unable to setup builtin firmware\n");
+ device_printf(sc->sc_dev, "unable to setup builtin firmware\n");
error = EIO;
goto bad1;
}
if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
- if_printf(ifp, "unable to fetch h/w specs\n");
+ device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
error = EIO;
goto bad1;
}
error = mwl_getchannels(sc);
if (error != 0)
goto bad1;
sc->sc_txantenna = 0; /* h/w default */
sc->sc_rxantenna = 0; /* h/w default */
sc->sc_invalid = 0; /* ready to go, enable int handling */
sc->sc_ageinterval = MWL_AGEINTERVAL;
/*
* Allocate tx+rx descriptors and populate the lists.
* We immediately push the information to the firmware
* as otherwise it gets upset.
*/
error = mwl_dma_setup(sc);
if (error != 0) {
- if_printf(ifp, "failed to setup descriptors: %d\n", error);
+ device_printf(sc->sc_dev, "failed to setup descriptors: %d\n",
+ error);
goto bad1;
}
error = mwl_setupdma(sc); /* push to firmware */
if (error != 0) /* NB: mwl_setupdma prints msg */
goto bad1;
callout_init(&sc->sc_timer, 1);
callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT,
taskqueue_thread_enqueue, &sc->sc_tq);
taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
- "%s taskq", ifp->if_xname);
+ "%s taskq", device_get_nameunit(sc->sc_dev));
TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc);
TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc);
TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc);
TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc);
/* NB: insure BK queue is the lowest priority h/w queue */
if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) {
- if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
- ieee80211_wme_acnames[WME_AC_BK]);
+ device_printf(sc->sc_dev,
+ "unable to setup xmit queue for %s traffic!\n",
+ ieee80211_wme_acnames[WME_AC_BK]);
error = EIO;
goto bad2;
}
if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) ||
!mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) ||
!mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) {
/*
* Not enough hardware tx queues to properly do WME;
* just punt and assign them all to the same h/w queue.
* We could do a better job of this if, for example,
* we allocate queues when we switch from station to
* AP mode.
*/
if (sc->sc_ac2q[WME_AC_VI] != NULL)
mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
if (sc->sc_ac2q[WME_AC_BE] != NULL)
mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
}
TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc);
- ifp->if_softc = sc;
- ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
- ifp->if_start = mwl_start;
- ifp->if_ioctl = mwl_ioctl;
- ifp->if_init = mwl_init;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(sc->sc_dev);
/* XXX not right but it's not used anywhere important */
ic->ic_phytype = IEEE80211_T_OFDM;
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_caps =
IEEE80211_C_STA /* station mode supported */
| IEEE80211_C_HOSTAP /* hostap mode */
| IEEE80211_C_MONITOR /* monitor mode */
#if 0
| IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
| IEEE80211_C_AHDEMO /* adhoc demo mode */
#endif
| IEEE80211_C_MBSS /* mesh point link mode */
| IEEE80211_C_WDS /* WDS supported */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_WME /* WME/WMM supported */
| IEEE80211_C_BURST /* xmit bursting supported */
| IEEE80211_C_WPA /* capable of WPA1+WPA2 */
| IEEE80211_C_BGSCAN /* capable of bg scanning */
| IEEE80211_C_TXFRAG /* handle tx frags */
| IEEE80211_C_TXPMGT /* capable of txpow mgt */
| IEEE80211_C_DFS /* DFS supported */
;
ic->ic_htcaps =
IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */
| IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */
| IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
| IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
| IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */
#if MWL_AGGR_SIZE == 7935
| IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */
#else
| IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */
#endif
#if 0
| IEEE80211_HTCAP_PSMP /* PSMP supported */
| IEEE80211_HTCAP_40INTOLERANT /* 40MHz intolerant */
#endif
/* s/w capabilities */
| IEEE80211_HTC_HT /* HT operation */
| IEEE80211_HTC_AMPDU /* tx A-MPDU */
| IEEE80211_HTC_AMSDU /* tx A-MSDU */
| IEEE80211_HTC_SMPS /* SMPS available */
;
/*
* Mark h/w crypto support.
* XXX no way to query h/w support.
*/
ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP
| IEEE80211_CRYPTO_AES_CCM
| IEEE80211_CRYPTO_TKIP
| IEEE80211_CRYPTO_TKIPMIC
;
/*
* Transmit requires space in the packet for a special
* format transmit record and optional padding between
* this record and the payload. Ask the net80211 layer
* to arrange this when encapsulating packets so we can
* add it efficiently.
*/
ic->ic_headroom = sizeof(struct mwltxrec) -
sizeof(struct ieee80211_frame);
+ IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr);
+
/* call MI attach routine. */
- ieee80211_ifattach(ic, sc->sc_hwspecs.macAddr);
+ ieee80211_ifattach(ic);
ic->ic_setregdomain = mwl_setregdomain;
ic->ic_getradiocaps = mwl_getradiocaps;
/* override default methods */
ic->ic_raw_xmit = mwl_raw_xmit;
ic->ic_newassoc = mwl_newassoc;
ic->ic_updateslot = mwl_updateslot;
ic->ic_update_mcast = mwl_update_mcast;
ic->ic_update_promisc = mwl_update_promisc;
ic->ic_wme.wme_update = mwl_wme_update;
+ ic->ic_transmit = mwl_transmit;
+ ic->ic_ioctl = mwl_ioctl;
+ ic->ic_parent = mwl_parent;
ic->ic_node_alloc = mwl_node_alloc;
sc->sc_node_cleanup = ic->ic_node_cleanup;
ic->ic_node_cleanup = mwl_node_cleanup;
sc->sc_node_drain = ic->ic_node_drain;
ic->ic_node_drain = mwl_node_drain;
ic->ic_node_getsignal = mwl_node_getsignal;
ic->ic_node_getmimoinfo = mwl_node_getmimoinfo;
ic->ic_scan_start = mwl_scan_start;
ic->ic_scan_end = mwl_scan_end;
ic->ic_set_channel = mwl_set_channel;
sc->sc_recv_action = ic->ic_recv_action;
ic->ic_recv_action = mwl_recv_action;
sc->sc_addba_request = ic->ic_addba_request;
ic->ic_addba_request = mwl_addba_request;
sc->sc_addba_response = ic->ic_addba_response;
ic->ic_addba_response = mwl_addba_response;
sc->sc_addba_stop = ic->ic_addba_stop;
ic->ic_addba_stop = mwl_addba_stop;
ic->ic_vap_create = mwl_vap_create;
ic->ic_vap_delete = mwl_vap_delete;
ieee80211_radiotap_attach(ic,
&sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
MWL_TX_RADIOTAP_PRESENT,
&sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
MWL_RX_RADIOTAP_PRESENT);
/*
* Setup dynamic sysctl's now that country code and
* regdomain are available from the hal.
*/
mwl_sysctlattach(sc);
if (bootverbose)
ieee80211_announce(ic);
mwl_announce(sc);
return 0;
bad2:
mwl_dma_cleanup(sc);
bad1:
mwl_hal_detach(mh);
bad:
MWL_RXFREE_DESTROY(sc);
- if_free(ifp);
sc->sc_invalid = 1;
return error;
}
int
mwl_detach(struct mwl_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
- DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
- __func__, ifp->if_flags);
-
- mwl_stop(ifp, 1);
+ MWL_LOCK(sc);
+ mwl_stop(sc);
+ MWL_UNLOCK(sc);
/*
* NB: the order of these is important:
* o call the 802.11 layer before detaching the hal to
* insure callbacks into the driver to delete global
* key cache entries can be handled
* o reclaim the tx queue data structures after calling
* the 802.11 layer as we'll get called back to reclaim
* node state and potentially want to use them
* o to cleanup the tx queues the hal is called, so detach
* it last
* Other than that, it's straightforward...
*/
ieee80211_ifdetach(ic);
callout_drain(&sc->sc_watchdog);
mwl_dma_cleanup(sc);
MWL_RXFREE_DESTROY(sc);
mwl_tx_cleanup(sc);
mwl_hal_detach(sc->sc_mh);
- if_free(ifp);
+ mbufq_drain(&sc->sc_snd);
return 0;
}
/*
* MAC address handling for multiple BSS on the same radio.
* The first vap uses the MAC address from the EEPROM. For
* subsequent vap's we set the U/L bit (bit 1) in the MAC
* address and use the next six bits as an index.
*/
static void
assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
int i;
if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) {
/* NB: we only do this if h/w supports multiple bssid */
for (i = 0; i < 32; i++)
if ((sc->sc_bssidmask & (1<<i)) == 0)
break;
if (i != 0)
mac[0] |= (i << 2)|0x2;
} else
i = 0;
sc->sc_bssidmask |= 1<<i;
if (i == 0)
sc->sc_nbssid0++;
}
static void
-reclaim_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN])
+reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
int i = mac[0] >> 2;
if (i != 0 || --sc->sc_nbssid0 == 0)
sc->sc_bssidmask &= ~(1<<i);
}
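As a worked example of the scheme described above (values chosen for illustration): with an EEPROM address of 00:11:22:33:44:55 and the next free bssid slot i == 3, assign_address() computes mac[0] |= (3 << 2) | 0x2, giving 0e:11:22:33:44:55 -- the locally-administered bit is set and the slot index sits in the bits above it; reclaim_address() later recovers i as mac[0] >> 2 == 3.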
static struct ieee80211vap *
mwl_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac0[IEEE80211_ADDR_LEN])
{
- struct ifnet *ifp = ic->ic_ifp;
- struct mwl_softc *sc = ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
struct mwl_hal *mh = sc->sc_mh;
struct ieee80211vap *vap, *apvap;
struct mwl_hal_vap *hvap;
struct mwl_vap *mvp;
uint8_t mac[IEEE80211_ADDR_LEN];
IEEE80211_ADDR_COPY(mac, mac0);
switch (opmode) {
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
if ((flags & IEEE80211_CLONE_MACADDR) == 0)
assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac);
if (hvap == NULL) {
if ((flags & IEEE80211_CLONE_MACADDR) == 0)
reclaim_address(sc, mac);
return NULL;
}
break;
case IEEE80211_M_STA:
if ((flags & IEEE80211_CLONE_MACADDR) == 0)
assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac);
if (hvap == NULL) {
if ((flags & IEEE80211_CLONE_MACADDR) == 0)
reclaim_address(sc, mac);
return NULL;
}
/* no h/w beacon miss support; always use s/w */
flags |= IEEE80211_CLONE_NOBEACONS;
break;
case IEEE80211_M_WDS:
hvap = NULL; /* NB: we use associated AP vap */
if (sc->sc_napvaps == 0)
return NULL; /* no existing AP vap */
break;
case IEEE80211_M_MONITOR:
hvap = NULL;
break;
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
default:
return NULL;
}
- mvp = (struct mwl_vap *) malloc(sizeof(struct mwl_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (mvp == NULL) {
- if (hvap != NULL) {
- mwl_hal_delvap(hvap);
- if ((flags & IEEE80211_CLONE_MACADDR) == 0)
- reclaim_address(sc, mac);
- }
- /* XXX msg */
- return NULL;
- }
+ mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO);
mvp->mv_hvap = hvap;
if (opmode == IEEE80211_M_WDS) {
/*
* WDS vaps must have an associated AP vap; find one.
* XXX not right.
*/
TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next)
if (apvap->iv_opmode == IEEE80211_M_HOSTAP) {
mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap;
break;
}
KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap"));
}
vap = &mvp->mv_vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
- if (hvap != NULL)
- IEEE80211_ADDR_COPY(vap->iv_myaddr, mac);
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
/* override with driver methods */
mvp->mv_newstate = vap->iv_newstate;
vap->iv_newstate = mwl_newstate;
vap->iv_max_keyix = 0; /* XXX */
vap->iv_key_alloc = mwl_key_alloc;
vap->iv_key_delete = mwl_key_delete;
vap->iv_key_set = mwl_key_set;
#ifdef MWL_HOST_PS_SUPPORT
if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
vap->iv_update_ps = mwl_update_ps;
mvp->mv_set_tim = vap->iv_set_tim;
vap->iv_set_tim = mwl_set_tim;
}
#endif
vap->iv_reset = mwl_reset;
vap->iv_update_beacon = mwl_beacon_update;
/* override max aid so sta's cannot assoc when we're out of sta id's */
vap->iv_max_aid = MWL_MAXSTAID;
/* override default A-MPDU rx parameters */
vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
/* complete setup */
- ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status);
+ ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status,
+ mac);
switch (vap->iv_opmode) {
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
case IEEE80211_M_STA:
/*
* Setup sta db entry for local address.
*/
mwl_localstadb(vap);
if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_MBSS)
sc->sc_napvaps++;
else
sc->sc_nstavaps++;
break;
case IEEE80211_M_WDS:
sc->sc_nwdsvaps++;
break;
default:
break;
}
/*
* Setup overall operating mode.
*/
if (sc->sc_napvaps)
ic->ic_opmode = IEEE80211_M_HOSTAP;
else if (sc->sc_nstavaps)
ic->ic_opmode = IEEE80211_M_STA;
else
ic->ic_opmode = opmode;
return vap;
}
static void
mwl_vap_delete(struct ieee80211vap *vap)
{
struct mwl_vap *mvp = MWL_VAP(vap);
- struct ifnet *parent = vap->iv_ic->ic_ifp;
- struct mwl_softc *sc = parent->if_softc;
+ struct mwl_softc *sc = vap->iv_ic->ic_softc;
struct mwl_hal *mh = sc->sc_mh;
struct mwl_hal_vap *hvap = mvp->mv_hvap;
enum ieee80211_opmode opmode = vap->iv_opmode;
/* XXX disallow ap vap delete if WDS still present */
- if (parent->if_drv_flags & IFF_DRV_RUNNING) {
+ if (sc->sc_running) {
/* quiesce h/w while we remove the vap */
mwl_hal_intrset(mh, 0); /* disable interrupts */
}
ieee80211_vap_detach(vap);
switch (opmode) {
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
case IEEE80211_M_STA:
KASSERT(hvap != NULL, ("no hal vap handle"));
(void) mwl_hal_delstation(hvap, vap->iv_myaddr);
mwl_hal_delvap(hvap);
if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS)
sc->sc_napvaps--;
else
sc->sc_nstavaps--;
/* XXX don't do it for IEEE80211_CLONE_MACADDR */
reclaim_address(sc, vap->iv_myaddr);
break;
case IEEE80211_M_WDS:
sc->sc_nwdsvaps--;
break;
default:
break;
}
mwl_cleartxq(sc, vap);
free(mvp, M_80211_VAP);
- if (parent->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->sc_running)
mwl_hal_intrset(mh, sc->sc_imask);
}
void
mwl_suspend(struct mwl_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
- __func__, ifp->if_flags);
-
- mwl_stop(ifp, 1);
+ MWL_LOCK(sc);
+ mwl_stop(sc);
+ MWL_UNLOCK(sc);
}
void
mwl_resume(struct mwl_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
+ int error = EDOOFUS;
- DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags %x\n",
- __func__, ifp->if_flags);
+ MWL_LOCK(sc);
+ if (sc->sc_ic.ic_nrunning > 0)
+ error = mwl_init(sc);
+ MWL_UNLOCK(sc);
- if (ifp->if_flags & IFF_UP)
- mwl_init(sc);
+ if (error == 0)
+ ieee80211_start_all(&sc->sc_ic); /* start all vap's */
}
void
mwl_shutdown(void *arg)
{
struct mwl_softc *sc = arg;
- mwl_stop(sc->sc_ifp, 1);
+ MWL_LOCK(sc);
+ mwl_stop(sc);
+ MWL_UNLOCK(sc);
}
/*
* Interrupt handler. Most of the actual processing is deferred.
*/
void
mwl_intr(void *arg)
{
struct mwl_softc *sc = arg;
struct mwl_hal *mh = sc->sc_mh;
uint32_t status;
if (sc->sc_invalid) {
/*
* The hardware is not ready/present, don't touch anything.
* Note this can happen early on if the IRQ is shared.
*/
DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
return;
}
/*
* Figure out the reason(s) for the interrupt.
*/
mwl_hal_getisr(mh, &status); /* NB: clears ISR too */
if (status == 0) /* must be a shared irq */
return;
DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n",
__func__, status, sc->sc_imask);
if (status & MACREG_A2HRIC_BIT_RX_RDY)
taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
if (status & MACREG_A2HRIC_BIT_TX_DONE)
taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG)
taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask);
if (status & MACREG_A2HRIC_BIT_OPC_DONE)
mwl_hal_cmddone(mh);
if (status & MACREG_A2HRIC_BIT_MAC_EVENT) {
;
}
if (status & MACREG_A2HRIC_BIT_ICV_ERROR) {
/* TKIP ICV error */
sc->sc_stats.mst_rx_badtkipicv++;
}
if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) {
/* 11n aggregation queue is empty, re-fill */
;
}
if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) {
;
}
if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) {
/* radar detected, process event */
taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask);
}
if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) {
/* DFS channel switch */
taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask);
}
}
static void
mwl_radar_proc(void *arg, int pending)
{
struct mwl_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n",
__func__, pending);
sc->sc_stats.mst_radardetect++;
/* XXX stop h/w BA streams? */
IEEE80211_LOCK(ic);
ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
IEEE80211_UNLOCK(ic);
}
static void
mwl_chanswitch_proc(void *arg, int pending)
{
struct mwl_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n",
__func__, pending);
IEEE80211_LOCK(ic);
sc->sc_csapending = 0;
ieee80211_csa_completeswitch(ic);
IEEE80211_UNLOCK(ic);
}
static void
mwl_bawatchdog(const MWL_HAL_BASTREAM *sp)
{
struct ieee80211_node *ni = sp->data[0];
/* send DELBA and drop the stream */
ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED);
}
static void
mwl_bawatchdog_proc(void *arg, int pending)
{
struct mwl_softc *sc = arg;
struct mwl_hal *mh = sc->sc_mh;
const MWL_HAL_BASTREAM *sp;
uint8_t bitmap, n;
sc->sc_stats.mst_bawatchdog++;
if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) {
DPRINTF(sc, MWL_DEBUG_AMPDU,
"%s: could not get bitmap\n", __func__);
sc->sc_stats.mst_bawatchdog_failed++;
return;
}
DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap);
if (bitmap == 0xff) {
n = 0;
/* disable all ba streams */
for (bitmap = 0; bitmap < 8; bitmap++) {
sp = mwl_hal_bastream_lookup(mh, bitmap);
if (sp != NULL) {
mwl_bawatchdog(sp);
n++;
}
}
if (n == 0) {
DPRINTF(sc, MWL_DEBUG_AMPDU,
"%s: no BA streams found\n", __func__);
sc->sc_stats.mst_bawatchdog_empty++;
}
} else if (bitmap != 0xaa) {
/* disable a single ba stream */
sp = mwl_hal_bastream_lookup(mh, bitmap);
if (sp != NULL) {
mwl_bawatchdog(sp);
} else {
DPRINTF(sc, MWL_DEBUG_AMPDU,
"%s: no BA stream %d\n", __func__, bitmap);
sc->sc_stats.mst_bawatchdog_notfound++;
}
}
}
/*
* Convert net80211 channel to a HAL channel.
*/
static void
mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan)
{
hc->channel = chan->ic_ieee;
*(uint32_t *)&hc->channelFlags = 0;
if (IEEE80211_IS_CHAN_2GHZ(chan))
hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ;
else if (IEEE80211_IS_CHAN_5GHZ(chan))
hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ;
if (IEEE80211_IS_CHAN_HT40(chan)) {
hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH;
if (IEEE80211_IS_CHAN_HT40U(chan))
hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH;
else
hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH;
} else
hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH;
/* XXX 10MHz channels */
}
/*
* Inform firmware of our tx/rx dma setup. The BAR 0
* writes below are for compatibility with older firmware.
* For current firmware we send this information with a
* cmd block via mwl_hal_sethwdma.
*/
static int
mwl_setupdma(struct mwl_softc *sc)
{
int error, i;
sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr;
WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead);
WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead);
for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) {
struct mwl_txq *txq = &sc->sc_txq[i];
sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr;
WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]);
}
sc->sc_hwdma.maxNumTxWcb = mwl_txbuf;
sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES;
error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma);
if (error != 0) {
device_printf(sc->sc_dev,
"unable to setup tx/rx dma; hal status %u\n", error);
/* XXX */
}
return error;
}
/*
* Inform firmware of tx rate parameters.
* Called after a channel change.
*/
static int
mwl_setcurchanrates(struct mwl_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
const struct ieee80211_rateset *rs;
MWL_HAL_TXRATE rates;
memset(&rates, 0, sizeof(rates));
rs = ieee80211_get_suprates(ic, ic->ic_curchan);
/* rate used to send management frames */
rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
/* rate used to send multicast frames */
rates.McastRate = rates.MgtRate;
return mwl_hal_settxrate_auto(sc->sc_mh, &rates);
}
/*
* Inform firmware of tx rate parameters. Called whenever
* user-settable params change and after a channel change.
*/
static int
mwl_setrates(struct ieee80211vap *vap)
{
struct mwl_vap *mvp = MWL_VAP(vap);
struct ieee80211_node *ni = vap->iv_bss;
const struct ieee80211_txparam *tp = ni->ni_txparms;
MWL_HAL_TXRATE rates;
KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
/*
* Update the h/w rate map.
* NB: 0x80 for MCS is passed through unchanged
*/
memset(&rates, 0, sizeof(rates));
/* rate used to send management frames */
rates.MgtRate = tp->mgmtrate;
/* rate used to send multicast frames */
rates.McastRate = tp->mcastrate;
/* while here calculate EAPOL fixed rate cookie */
mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni));
return mwl_hal_settxrate(mvp->mv_hvap,
tp->ucastrate != IEEE80211_FIXED_RATE_NONE ?
RATE_FIXED : RATE_AUTO, &rates);
}
/*
* Setup a fixed xmit rate cookie for EAPOL frames.
*/
static void
mwl_seteapolformat(struct ieee80211vap *vap)
{
struct mwl_vap *mvp = MWL_VAP(vap);
struct ieee80211_node *ni = vap->iv_bss;
enum ieee80211_phymode mode;
uint8_t rate;
KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state));
mode = ieee80211_chan2mode(ni->ni_chan);
/*
* Use legacy rates when operating a mixed HT+non-HT bss.
* NB: this may violate POLA for sta and wds vap's.
*/
if (mode == IEEE80211_MODE_11NA &&
(vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate;
else if (mode == IEEE80211_MODE_11NG &&
(vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0)
rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate;
else
rate = vap->iv_txparms[mode].mgmtrate;
mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni));
}
/*
* Map SKU+country code to region code for radar bin'ing.
*/
static int
mwl_map2regioncode(const struct ieee80211_regdomain *rd)
{
switch (rd->regdomain) {
case SKU_FCC:
case SKU_FCC3:
return DOMAIN_CODE_FCC;
case SKU_CA:
return DOMAIN_CODE_IC;
case SKU_ETSI:
case SKU_ETSI2:
case SKU_ETSI3:
if (rd->country == CTRY_SPAIN)
return DOMAIN_CODE_SPAIN;
if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2)
return DOMAIN_CODE_FRANCE;
/* XXX force 1.3.1 radar type */
return DOMAIN_CODE_ETSI_131;
case SKU_JAPAN:
return DOMAIN_CODE_MKK;
case SKU_ROW:
return DOMAIN_CODE_DGT; /* Taiwan */
case SKU_APAC:
case SKU_APAC2:
case SKU_APAC3:
return DOMAIN_CODE_AUS; /* Australia */
}
/* XXX KOREA? */
return DOMAIN_CODE_FCC; /* XXX? */
}
static int
mwl_hal_reset(struct mwl_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct mwl_hal *mh = sc->sc_mh;
mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna);
mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna);
mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE);
mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0);
mwl_chan_set(sc, ic->ic_curchan);
/* NB: RF/RA performance tuned for indoor mode */
mwl_hal_setrateadaptmode(mh, 0);
mwl_hal_setoptimizationlevel(mh,
(ic->ic_flags & IEEE80211_F_BURST) != 0);
mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain));
mwl_hal_setaggampduratemode(mh, 1, 80); /* XXX */
mwl_hal_setcfend(mh, 0); /* XXX */
return 1;
}
static int
-mwl_init_locked(struct mwl_softc *sc)
+mwl_init(struct mwl_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
struct mwl_hal *mh = sc->sc_mh;
int error = 0;
- DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
- __func__, ifp->if_flags);
-
MWL_LOCK_ASSERT(sc);
/*
* Stop anything previously setup. This is safe
* whether this is the first time through or not.
*/
- mwl_stop_locked(ifp, 0);
+ mwl_stop(sc);
/*
* Push vap-independent state to the firmware.
*/
if (!mwl_hal_reset(sc)) {
- if_printf(ifp, "unable to reset hardware\n");
+ device_printf(sc->sc_dev, "unable to reset hardware\n");
return EIO;
}
/*
* Setup recv (once); transmit is already good to go.
*/
error = mwl_startrecv(sc);
if (error != 0) {
- if_printf(ifp, "unable to start recv logic\n");
+ device_printf(sc->sc_dev, "unable to start recv logic\n");
return error;
}
/*
* Enable interrupts.
*/
sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY
| MACREG_A2HRIC_BIT_TX_DONE
| MACREG_A2HRIC_BIT_OPC_DONE
#if 0
| MACREG_A2HRIC_BIT_MAC_EVENT
#endif
| MACREG_A2HRIC_BIT_ICV_ERROR
| MACREG_A2HRIC_BIT_RADAR_DETECT
| MACREG_A2HRIC_BIT_CHAN_SWITCH
#if 0
| MACREG_A2HRIC_BIT_QUEUE_EMPTY
#endif
| MACREG_A2HRIC_BIT_BA_WATCHDOG
| MACREQ_A2HRIC_BIT_TX_ACK
;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_running = 1;
mwl_hal_intrset(mh, sc->sc_imask);
callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
return 0;
}
static void
-mwl_init(void *arg)
+mwl_stop(struct mwl_softc *sc)
{
- struct mwl_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- int error = 0;
- DPRINTF(sc, MWL_DEBUG_ANY, "%s: if_flags 0x%x\n",
- __func__, ifp->if_flags);
-
- MWL_LOCK(sc);
- error = mwl_init_locked(sc);
- MWL_UNLOCK(sc);
-
- if (error == 0)
- ieee80211_start_all(ic); /* start all vap's */
-}
-
-static void
-mwl_stop_locked(struct ifnet *ifp, int disable)
-{
- struct mwl_softc *sc = ifp->if_softc;
-
- DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
- __func__, sc->sc_invalid, ifp->if_flags);
-
MWL_LOCK_ASSERT(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ if (sc->sc_running) {
/*
* Shutdown the hardware and driver.
*/
- ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ sc->sc_running = 0;
callout_stop(&sc->sc_watchdog);
sc->sc_tx_timer = 0;
mwl_draintxq(sc);
}
}
-static void
-mwl_stop(struct ifnet *ifp, int disable)
-{
- struct mwl_softc *sc = ifp->if_softc;
-
- MWL_LOCK(sc);
- mwl_stop_locked(ifp, disable);
- MWL_UNLOCK(sc);
-}
-
static int
mwl_reset_vap(struct ieee80211vap *vap, int state)
{
struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
struct ieee80211com *ic = vap->iv_ic;
if (state == IEEE80211_S_RUN)
mwl_setrates(vap);
/* XXX off by 1? */
mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
/* XXX auto? 20/40 split? */
mwl_hal_sethtgi(hvap, (vap->iv_flags_ht &
(IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0);
mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ?
HTPROTECT_NONE : HTPROTECT_AUTO);
/* XXX txpower cap */
/* re-setup beacons */
if (state == IEEE80211_S_RUN &&
(vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_MBSS ||
vap->iv_opmode == IEEE80211_M_IBSS)) {
mwl_setapmode(vap, vap->iv_bss->ni_chan);
mwl_hal_setnprotmode(hvap,
MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
return mwl_beacon_setup(vap);
}
return 0;
}
/*
* Reset the hardware w/o losing operational state.
* Used to reset or reload hardware state for a vap.
*/
static int
mwl_reset(struct ieee80211vap *vap, u_long cmd)
{
struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
int error = 0;
if (hvap != NULL) { /* WDS, MONITOR, etc. */
struct ieee80211com *ic = vap->iv_ic;
- struct ifnet *ifp = ic->ic_ifp;
- struct mwl_softc *sc = ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
struct mwl_hal *mh = sc->sc_mh;
/* XXX handle DWDS sta vap change */
/* XXX do we need to disable interrupts? */
mwl_hal_intrset(mh, 0); /* disable interrupts */
error = mwl_reset_vap(vap, vap->iv_state);
mwl_hal_intrset(mh, sc->sc_imask);
}
return error;
}
/*
* Allocate a tx buffer for sending a frame. The
* packet is assumed to have the WME AC stored so
* we can use it to select the appropriate h/w queue.
*/
static struct mwl_txbuf *
mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq)
{
struct mwl_txbuf *bf;
/*
* Grab a TX buffer and associated resources.
*/
MWL_TXQ_LOCK(txq);
bf = STAILQ_FIRST(&txq->free);
if (bf != NULL) {
STAILQ_REMOVE_HEAD(&txq->free, bf_list);
txq->nfree--;
}
MWL_TXQ_UNLOCK(txq);
if (bf == NULL)
DPRINTF(sc, MWL_DEBUG_XMIT,
"%s: out of xmit buffers on q %d\n", __func__, txq->qnum);
return bf;
}
/*
* Return a tx buffer to the queue it came from. Note there
* are two cases because we must preserve the order of buffers
* as it reflects the fixed order of descriptors in memory
* (the firmware pre-fetches descriptors so we cannot reorder).
*/
static void
mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
bf->bf_m = NULL;
bf->bf_node = NULL;
MWL_TXQ_LOCK(txq);
STAILQ_INSERT_HEAD(&txq->free, bf, bf_list);
txq->nfree++;
MWL_TXQ_UNLOCK(txq);
}
static void
mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf)
{
bf->bf_m = NULL;
bf->bf_node = NULL;
MWL_TXQ_LOCK(txq);
STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
txq->nfree++;
MWL_TXQ_UNLOCK(txq);
}
+static int
+mwl_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct mwl_softc *sc = ic->ic_softc;
+ int error;
+
+ MWL_LOCK(sc);
+ if (!sc->sc_running) {
+ MWL_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ MWL_UNLOCK(sc);
+ return (error);
+ }
+ mwl_start(sc);
+ MWL_UNLOCK(sc);
+ return (0);
+}
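The new mwl_transmit() above replaces the old if_start/if_transmit path: frames are queued on the driver's private sc_snd mbufq and drained by mwl_start() under the softc lock. A minimal sketch of how such methods are typically registered at attach time under the post-ifnet net80211 conventions follows; the helper name is hypothetical and the actual attach changes live elsewhere in this diff.
/*
 * Sketch only (hypothetical helper, not part of this change): with the
 * ifnet gone, the driver registers its entry points directly on the
 * ieee80211com embedded in the softc.
 */
static void
mwl_register_com_methods(struct mwl_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	ic->ic_softc = sc;			/* back-pointer used by the methods above */
	ic->ic_transmit = mwl_transmit;		/* data frames, queued on sc_snd */
	ic->ic_update_mcast = mwl_update_mcast;
	ic->ic_update_promisc = mwl_update_promisc;
}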
+
static void
-mwl_start(struct ifnet *ifp)
+mwl_start(struct mwl_softc *sc)
{
- struct mwl_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct mwl_txbuf *bf;
struct mbuf *m;
struct mwl_txq *txq = NULL; /* XXX silence gcc */
int nqueued;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
+ MWL_LOCK_ASSERT(sc);
+ if (!sc->sc_running || sc->sc_invalid)
return;
nqueued = 0;
- for (;;) {
- bf = NULL;
- IFQ_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
+ while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
/*
* Grab the node for the destination.
*/
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
KASSERT(ni != NULL, ("no node"));
m->m_pkthdr.rcvif = NULL; /* committed, clear ref */
/*
* Grab a TX buffer and associated resources.
* We honor the classification by the 802.11 layer.
*/
txq = sc->sc_ac2q[M_WME_GETAC(m)];
bf = mwl_gettxbuf(sc, txq);
if (bf == NULL) {
m_freem(m);
ieee80211_free_node(ni);
#ifdef MWL_TX_NODROP
sc->sc_stats.mst_tx_qstop++;
- /* XXX blocks other traffic */
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
break;
#else
DPRINTF(sc, MWL_DEBUG_XMIT,
"%s: tail drop on q %d\n", __func__, txq->qnum);
sc->sc_stats.mst_tx_qdrop++;
continue;
#endif /* MWL_TX_NODROP */
}
/*
* Pass the frame to the h/w for transmission.
*/
if (mwl_tx_start(sc, ni, bf, m)) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
mwl_puttxbuf_head(txq, bf);
ieee80211_free_node(ni);
continue;
}
nqueued++;
if (nqueued >= mwl_txcoalesce) {
/*
* Poke the firmware to process queued frames;
* see below about (lack of) locking.
*/
nqueued = 0;
mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
}
}
if (nqueued) {
/*
* NB: We don't need to lock against tx done because
* this just prods the firmware to check the transmit
* descriptors. The firmware will also start fetching
* descriptors by itself if it notices new ones are
* present when it goes to deliver a tx done interrupt
* to the host. So if we race with tx done processing
* it's ok. Delivering the kick here rather than in
* mwl_tx_start is an optimization to avoid poking the
* firmware for each packet.
*
* NB: the queue id isn't used so 0 is ok.
*/
mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
}
}
static int
mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
- struct mwl_softc *sc = ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
struct mwl_txbuf *bf;
struct mwl_txq *txq;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
+ if (!sc->sc_running || sc->sc_invalid) {
ieee80211_free_node(ni);
m_freem(m);
return ENETDOWN;
}
/*
* Grab a TX buffer and associated resources.
* Note that we depend on the classification
* by the 802.11 layer to get to the right h/w
* queue. Management frames must ALWAYS go on
* queue 1 but we cannot just force that here
* because we may receive non-mgt frames.
*/
txq = sc->sc_ac2q[M_WME_GETAC(m)];
bf = mwl_gettxbuf(sc, txq);
if (bf == NULL) {
sc->sc_stats.mst_tx_qstop++;
- /* XXX blocks other traffic */
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
ieee80211_free_node(ni);
m_freem(m);
return ENOBUFS;
}
/*
* Pass the frame to the h/w for transmission.
*/
if (mwl_tx_start(sc, ni, bf, m)) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
mwl_puttxbuf_head(txq, bf);
ieee80211_free_node(ni);
return EIO; /* XXX */
}
/*
* NB: We don't need to lock against tx done because
* this just prods the firmware to check the transmit
* descriptors. The firmware will also start fetching
* descriptors by itself if it notices new ones are
* present when it goes to deliver a tx done interrupt
* to the host. So if we race with tx done processing
* it's ok. Delivering the kick here rather than in
* mwl_tx_start is an optimization to avoid poking the
* firmware for each packet.
*
* NB: the queue id isn't used so 0 is ok.
*/
mwl_hal_txstart(sc->sc_mh, 0/*XXX*/);
return 0;
}
static int
mwl_media_change(struct ifnet *ifp)
{
struct ieee80211vap *vap = ifp->if_softc;
int error;
error = ieee80211_media_change(ifp);
/* NB: only the fixed rate can change and that doesn't need a reset */
if (error == ENETRESET) {
mwl_setrates(vap);
error = 0;
}
return error;
}
#ifdef MWL_DEBUG
static void
mwl_keyprint(struct mwl_softc *sc, const char *tag,
const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN])
{
static const char *ciphers[] = {
"WEP",
"TKIP",
"AES-CCM",
};
int i, n;
printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]);
for (i = 0, n = hk->keyLen; i < n; i++)
printf(" %02x", hk->key.aes[i]);
printf(" mac %s", ether_sprintf(mac));
if (hk->keyTypeId == KEY_TYPE_ID_TKIP) {
printf(" %s", "rxmic");
for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++)
printf(" %02x", hk->key.tkip.rxMic[i]);
printf(" txmic");
for (i = 0; i < sizeof(hk->key.tkip.txMic); i++)
printf(" %02x", hk->key.tkip.txMic[i]);
}
printf(" flags 0x%x\n", hk->keyFlags);
}
#endif
/*
* Allocate a key cache slot for a unicast key. The
* firmware handles key allocation and every station is
* guaranteed key space so we are always successful.
*/
static int
mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
- struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = vap->iv_ic->ic_softc;
if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
(k->wk_flags & IEEE80211_KEY_GROUP)) {
if (!(&vap->iv_nw_keys[0] <= k &&
k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
/* should not happen */
DPRINTF(sc, MWL_DEBUG_KEYCACHE,
"%s: bogus group key\n", __func__);
return 0;
}
/* give the caller what they requested */
*keyix = *rxkeyix = k - vap->iv_nw_keys;
} else {
/*
* Firmware handles key allocation.
*/
*keyix = *rxkeyix = 0;
}
return 1;
}
/*
* Delete a key entry allocated by mwl_key_alloc.
*/
static int
mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
- struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = vap->iv_ic->ic_softc;
struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
MWL_HAL_KEYVAL hk;
const uint8_t bcastaddr[IEEE80211_ADDR_LEN] =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
if (hvap == NULL) {
if (vap->iv_opmode != IEEE80211_M_WDS) {
/* XXX monitor mode? */
DPRINTF(sc, MWL_DEBUG_KEYCACHE,
"%s: no hvap for opmode %d\n", __func__,
vap->iv_opmode);
return 0;
}
hvap = MWL_VAP(vap)->mv_ap_hvap;
}
DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n",
__func__, k->wk_keyix);
memset(&hk, 0, sizeof(hk));
hk.keyIndex = k->wk_keyix;
switch (k->wk_cipher->ic_cipher) {
case IEEE80211_CIPHER_WEP:
hk.keyTypeId = KEY_TYPE_ID_WEP;
break;
case IEEE80211_CIPHER_TKIP:
hk.keyTypeId = KEY_TYPE_ID_TKIP;
break;
case IEEE80211_CIPHER_AES_CCM:
hk.keyTypeId = KEY_TYPE_ID_AES;
break;
default:
/* XXX should not happen */
DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
__func__, k->wk_cipher->ic_cipher);
return 0;
}
return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0); /*XXX*/
}
static __inline int
addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k)
{
if (k->wk_flags & IEEE80211_KEY_GROUP) {
if (k->wk_flags & IEEE80211_KEY_XMIT)
hk->keyFlags |= KEY_FLAG_TXGROUPKEY;
if (k->wk_flags & IEEE80211_KEY_RECV)
hk->keyFlags |= KEY_FLAG_RXGROUPKEY;
return 1;
} else
return 0;
}
/*
* Set the key cache contents for the specified key. Key cache
* slot(s) must already have been allocated by mwl_key_alloc.
*/
static int
mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
const uint8_t mac[IEEE80211_ADDR_LEN])
{
#define GRPXMIT (IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP)
/* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */
#define IEEE80211_IS_STATICKEY(k) \
(((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \
(GRPXMIT|IEEE80211_KEY_RECV))
- struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = vap->iv_ic->ic_softc;
struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
const struct ieee80211_cipher *cip = k->wk_cipher;
const uint8_t *macaddr;
MWL_HAL_KEYVAL hk;
KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0,
("s/w crypto set?"));
if (hvap == NULL) {
if (vap->iv_opmode != IEEE80211_M_WDS) {
/* XXX monitor mode? */
DPRINTF(sc, MWL_DEBUG_KEYCACHE,
"%s: no hvap for opmode %d\n", __func__,
vap->iv_opmode);
return 0;
}
hvap = MWL_VAP(vap)->mv_ap_hvap;
}
memset(&hk, 0, sizeof(hk));
hk.keyIndex = k->wk_keyix;
switch (cip->ic_cipher) {
case IEEE80211_CIPHER_WEP:
hk.keyTypeId = KEY_TYPE_ID_WEP;
hk.keyLen = k->wk_keylen;
if (k->wk_keyix == vap->iv_def_txkey)
hk.keyFlags = KEY_FLAG_WEP_TXKEY;
if (!IEEE80211_IS_STATICKEY(k)) {
/* NB: WEP is never used for the PTK */
(void) addgroupflags(&hk, k);
}
break;
case IEEE80211_CIPHER_TKIP:
hk.keyTypeId = KEY_TYPE_ID_TKIP;
hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16);
hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc;
hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID;
hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE;
if (!addgroupflags(&hk, k))
hk.keyFlags |= KEY_FLAG_PAIRWISE;
break;
case IEEE80211_CIPHER_AES_CCM:
hk.keyTypeId = KEY_TYPE_ID_AES;
hk.keyLen = k->wk_keylen;
if (!addgroupflags(&hk, k))
hk.keyFlags |= KEY_FLAG_PAIRWISE;
break;
default:
/* XXX should not happen */
DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n",
__func__, k->wk_cipher->ic_cipher);
return 0;
}
/*
* NB: tkip mic keys get copied here too; the layout
* just happens to match that in ieee80211_key.
*/
memcpy(hk.key.aes, k->wk_key, hk.keyLen);
/*
* Locate address of sta db entry for writing key;
* the convention unfortunately is somewhat different
* than how net80211, hostapd, and wpa_supplicant think.
*/
if (vap->iv_opmode == IEEE80211_M_STA) {
/*
* NB: keys plumbed before the sta reaches AUTH state
* will be discarded or written to the wrong sta db
* entry because iv_bss is meaningless. This is ok
* (right now) because we handle deferred plumbing of
* WEP keys when the sta reaches AUTH state.
*/
macaddr = vap->iv_bss->ni_bssid;
if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) {
/* XXX plumb to local sta db too for static key wep */
mwl_hal_keyset(hvap, &hk, vap->iv_myaddr);
}
} else if (vap->iv_opmode == IEEE80211_M_WDS &&
vap->iv_state != IEEE80211_S_RUN) {
/*
* Prior to RUN state a WDS vap will not have its BSS node
* set up, so we will plumb the key to the wrong mac
* address (it'll be our local address). Workaround
* this for the moment by grabbing the correct address.
*/
macaddr = vap->iv_des_bssid;
} else if ((k->wk_flags & GRPXMIT) == GRPXMIT)
macaddr = vap->iv_myaddr;
else
macaddr = mac;
KEYPRINTF(sc, &hk, macaddr);
return (mwl_hal_keyset(hvap, &hk, macaddr) == 0);
#undef IEEE80211_IS_STATICKEY
#undef GRPXMIT
}
/* unaligned little endian access */
#define LE_READ_2(p) \
((uint16_t) \
((((const uint8_t *)(p))[0] ) | \
(((const uint8_t *)(p))[1] << 8)))
#define LE_READ_4(p) \
((uint32_t) \
((((const uint8_t *)(p))[0] ) | \
(((const uint8_t *)(p))[1] << 8) | \
(((const uint8_t *)(p))[2] << 16) | \
(((const uint8_t *)(p))[3] << 24)))
/*
* Set the multicast filter contents into the hardware.
* XXX f/w has no support; just defer to the os.
*/
static void
mwl_setmcastfilter(struct mwl_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
#if 0
struct ether_multi *enm;
struct ether_multistep estep;
uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */
uint8_t *mp;
int nmc;
mp = macs;
nmc = 0;
ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm);
while (enm != NULL) {
/* XXX Punt on ranges. */
if (nmc == MWL_HAL_MCAST_MAX ||
!IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) {
ifp->if_flags |= IFF_ALLMULTI;
return;
}
IEEE80211_ADDR_COPY(mp, enm->enm_addrlo);
mp += IEEE80211_ADDR_LEN, nmc++;
ETHER_NEXT_MULTI(estep, enm);
}
ifp->if_flags &= ~IFF_ALLMULTI;
mwl_hal_setmcast(sc->sc_mh, nmc, macs);
-#else
- /* XXX no mcast filter support; we get everything */
- ifp->if_flags |= IFF_ALLMULTI;
#endif
}
static int
mwl_mode_init(struct mwl_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct mwl_hal *mh = sc->sc_mh;
/*
* NB: Ignore promisc in hostap mode; it's set by the
* bridge. This is wrong but we have no way to
* identify internal requests (from the bridge)
* versus external requests such as for tcpdump.
*/
- mwl_hal_setpromisc(mh, (ifp->if_flags & IFF_PROMISC) &&
+ mwl_hal_setpromisc(mh, ic->ic_promisc > 0 &&
ic->ic_opmode != IEEE80211_M_HOSTAP);
mwl_setmcastfilter(sc);
return 0;
}
/*
* Callback from the 802.11 layer after a multicast state change.
*/
static void
mwl_update_mcast(struct ieee80211com *ic)
{
struct mwl_softc *sc = ic->ic_softc;
mwl_setmcastfilter(sc);
}
/*
* Callback from the 802.11 layer after a promiscuous mode change.
* Note this interface does not check the operating mode as this
* is an internal callback and we are expected to honor the current
* state (e.g. this is used for setting the interface in promiscuous
* mode when operating in hostap mode to do ACS).
*/
static void
mwl_update_promisc(struct ieee80211com *ic)
{
struct mwl_softc *sc = ic->ic_softc;
- mwl_hal_setpromisc(sc->sc_mh,
- (ic->ic_ifp->if_flags & IFF_PROMISC) != 0);
+ mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0);
}
/*
* Callback from the 802.11 layer to update the slot time
* based on the current setting. We use it to notify the
* firmware of ERP changes and the f/w takes care of things
* like slot time and preamble.
*/
static void
mwl_updateslot(struct ieee80211com *ic)
{
struct mwl_softc *sc = ic->ic_softc;
struct mwl_hal *mh = sc->sc_mh;
int prot;
/* NB: can be called early; suppress needless cmds */
- if ((ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ if (!sc->sc_running)
return;
/*
* Calculate the ERP flags. The firmware will use
* this to carry out the appropriate measures.
*/
prot = 0;
if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0)
prot |= IEEE80211_ERP_NON_ERP_PRESENT;
if (ic->ic_flags & IEEE80211_F_USEPROT)
prot |= IEEE80211_ERP_USE_PROTECTION;
if (ic->ic_flags & IEEE80211_F_USEBARKER)
prot |= IEEE80211_ERP_LONG_PREAMBLE;
}
DPRINTF(sc, MWL_DEBUG_RESET,
"%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n",
__func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot,
ic->ic_flags);
mwl_hal_setgprot(mh, prot);
}
/*
* Setup the beacon frame.
*/
static int
mwl_beacon_setup(struct ieee80211vap *vap)
{
struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
struct ieee80211_node *ni = vap->iv_bss;
struct ieee80211_beacon_offsets bo;
struct mbuf *m;
m = ieee80211_beacon_alloc(ni, &bo);
if (m == NULL)
return ENOBUFS;
mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len);
m_free(m);
return 0;
}
/*
* Update the beacon frame in response to a change.
*/
static void
mwl_beacon_update(struct ieee80211vap *vap, int item)
{
struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
struct ieee80211com *ic = vap->iv_ic;
KASSERT(hvap != NULL, ("no beacon"));
switch (item) {
case IEEE80211_BEACON_ERP:
mwl_updateslot(ic);
break;
case IEEE80211_BEACON_HTINFO:
mwl_hal_setnprotmode(hvap,
MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE));
break;
case IEEE80211_BEACON_CAPS:
case IEEE80211_BEACON_WME:
case IEEE80211_BEACON_APPIE:
case IEEE80211_BEACON_CSA:
break;
case IEEE80211_BEACON_TIM:
/* NB: firmware always forms TIM */
return;
}
/* XXX retain beacon frame and update */
mwl_beacon_setup(vap);
}
static void
mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
bus_addr_t *paddr = (bus_addr_t*) arg;
KASSERT(error == 0, ("error %u on bus_dma callback", error));
*paddr = segs->ds_addr;
}
#ifdef MWL_HOST_PS_SUPPORT
/*
* Handle power save station occupancy changes.
*/
static void
mwl_update_ps(struct ieee80211vap *vap, int nsta)
{
struct mwl_vap *mvp = MWL_VAP(vap);
if (nsta == 0 || mvp->mv_last_ps_sta == 0)
mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta);
mvp->mv_last_ps_sta = nsta;
}
/*
* Handle associated station power save state changes.
*/
static int
mwl_set_tim(struct ieee80211_node *ni, int set)
{
struct ieee80211vap *vap = ni->ni_vap;
struct mwl_vap *mvp = MWL_VAP(vap);
if (mvp->mv_set_tim(ni, set)) { /* NB: state change */
mwl_hal_setpowersave_sta(mvp->mv_hvap,
IEEE80211_AID(ni->ni_associd), set);
return 1;
} else
return 0;
}
#endif /* MWL_HOST_PS_SUPPORT */
static int
mwl_desc_setup(struct mwl_softc *sc, const char *name,
struct mwl_descdma *dd,
int nbuf, size_t bufsize, int ndesc, size_t descsize)
{
- struct ifnet *ifp = sc->sc_ifp;
uint8_t *ds;
int error;
DPRINTF(sc, MWL_DEBUG_RESET,
"%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n",
__func__, name, nbuf, (uintmax_t) bufsize,
ndesc, (uintmax_t) descsize);
dd->dd_name = name;
dd->dd_desc_len = nbuf * ndesc * descsize;
/*
* Setup DMA descriptor area.
*/
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
PAGE_SIZE, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
dd->dd_desc_len, /* maxsize */
1, /* nsegments */
dd->dd_desc_len, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&dd->dd_dmat);
if (error != 0) {
- if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
+ device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name);
return error;
}
/* allocate descriptors */
error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
&dd->dd_dmamap);
if (error != 0) {
- if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
+ device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, "
"error %u\n", nbuf * ndesc, dd->dd_name, error);
goto fail1;
}
error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
dd->dd_desc, dd->dd_desc_len,
mwl_load_cb, &dd->dd_desc_paddr,
BUS_DMA_NOWAIT);
if (error != 0) {
- if_printf(ifp, "unable to map %s descriptors, error %u\n",
+ device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n",
dd->dd_name, error);
goto fail2;
}
ds = dd->dd_desc;
memset(ds, 0, dd->dd_desc_len);
DPRINTF(sc, MWL_DEBUG_RESET,
"%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n",
__func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
(uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
return 0;
fail2:
bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
fail1:
bus_dma_tag_destroy(dd->dd_dmat);
memset(dd, 0, sizeof(*dd));
return error;
#undef DS2PHYS
}
static void
mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd)
{
bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
bus_dma_tag_destroy(dd->dd_dmat);
memset(dd, 0, sizeof(*dd));
}
/*
* Construct a tx q's free list. The order of entries on
* the list must reflect the physical layout of tx descriptors
* because the firmware pre-fetches descriptors.
*
* XXX might be better to use indices into the buffer array.
*/
static void
mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq)
{
struct mwl_txbuf *bf;
int i;
bf = txq->dma.dd_bufptr;
STAILQ_INIT(&txq->free);
for (i = 0; i < mwl_txbuf; i++, bf++)
STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);
txq->nfree = i;
}
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
static int
mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq)
{
- struct ifnet *ifp = sc->sc_ifp;
int error, bsize, i;
struct mwl_txbuf *bf;
struct mwl_txdesc *ds;
error = mwl_desc_setup(sc, "tx", &txq->dma,
mwl_txbuf, sizeof(struct mwl_txbuf),
MWL_TXDESC, sizeof(struct mwl_txdesc));
if (error != 0)
return error;
/* allocate and setup tx buffers */
bsize = mwl_txbuf * sizeof(struct mwl_txbuf);
bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
if (bf == NULL) {
- if_printf(ifp, "malloc of %u tx buffers failed\n",
+ device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n",
mwl_txbuf);
return ENOMEM;
}
txq->dma.dd_bufptr = bf;
ds = txq->dma.dd_desc;
for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) {
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(&txq->dma, ds);
error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
&bf->bf_dmamap);
if (error != 0) {
- if_printf(ifp, "unable to create dmamap for tx "
+ device_printf(sc->sc_dev, "unable to create dmamap for tx "
"buffer %u, error %u\n", i, error);
return error;
}
}
mwl_txq_reset(sc, txq);
return 0;
}
static void
mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq)
{
struct mwl_txbuf *bf;
int i;
bf = txq->dma.dd_bufptr;
for (i = 0; i < mwl_txbuf; i++, bf++) {
KASSERT(bf->bf_m == NULL, ("mbuf on free list"));
KASSERT(bf->bf_node == NULL, ("node on free list"));
if (bf->bf_dmamap != NULL)
bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
}
STAILQ_INIT(&txq->free);
txq->nfree = 0;
if (txq->dma.dd_bufptr != NULL) {
free(txq->dma.dd_bufptr, M_MWLDEV);
txq->dma.dd_bufptr = NULL;
}
if (txq->dma.dd_desc_len != 0)
mwl_desc_cleanup(sc, &txq->dma);
}
static int
mwl_rxdma_setup(struct mwl_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
int error, jumbosize, bsize, i;
struct mwl_rxbuf *bf;
struct mwl_jumbo *rbuf;
struct mwl_rxdesc *ds;
caddr_t data;
error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma,
mwl_rxdesc, sizeof(struct mwl_rxbuf),
1, sizeof(struct mwl_rxdesc));
if (error != 0)
return error;
/*
* Receive is done to a private pool of jumbo buffers.
* This allows us to attach to mbuf's and avoid re-mapping
* memory on each rx we post. We allocate a large chunk
* of memory and manage it in the driver. The mbuf free
* callback method is used to reclaim frames after sending
* them up the stack. By default we allocate 2x the number of
* rx descriptors configured so we have some slop to hold
* us while frames are processed.
*/
if (mwl_rxbuf < 2*mwl_rxdesc) {
- if_printf(ifp,
+ device_printf(sc->sc_dev,
"too few rx dma buffers (%d); increasing to %d\n",
mwl_rxbuf, 2*mwl_rxdesc);
mwl_rxbuf = 2*mwl_rxdesc;
}
jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE);
sc->sc_rxmemsize = mwl_rxbuf*jumbosize;
error = bus_dma_tag_create(sc->sc_dmat, /* parent */
PAGE_SIZE, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
sc->sc_rxmemsize, /* maxsize */
1, /* nsegments */
sc->sc_rxmemsize, /* maxsegsize */
BUS_DMA_ALLOCNOW, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&sc->sc_rxdmat);
if (error != 0) {
- if_printf(ifp, "could not create rx DMA tag\n");
+ device_printf(sc->sc_dev, "could not create rx DMA tag\n");
return error;
}
error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem,
BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
&sc->sc_rxmap);
if (error != 0) {
- if_printf(ifp, "could not alloc %ju bytes of rx DMA memory\n",
+ device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n",
(uintmax_t) sc->sc_rxmemsize);
return error;
}
error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap,
sc->sc_rxmem, sc->sc_rxmemsize,
mwl_load_cb, &sc->sc_rxmem_paddr,
BUS_DMA_NOWAIT);
if (error != 0) {
- if_printf(ifp, "could not load rx DMA map\n");
+ device_printf(sc->sc_dev, "could not load rx DMA map\n");
return error;
}
/*
* Allocate rx buffers and set them up.
*/
bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf);
bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO);
if (bf == NULL) {
- if_printf(ifp, "malloc of %u rx buffers failed\n", bsize);
+ device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", bsize);
return error;
}
sc->sc_rxdma.dd_bufptr = bf;
STAILQ_INIT(&sc->sc_rxbuf);
ds = sc->sc_rxdma.dd_desc;
for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) {
bf->bf_desc = ds;
bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds);
/* pre-assign dma buffer */
bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
/* NB: tail is intentional to preserve descriptor order */
STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}
/*
* Place remainder of dma memory buffers on the free list.
*/
SLIST_INIT(&sc->sc_rxfree);
for (; i < mwl_rxbuf; i++) {
data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize);
rbuf = MWL_JUMBO_DATA2BUF(data);
SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next);
sc->sc_nrxfree++;
}
return 0;
}
#undef DS2PHYS
static void
mwl_rxdma_cleanup(struct mwl_softc *sc)
{
if (sc->sc_rxmem_paddr != 0) {
bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap);
sc->sc_rxmem_paddr = 0;
}
if (sc->sc_rxmem != NULL) {
bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap);
sc->sc_rxmem = NULL;
}
if (sc->sc_rxdma.dd_bufptr != NULL) {
free(sc->sc_rxdma.dd_bufptr, M_MWLDEV);
sc->sc_rxdma.dd_bufptr = NULL;
}
if (sc->sc_rxdma.dd_desc_len != 0)
mwl_desc_cleanup(sc, &sc->sc_rxdma);
}
static int
mwl_dma_setup(struct mwl_softc *sc)
{
int error, i;
error = mwl_rxdma_setup(sc);
if (error != 0) {
mwl_rxdma_cleanup(sc);
return error;
}
for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
error = mwl_txdma_setup(sc, &sc->sc_txq[i]);
if (error != 0) {
mwl_dma_cleanup(sc);
return error;
}
}
return 0;
}
static void
mwl_dma_cleanup(struct mwl_softc *sc)
{
int i;
for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
mwl_txdma_cleanup(sc, &sc->sc_txq[i]);
mwl_rxdma_cleanup(sc);
}
static struct ieee80211_node *
mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct ieee80211com *ic = vap->iv_ic;
- struct mwl_softc *sc = ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
const size_t space = sizeof(struct mwl_node);
struct mwl_node *mn;
mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
if (mn == NULL) {
/* XXX stat+msg */
return NULL;
}
DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn);
return &mn->mn_node;
}
static void
mwl_node_cleanup(struct ieee80211_node *ni)
{
struct ieee80211com *ic = ni->ni_ic;
- struct mwl_softc *sc = ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
struct mwl_node *mn = MWL_NODE(ni);
DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n",
__func__, ni, ni->ni_ic, mn->mn_staid);
if (mn->mn_staid != 0) {
struct ieee80211vap *vap = ni->ni_vap;
if (mn->mn_hvap != NULL) {
if (vap->iv_opmode == IEEE80211_M_STA)
mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr);
else
mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr);
}
/*
* NB: legacy WDS peer sta db entry is installed using
* the associate ap's hvap; use it again to delete it.
* XXX can vap be NULL?
*/
else if (vap->iv_opmode == IEEE80211_M_WDS &&
MWL_VAP(vap)->mv_ap_hvap != NULL)
mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap,
ni->ni_macaddr);
delstaid(sc, mn->mn_staid);
mn->mn_staid = 0;
}
sc->sc_node_cleanup(ni);
}
/*
* Reclaim rx dma buffers from packets sitting on the ampdu
* reorder queue for a station. We replace buffers with a
* system cluster (if available).
*/
static void
mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap)
{
#if 0
int i, n, off;
struct mbuf *m;
void *cl;
n = rap->rxa_qframes;
for (i = 0; i < rap->rxa_wnd && n > 0; i++) {
m = rap->rxa_m[i];
if (m == NULL)
continue;
n--;
/* our dma buffers have a well-known free routine */
if ((m->m_flags & M_EXT) == 0 ||
m->m_ext.ext_free != mwl_ext_free)
continue;
/*
* Try to allocate a cluster and move the data.
*/
off = m->m_data - m->m_ext.ext_buf;
if (off + m->m_pkthdr.len > MCLBYTES) {
/* XXX no AMSDU for now */
continue;
}
cl = pool_cache_get_paddr(&mclpool_cache, 0,
&m->m_ext.ext_paddr);
if (cl != NULL) {
/*
* Copy the existing data to the cluster, remove
* the rx dma buffer, and attach the cluster in
* its place. Note we preserve the offset to the
* data so frames being bridged can still prepend
* their headers without adding another mbuf.
*/
memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len);
MEXTREMOVE(m);
MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache);
/* setup mbuf like _MCLGET does */
m->m_flags |= M_CLUSTER | M_EXT_RW;
_MOWNERREF(m, M_EXT | M_CLUSTER);
/* NB: m_data is clobbered by MEXTADDR, adjust */
m->m_data += off;
}
}
#endif
}
/*
* Callback to reclaim resources. We first let the
* net80211 layer do its thing, then if we are still
* blocked by a lack of rx dma buffers we walk the ampdu
* reorder q's to reclaim buffers by copying to a system
* cluster.
*/
static void
mwl_node_drain(struct ieee80211_node *ni)
{
struct ieee80211com *ic = ni->ni_ic;
- struct mwl_softc *sc = ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
struct mwl_node *mn = MWL_NODE(ni);
DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n",
__func__, ni, ni->ni_vap, mn->mn_staid);
/* NB: call up first to age out ampdu q's */
sc->sc_node_drain(ni);
/* XXX better to not check low water mark? */
if (sc->sc_rxblocked && mn->mn_staid != 0 &&
(ni->ni_flags & IEEE80211_NODE_HT)) {
uint8_t tid;
/*
* Walk the reorder q and reclaim rx dma buffers by copying
* the packet contents into clusters.
*/
for (tid = 0; tid < WME_NUM_TID; tid++) {
struct ieee80211_rx_ampdu *rap;
rap = &ni->ni_rx_ampdu[tid];
if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0)
continue;
if (rap->rxa_qframes)
mwl_ampdu_rxdma_reclaim(rap);
}
}
}
static void
mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
{
*rssi = ni->ni_ic->ic_node_getrssi(ni);
#ifdef MWL_ANT_INFO_SUPPORT
#if 0
/* XXX need to smooth data */
*noise = -MWL_NODE_CONST(ni)->mn_ai.nf;
#else
*noise = -95; /* XXX */
#endif
#else
*noise = -95; /* XXX */
#endif
}
/*
* Convert Hardware per-antenna rssi info to common format:
* Let a1, a2, a3 represent the amplitudes per chain
* Let amax represent max[a1, a2, a3]
* Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax)
* Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax)
* We store a table that is 4*20*log10(idx); the extra factor of 4 is
* kept to maintain some extra precision.
*
* Values are stored in .5 db format capped at 127.
*/
static void
mwl_node_getmimoinfo(const struct ieee80211_node *ni,
struct ieee80211_mimo_info *mi)
{
#define CVT(_dst, _src) do { \
(_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2); \
(_dst) = (_dst) > 64 ? 127 : ((_dst) << 1); \
} while (0)
static const int8_t logdbtbl[32] = {
0, 0, 24, 38, 48, 56, 62, 68,
72, 76, 80, 83, 86, 89, 92, 94,
96, 98, 100, 102, 104, 106, 107, 109,
110, 112, 113, 115, 116, 117, 118, 119
};
const struct mwl_node *mn = MWL_NODE_CONST(ni);
uint8_t rssi = mn->mn_ai.rsvd1/2; /* XXX */
uint32_t rssi_max;
rssi_max = mn->mn_ai.rssi_a;
if (mn->mn_ai.rssi_b > rssi_max)
rssi_max = mn->mn_ai.rssi_b;
if (mn->mn_ai.rssi_c > rssi_max)
rssi_max = mn->mn_ai.rssi_c;
CVT(mi->rssi[0], mn->mn_ai.rssi_a);
CVT(mi->rssi[1], mn->mn_ai.rssi_b);
CVT(mi->rssi[2], mn->mn_ai.rssi_c);
mi->noise[0] = mn->mn_ai.nf_a;
mi->noise[1] = mn->mn_ai.nf_b;
mi->noise[2] = mn->mn_ai.nf_c;
#undef CVT
}
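To make the table arithmetic above concrete, here is a small standalone check (illustrative only; the sample rssi and per-chain amplitude values are made up) showing how the CVT() step turns a logdbtbl difference into 20*log10(a/amax) dB and then into the .5 dB units reported in ieee80211_mimo_info.
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	static const int8_t logdbtbl[32] = {
		0, 0, 24, 38, 48, 56, 62, 68,
		72, 76, 80, 83, 86, 89, 92, 94,
		96, 98, 100, 102, 104, 106, 107, 109,
		110, 112, 113, 115, 116, 117, 118, 119
	};
	int rssi = 40;		/* made-up combined rssi (mn_ai.rsvd1/2) */
	int a = 10, amax = 20;	/* made-up chain amplitude vs. strongest chain */
	int v;

	/*
	 * Same arithmetic as CVT(): the table is 4*20*log10(idx), so the
	 * ">> 2" yields dB (as in the driver, this relies on an arithmetic
	 * right shift of a negative value).
	 */
	v = rssi + ((logdbtbl[a] - logdbtbl[amax]) >> 2);
	v = v > 64 ? 127 : v << 1;	/* .5 dB units, capped at 127 */

	/* logdbtbl[10] - logdbtbl[20] = 80 - 104 = -24, i.e. -6 dB => 2*(40-6) */
	assert(v == 68);
	printf("per-chain rssi: %d (.5 dB units)\n", v);
	return (0);
}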
static __inline void *
mwl_getrxdma(struct mwl_softc *sc)
{
struct mwl_jumbo *buf;
void *data;
/*
* Allocate from jumbo pool.
*/
MWL_RXFREE_LOCK(sc);
buf = SLIST_FIRST(&sc->sc_rxfree);
if (buf == NULL) {
DPRINTF(sc, MWL_DEBUG_ANY,
"%s: out of rx dma buffers\n", __func__);
sc->sc_stats.mst_rx_nodmabuf++;
data = NULL;
} else {
SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
sc->sc_nrxfree--;
data = MWL_JUMBO_BUF2DATA(buf);
}
MWL_RXFREE_UNLOCK(sc);
return data;
}
static __inline void
mwl_putrxdma(struct mwl_softc *sc, void *data)
{
struct mwl_jumbo *buf;
/* XXX bounds check data */
MWL_RXFREE_LOCK(sc);
buf = MWL_JUMBO_DATA2BUF(data);
SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next);
sc->sc_nrxfree++;
MWL_RXFREE_UNLOCK(sc);
}
static int
mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf)
{
struct mwl_rxdesc *ds;
ds = bf->bf_desc;
if (bf->bf_data == NULL) {
bf->bf_data = mwl_getrxdma(sc);
if (bf->bf_data == NULL) {
/* mark descriptor to be skipped */
ds->RxControl = EAGLE_RXD_CTRL_OS_OWN;
/* NB: don't need PREREAD */
MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE);
sc->sc_stats.mst_rxbuf_failed++;
return ENOMEM;
}
}
/*
* NB: DMA buffer contents is known to be unmodified
* so there's no need to flush the data cache.
*/
/*
* Setup descriptor.
*/
ds->QosCtrl = 0;
ds->RSSI = 0;
ds->Status = EAGLE_RXD_STATUS_IDLE;
ds->Channel = 0;
ds->PktLen = htole16(MWL_AGGR_SIZE);
ds->SQ2 = 0;
ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data));
/* NB: don't touch pPhysNext, set once */
ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
return 0;
}
static void
mwl_ext_free(struct mbuf *m, void *data, void *arg)
{
struct mwl_softc *sc = arg;
/* XXX bounds check data */
mwl_putrxdma(sc, data);
/*
* If we were previously blocked by a lack of rx dma buffers
* check if we now have enough to restart rx interrupt handling.
* NB: we know we are called at splvm which is above splnet.
*/
if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) {
sc->sc_rxblocked = 0;
mwl_hal_intrset(sc->sc_mh, sc->sc_imask);
}
}
struct mwl_frame_bar {
u_int8_t i_fc[2];
u_int8_t i_dur[2];
u_int8_t i_ra[IEEE80211_ADDR_LEN];
u_int8_t i_ta[IEEE80211_ADDR_LEN];
/* ctl, seq, FCS */
} __packed;
/*
* Like ieee80211_anyhdrsize, but handles BAR frames
* specially so the logic below to piece the 802.11
* header together works.
*/
static __inline int
mwl_anyhdrsize(const void *data)
{
const struct ieee80211_frame *wh = data;
if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
case IEEE80211_FC0_SUBTYPE_CTS:
case IEEE80211_FC0_SUBTYPE_ACK:
return sizeof(struct ieee80211_frame_ack);
case IEEE80211_FC0_SUBTYPE_BAR:
return sizeof(struct mwl_frame_bar);
}
return sizeof(struct ieee80211_frame_min);
} else
return ieee80211_hdrsize(data);
}
static void
mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data)
{
const struct ieee80211_frame *wh;
struct ieee80211_node *ni;
wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t));
ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
if (ni != NULL) {
ieee80211_notify_michael_failure(ni->ni_vap, wh, 0);
ieee80211_free_node(ni);
}
}
/*
* Convert hardware signal strength to rssi. The value
* provided by the device has the noise floor added in;
* we need to compensate for this but we don't have that
* so we use a fixed value.
*
* The offset of 8 is good for both 2.4 and 5GHz. The LNA
* offset is already set as part of the initial gain. This
* will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz.
*/
static __inline int
cvtrssi(uint8_t ssi)
{
int rssi = (int) ssi + 8;
/* XXX hack guess until we have a real noise floor */
rssi = 2*(87 - rssi); /* NB: .5 dBm units */
return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
}
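As a quick sanity check of the conversion above, a standalone re-derivation (illustrative only, not driver code) with two sample hardware values:
#include <assert.h>
#include <stdint.h>

/*
 * Re-derives cvtrssi() outside the driver: ssi = 50 gives
 * 2*(87 - (50 + 8)) = 58 in .5 dBm units; very small ssi values
 * clamp at the 127 cap.
 */
static int
cvtrssi_example(uint8_t ssi)
{
	int rssi = (int)ssi + 8;

	rssi = 2 * (87 - rssi);
	return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi);
}

int
main(void)
{
	assert(cvtrssi_example(50) == 58);
	assert(cvtrssi_example(0) == 127);
	return (0);
}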
static void
mwl_rx_proc(void *arg, int npending)
{
#define IEEE80211_DIR_DSTODS(wh) \
((((const struct ieee80211_frame *)wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
struct mwl_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct mwl_rxbuf *bf;
struct mwl_rxdesc *ds;
struct mbuf *m;
struct ieee80211_qosframe *wh;
struct ieee80211_qosframe_addr4 *wh4;
struct ieee80211_node *ni;
struct mwl_node *mn;
int off, len, hdrlen, pktlen, rssi, ntodo;
uint8_t *data, status;
void *newdata;
int16_t nf;
DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n",
__func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead),
RD4(sc, sc->sc_hwspecs.rxDescWrite));
nf = -96; /* XXX */
bf = sc->sc_rxnext;
for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) {
if (bf == NULL)
bf = STAILQ_FIRST(&sc->sc_rxbuf);
ds = bf->bf_desc;
data = bf->bf_data;
if (data == NULL) {
/*
* If data allocation failed previously there
* will be no buffer; try again to re-populate it.
* Note the firmware will not advance to the next
* descriptor with a dma buffer so we must mimic
* this or we'll get out of sync.
*/
DPRINTF(sc, MWL_DEBUG_ANY,
"%s: rx buf w/o dma memory\n", __func__);
(void) mwl_rxbuf_init(sc, bf);
sc->sc_stats.mst_rx_dmabufmissing++;
break;
}
MWL_RXDESC_SYNC(sc, ds,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN)
break;
#ifdef MWL_DEBUG
if (sc->sc_debug & MWL_DEBUG_RECV_DESC)
mwl_printrxbuf(bf, 0);
#endif
status = ds->Status;
if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
sc->sc_stats.mst_rx_crypto++;
/*
* NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR
* for backwards compatibility.
*/
if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR &&
(status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) {
/*
* MIC error, notify upper layers.
*/
bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap,
BUS_DMASYNC_POSTREAD);
mwl_handlemicerror(ic, data);
sc->sc_stats.mst_rx_tkipmic++;
}
/* XXX too painful to tap packets */
goto rx_next;
}
/*
* Sync the data buffer.
*/
len = le16toh(ds->PktLen);
bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD);
/*
* The 802.11 header is provided all or in part at the front;
* use it to calculate the true size of the header that we'll
* construct below. We use this to figure out where to copy
* payload prior to constructing the header.
*/
hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t));
off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4);
/* calculate rssi early so we can re-use for each aggregate */
rssi = cvtrssi(ds->RSSI);
pktlen = hdrlen + (len - off);
/*
* NB: we know our frame is at least as large as
* IEEE80211_MIN_LEN because there is a 4-address
* frame at the front. Hence there's no need to
* vet the packet length. If the frame in fact
* is too small it should be discarded at the
* net80211 layer.
*/
/*
* Attach dma buffer to an mbuf. We tried
* doing this based on the packet size (i.e.
* copying small packets) but it turns out to
* be a net loss. The tradeoff might be system
* dependent (cache architecture is important).
*/
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
DPRINTF(sc, MWL_DEBUG_ANY,
"%s: no rx mbuf\n", __func__);
sc->sc_stats.mst_rx_nombuf++;
goto rx_next;
}
/*
* Acquire the replacement dma buffer before
* processing the frame. If we're out of dma
* buffers we disable rx interrupts and wait
* for the free pool to reach mwl_rxdmalow buffers
* before starting to do work again. If the firmware
* runs out of descriptors then it will toss frames
* which is better than our doing it as that can
* starve our processing. It is also important that
* we always process rx'd frames in case they are
* A-MPDU as otherwise the host's view of the BA
* window may get out of sync with the firmware.
*/
newdata = mwl_getrxdma(sc);
if (newdata == NULL) {
/* NB: stat+msg in mwl_getrxdma */
m_free(m);
/* disable RX interrupt and mark state */
mwl_hal_intrset(sc->sc_mh,
sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY);
sc->sc_rxblocked = 1;
ieee80211_drain(ic);
/* XXX check rxblocked and immediately start again? */
goto rx_stop;
}
bf->bf_data = newdata;
/*
* Attach the dma buffer to the mbuf;
* mwl_rxbuf_init will re-setup the rx
* descriptor using the replacement dma
* buffer we just installed above.
*/
MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free,
data, sc, 0, EXT_NET_DRV);
m->m_data += off - hdrlen;
m->m_pkthdr.len = m->m_len = pktlen;
- m->m_pkthdr.rcvif = ifp;
/* NB: dma buffer assumed read-only */
/*
* Piece 802.11 header together.
*/
wh = mtod(m, struct ieee80211_qosframe *);
/* NB: don't need to do this sometimes but ... */
/* XXX special case so we can memcpy after m_devget? */
ovbcopy(data + sizeof(uint16_t), wh, hdrlen);
if (IEEE80211_QOS_HAS_SEQ(wh)) {
if (IEEE80211_DIR_DSTODS(wh)) {
wh4 = mtod(m,
struct ieee80211_qosframe_addr4*);
*(uint16_t *)wh4->i_qos = ds->QosCtrl;
} else {
*(uint16_t *)wh->i_qos = ds->QosCtrl;
}
}
/*
* The f/w strips WEP header but doesn't clear
* the WEP bit; mark the packet with M_WEP so
* net80211 will treat the data as decrypted.
* While here also clear the PWR_MGT bit since
* power save is handled by the firmware and
* passing this up will potentially cause the
* upper layer to put a station in power save
* (except when configured with MWL_HOST_PS_SUPPORT).
*/
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)
m->m_flags |= M_WEP;
#ifdef MWL_HOST_PS_SUPPORT
wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
#else
wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED |
IEEE80211_FC1_PWR_MGT);
#endif
if (ieee80211_radiotap_active(ic)) {
struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th;
tap->wr_flags = 0;
tap->wr_rate = ds->Rate;
tap->wr_antsignal = rssi + nf;
tap->wr_antnoise = nf;
}
if (IFF_DUMPPKTS_RECV(sc, wh)) {
ieee80211_dump_pkt(ic, mtod(m, caddr_t),
len, ds->Rate, rssi);
}
- if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
-
/* dispatch */
ni = ieee80211_find_rxnode(ic,
(const struct ieee80211_frame_min *) wh);
if (ni != NULL) {
mn = MWL_NODE(ni);
#ifdef MWL_ANT_INFO_SUPPORT
mn->mn_ai.rssi_a = ds->ai.rssi_a;
mn->mn_ai.rssi_b = ds->ai.rssi_b;
mn->mn_ai.rssi_c = ds->ai.rssi_c;
mn->mn_ai.rsvd1 = rssi;
#endif
/* tag AMPDU aggregates for reorder processing */
if (ni->ni_flags & IEEE80211_NODE_HT)
m->m_flags |= M_AMPDU;
(void) ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi, nf);
rx_next:
/* NB: ignore ENOMEM so we process more descriptors */
(void) mwl_rxbuf_init(sc, bf);
bf = STAILQ_NEXT(bf, bf_list);
}
rx_stop:
sc->sc_rxnext = bf;
- if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
- !IFQ_IS_EMPTY(&ifp->if_snd)) {
+ if (mbufq_first(&sc->sc_snd) != NULL) {
/* NB: kick fw; the tx thread may have been preempted */
mwl_hal_txstart(sc->sc_mh, 0);
- mwl_start(ifp);
+ mwl_start(sc);
}
#undef IEEE80211_DIR_DSTODS
}
static void
mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum)
{
struct mwl_txbuf *bf, *bn;
struct mwl_txdesc *ds;
MWL_TXQ_LOCK_INIT(sc, txq);
txq->qnum = qnum;
txq->txpri = 0; /* XXX */
#if 0
/* NB: q setup by mwl_txdma_setup XXX */
STAILQ_INIT(&txq->free);
#endif
STAILQ_FOREACH(bf, &txq->free, bf_list) {
bf->bf_txq = txq;
ds = bf->bf_desc;
bn = STAILQ_NEXT(bf, bf_list);
if (bn == NULL)
bn = STAILQ_FIRST(&txq->free);
ds->pPhysNext = htole32(bn->bf_daddr);
}
STAILQ_INIT(&txq->active);
}
/*
* Setup a hardware data transmit queue for the specified
* access category (AC). We record the mapping from ACs
* to h/w queues for use by mwl_tx_start.
*/
static int
mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype)
{
#define N(a) (sizeof(a)/sizeof(a[0]))
struct mwl_txq *txq;
if (ac >= N(sc->sc_ac2q)) {
device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
ac, N(sc->sc_ac2q));
return 0;
}
if (mvtype >= MWL_NUM_TX_QUEUES) {
device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n",
mvtype, MWL_NUM_TX_QUEUES);
return 0;
}
txq = &sc->sc_txq[mvtype];
mwl_txq_init(sc, txq, mvtype);
sc->sc_ac2q[ac] = txq;
return 1;
#undef N
}
/*
* Update WME parameters for a transmit queue.
*/
static int
mwl_txq_update(struct mwl_softc *sc, int ac)
{
#define MWL_EXPONENT_TO_VALUE(v) ((1<<v)-1)
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct mwl_txq *txq = sc->sc_ac2q[ac];
struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
struct mwl_hal *mh = sc->sc_mh;
int aifs, cwmin, cwmax, txoplim;
aifs = wmep->wmep_aifsn;
/* XXX in sta mode need to pass log values for cwmin/max */
cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
txoplim = wmep->wmep_txopLimit; /* NB: units of 32us */
if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) {
device_printf(sc->sc_dev, "unable to update hardware queue "
"parameters for %s traffic!\n",
ieee80211_wme_acnames[ac]);
return 0;
}
return 1;
#undef MWL_EXPONENT_TO_VALUE
}
/*
* Callback from the 802.11 layer to update WME parameters.
*/
static int
mwl_wme_update(struct ieee80211com *ic)
{
- struct mwl_softc *sc = ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
return !mwl_txq_update(sc, WME_AC_BE) ||
!mwl_txq_update(sc, WME_AC_BK) ||
!mwl_txq_update(sc, WME_AC_VI) ||
!mwl_txq_update(sc, WME_AC_VO) ? EIO : 0;
}
/*
* Reclaim resources for a setup queue.
*/
static void
mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq)
{
/* XXX hal work? */
MWL_TXQ_LOCK_DESTROY(txq);
}
/*
* Reclaim all tx queue resources.
*/
static void
mwl_tx_cleanup(struct mwl_softc *sc)
{
int i;
for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
mwl_tx_cleanupq(sc, &sc->sc_txq[i]);
}
static int
mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
{
struct mbuf *m;
int error;
/*
* Load the DMA map so any coalescing is done. This
* also calculates the number of descriptors we need.
*/
error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
bf->bf_segs, &bf->bf_nseg,
BUS_DMA_NOWAIT);
if (error == EFBIG) {
/* XXX packet requires too many descriptors */
bf->bf_nseg = MWL_TXDESC+1;
} else if (error != 0) {
sc->sc_stats.mst_tx_busdma++;
m_freem(m0);
return error;
}
/*
* Discard null packets and check for packets that
* require too many TX descriptors. We try to convert
* the latter to a cluster.
*/
if (error == EFBIG) { /* too many desc's, linearize */
sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
m = m_defrag(m0, M_NOWAIT);
#endif
if (m == NULL) {
m_freem(m0);
sc->sc_stats.mst_tx_nombuf++;
return ENOMEM;
}
m0 = m;
error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
bf->bf_segs, &bf->bf_nseg,
BUS_DMA_NOWAIT);
if (error != 0) {
sc->sc_stats.mst_tx_busdma++;
m_freem(m0);
return error;
}
KASSERT(bf->bf_nseg <= MWL_TXDESC,
("too many segments after defrag; nseg %u", bf->bf_nseg));
} else if (bf->bf_nseg == 0) { /* null packet, discard */
sc->sc_stats.mst_tx_nodata++;
m_freem(m0);
return EIO;
}
DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n",
__func__, m0, m0->m_pkthdr.len);
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
bf->bf_m = m0;
return 0;
}
static __inline int
mwl_cvtlegacyrate(int rate)
{
switch (rate) {
case 2: return 0;
case 4: return 1;
case 11: return 2;
case 22: return 3;
case 44: return 4;
case 12: return 5;
case 18: return 6;
case 24: return 7;
case 36: return 8;
case 48: return 9;
case 72: return 10;
case 96: return 11;
case 108:return 12;
}
return 0;
}
/*
* Calculate fixed tx rate information per client state;
* this value is suitable for writing to the Format field
* of a tx descriptor.
*/
static uint16_t
mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni)
{
uint16_t fmt;
fmt = SM(3, EAGLE_TXD_ANTENNA)
| (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ?
EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI);
if (rate & IEEE80211_RATE_MCS) { /* HT MCS */
fmt |= EAGLE_TXD_FORMAT_HT
/* NB: 0x80 implicitly stripped from ucastrate */
| SM(rate, EAGLE_TXD_RATE);
/* XXX short/long GI may be wrong; re-check */
if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
fmt |= EAGLE_TXD_CHW_40
| (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ?
EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
} else {
fmt |= EAGLE_TXD_CHW_20
| (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ?
EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG);
}
} else { /* legacy rate */
fmt |= EAGLE_TXD_FORMAT_LEGACY
| SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE)
| EAGLE_TXD_CHW_20
/* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */
| (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ?
EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG);
}
return fmt;
}
static int
mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf,
struct mbuf *m0)
{
#define IEEE80211_DIR_DSTODS(wh) \
((wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = ni->ni_vap;
int error, iswep, ismcast;
int hdrlen, copyhdrlen, pktlen;
struct mwl_txdesc *ds;
struct mwl_txq *txq;
struct ieee80211_frame *wh;
struct mwltxrec *tr;
struct mwl_node *mn;
uint16_t qos;
#if MWL_TXDESC > 1
int i;
#endif
wh = mtod(m0, struct ieee80211_frame *);
iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED;
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
hdrlen = ieee80211_anyhdrsize(wh);
copyhdrlen = hdrlen;
pktlen = m0->m_pkthdr.len;
if (IEEE80211_QOS_HAS_SEQ(wh)) {
if (IEEE80211_DIR_DSTODS(wh)) {
qos = *(uint16_t *)
(((struct ieee80211_qosframe_addr4 *) wh)->i_qos);
copyhdrlen -= sizeof(qos);
} else
qos = *(uint16_t *)
(((struct ieee80211_qosframe *) wh)->i_qos);
} else
qos = 0;
if (iswep) {
const struct ieee80211_cipher *cip;
struct ieee80211_key *k;
/*
* Construct the 802.11 header+trailer for an encrypted
* frame. The only reason this can fail is because of an
* unknown or unsupported cipher/key type.
*
* NB: we do this even though the firmware will ignore
* what we've done for WEP and TKIP as we need the
* ExtIV filled in for CCMP and this also adjusts
* the headers which simplifies our work below.
*/
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
/*
* This can happen when the key is yanked after the
* frame was queued. Just discard the frame; the
* 802.11 layer counts failures and provides
* debugging/diagnostics.
*/
m_freem(m0);
return EIO;
}
/*
* Adjust the packet length for the crypto additions
* done during encap and any other bits that the f/w
* will add later on.
*/
cip = k->wk_cipher;
pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer;
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
if (ieee80211_radiotap_active_vap(vap)) {
sc->sc_tx_th.wt_flags = 0; /* XXX */
if (iswep)
sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
#if 0
sc->sc_tx_th.wt_rate = ds->DataRate;
#endif
sc->sc_tx_th.wt_txpower = ni->ni_txpower;
sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
ieee80211_radiotap_tx(vap, m0);
}
/*
* Copy up/down the 802.11 header; the firmware requires
* we present a 2-byte payload length followed by a
* 4-address header (w/o QoS), followed (optionally) by
* any WEP/ExtIV header (but only filled in for CCMP).
* We are assured the mbuf has sufficient headroom to
* prepend in-place by the setup of ic_headroom in
* mwl_attach.
*/
if (hdrlen < sizeof(struct mwltxrec)) {
const int space = sizeof(struct mwltxrec) - hdrlen;
if (M_LEADINGSPACE(m0) < space) {
/* NB: should never happen */
device_printf(sc->sc_dev,
"not enough headroom, need %d found %zd, "
"m_flags 0x%x m_len %d\n",
space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len);
ieee80211_dump_pkt(ic,
mtod(m0, const uint8_t *), m0->m_len, 0, -1);
m_freem(m0);
sc->sc_stats.mst_tx_noheadroom++;
return EIO;
}
M_PREPEND(m0, space, M_NOWAIT);
}
tr = mtod(m0, struct mwltxrec *);
if (wh != (struct ieee80211_frame *) &tr->wh)
ovbcopy(wh, &tr->wh, hdrlen);
/*
* Note: the "firmware length" is actually the length
* of the fully formed "802.11 payload". That is, it's
* everything except for the 802.11 header. In particular
* this includes all crypto material including the MIC!
*/
tr->fwlen = htole16(pktlen - hdrlen);
/*
* Load the DMA map so any coalescing is done. This
* also calculates the number of descriptors we need.
*/
error = mwl_tx_dmasetup(sc, bf, m0);
if (error != 0) {
/* NB: stat collected in mwl_tx_dmasetup */
DPRINTF(sc, MWL_DEBUG_XMIT,
"%s: unable to setup dma\n", __func__);
return error;
}
bf->bf_node = ni; /* NB: held reference */
m0 = bf->bf_m; /* NB: may have changed */
tr = mtod(m0, struct mwltxrec *);
wh = (struct ieee80211_frame *)&tr->wh;
/*
* Formulate tx descriptor.
*/
ds = bf->bf_desc;
txq = bf->bf_txq;
ds->QosCtrl = qos; /* NB: already little-endian */
#if MWL_TXDESC == 1
/*
* NB: multiframes should be zero because the descriptors
* are initialized to zero. This should handle the case
* where the driver is built with MWL_TXDESC=1 but we are
* using firmware with multi-segment support.
*/
ds->PktPtr = htole32(bf->bf_segs[0].ds_addr);
ds->PktLen = htole16(bf->bf_segs[0].ds_len);
#else
ds->multiframes = htole32(bf->bf_nseg);
ds->PktLen = htole16(m0->m_pkthdr.len);
for (i = 0; i < bf->bf_nseg; i++) {
ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr);
ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len);
}
#endif
/* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */
ds->Format = 0;
ds->pad = 0;
ds->ack_wcb_addr = 0;
mn = MWL_NODE(ni);
/*
* Select transmit rate.
*/
switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
case IEEE80211_FC0_TYPE_MGT:
sc->sc_stats.mst_tx_mgmt++;
/* fall thru... */
case IEEE80211_FC0_TYPE_CTL:
/* NB: assign to BE q to avoid bursting */
ds->TxPriority = MWL_WME_AC_BE;
break;
case IEEE80211_FC0_TYPE_DATA:
if (!ismcast) {
const struct ieee80211_txparam *tp = ni->ni_txparms;
/*
* EAPOL frames get forced to a fixed rate and w/o
* aggregation; otherwise check for any fixed rate
* for the client (may depend on association state).
*/
if (m0->m_flags & M_EAPOL) {
const struct mwl_vap *mvp = MWL_VAP_CONST(vap);
ds->Format = mvp->mv_eapolformat;
ds->pad = htole16(
EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR);
} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
/* XXX pre-calculate per node */
ds->Format = htole16(
mwl_calcformat(tp->ucastrate, ni));
ds->pad = htole16(EAGLE_TXD_FIXED_RATE);
}
/* NB: EAPOL frames will never have qos set */
if (qos == 0)
ds->TxPriority = txq->qnum;
#if MWL_MAXBA > 3
else if (mwl_bastream_match(&mn->mn_ba[3], qos))
ds->TxPriority = mn->mn_ba[3].txq;
#endif
#if MWL_MAXBA > 2
else if (mwl_bastream_match(&mn->mn_ba[2], qos))
ds->TxPriority = mn->mn_ba[2].txq;
#endif
#if MWL_MAXBA > 1
else if (mwl_bastream_match(&mn->mn_ba[1], qos))
ds->TxPriority = mn->mn_ba[1].txq;
#endif
#if MWL_MAXBA > 0
else if (mwl_bastream_match(&mn->mn_ba[0], qos))
ds->TxPriority = mn->mn_ba[0].txq;
#endif
else
ds->TxPriority = txq->qnum;
} else
ds->TxPriority = txq->qnum;
break;
default:
- if_printf(ifp, "bogus frame type 0x%x (%s)\n",
+ device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
sc->sc_stats.mst_tx_badframetype++;
m_freem(m0);
return EIO;
}
if (IFF_DUMPPKTS_XMIT(sc))
ieee80211_dump_pkt(ic,
mtod(m0, const uint8_t *)+sizeof(uint16_t),
m0->m_len - sizeof(uint16_t), ds->DataRate, -1);
MWL_TXQ_LOCK(txq);
ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED);
STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
sc->sc_tx_timer = 5;
MWL_TXQ_UNLOCK(txq);
return 0;
#undef IEEE80211_DIR_DSTODS
}
static __inline int
mwl_cvtlegacyrix(int rix)
{
#define N(x) (sizeof(x)/sizeof(x[0]))
static const int ieeerates[] =
{ 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
return (rix < N(ieeerates) ? ieeerates[rix] : 0);
#undef N
}
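The table in mwl_cvtlegacyrix() above maps a firmware legacy rate index to a net80211 rate code in the usual 0.5 Mb/s units (index 2 yields 11, i.e. 5.5 Mb/s), returning 0 for out-of-range indices. A minimal standalone sketch of that mapping, reusing only the table shown above (illustrative only, not driver code):

#include <stdio.h>

static const int ieeerates[] =
    { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };

int
main(void)
{
	size_t rix;

	/* print each firmware rate index and its IEEE rate code in Mb/s */
	for (rix = 0; rix < sizeof(ieeerates) / sizeof(ieeerates[0]); rix++)
		printf("rix %zu -> rate code %d (%.1f Mb/s)\n",
		    rix, ieeerates[rix], ieeerates[rix] / 2.0);
	return (0);
}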
/*
* Process completed xmit descriptors from the specified queue.
*/
static int
mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
{
#define EAGLE_TXD_STATUS_MCAST \
(EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX)
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct mwl_txbuf *bf;
struct mwl_txdesc *ds;
struct ieee80211_node *ni;
struct mwl_node *an;
int nreaped;
uint32_t status;
DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum);
for (nreaped = 0;; nreaped++) {
MWL_TXQ_LOCK(txq);
bf = STAILQ_FIRST(&txq->active);
if (bf == NULL) {
MWL_TXQ_UNLOCK(txq);
break;
}
ds = bf->bf_desc;
MWL_TXDESC_SYNC(txq, ds,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) {
MWL_TXQ_UNLOCK(txq);
break;
}
STAILQ_REMOVE_HEAD(&txq->active, bf_list);
MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
if (sc->sc_debug & MWL_DEBUG_XMIT_DESC)
mwl_printtxbuf(bf, txq->qnum, nreaped);
#endif
ni = bf->bf_node;
if (ni != NULL) {
an = MWL_NODE(ni);
status = le32toh(ds->Status);
if (status & EAGLE_TXD_STATUS_OK) {
uint16_t Format = le16toh(ds->Format);
uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA);
sc->sc_stats.mst_ant_tx[txant]++;
if (status & EAGLE_TXD_STATUS_OK_RETRY)
sc->sc_stats.mst_tx_retries++;
if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY)
sc->sc_stats.mst_tx_mretries++;
if (txq->qnum >= MWL_WME_AC_VO)
ic->ic_wme.wme_hipri_traffic++;
ni->ni_txrate = MS(Format, EAGLE_TXD_RATE);
if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
ni->ni_txrate = mwl_cvtlegacyrix(
ni->ni_txrate);
} else
ni->ni_txrate |= IEEE80211_RATE_MCS;
sc->sc_stats.mst_tx_rate = ni->ni_txrate;
} else {
if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
sc->sc_stats.mst_tx_linkerror++;
if (status & EAGLE_TXD_STATUS_FAILED_XRETRY)
sc->sc_stats.mst_tx_xretries++;
if (status & EAGLE_TXD_STATUS_FAILED_AGING)
sc->sc_stats.mst_tx_aging++;
if (bf->bf_m->m_flags & M_FF)
sc->sc_stats.mst_ff_txerr++;
}
- /*
- * Do any tx complete callback. Note this must
- * be done before releasing the node reference.
- * XXX no way to figure out if frame was ACK'd
- */
- if (bf->bf_m->m_flags & M_TXCB) {
+ if (bf->bf_m->m_flags & M_TXCB)
/* XXX strip fw len in case header inspected */
m_adj(bf->bf_m, sizeof(uint16_t));
- ieee80211_process_callback(ni, bf->bf_m,
- (status & EAGLE_TXD_STATUS_OK) == 0);
- }
- /*
- * Reclaim reference to node.
- *
- * NB: the node may be reclaimed here if, for example
- * this is a DEAUTH message that was sent and the
- * node was timed out due to inactivity.
- */
- ieee80211_free_node(ni);
- }
+ ieee80211_tx_complete(ni, bf->bf_m,
+ (status & EAGLE_TXD_STATUS_OK) == 0);
+ } else
+ m_freem(bf->bf_m);
ds->Status = htole32(EAGLE_TXD_STATUS_IDLE);
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
- m_freem(bf->bf_m);
mwl_puttxbuf_tail(txq, bf);
}
return nreaped;
#undef EAGLE_TXD_STATUS_MCAST
}
/*
* Deferred processing of transmit interrupt; special-cased
* for four hardware queues, 0-3.
*/
static void
mwl_tx_proc(void *arg, int npending)
{
struct mwl_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
int nreaped;
/*
* Process each active queue.
*/
nreaped = 0;
if (!STAILQ_EMPTY(&sc->sc_txq[0].active))
nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]);
if (!STAILQ_EMPTY(&sc->sc_txq[1].active))
nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]);
if (!STAILQ_EMPTY(&sc->sc_txq[2].active))
nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]);
if (!STAILQ_EMPTY(&sc->sc_txq[3].active))
nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]);
if (nreaped != 0) {
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
sc->sc_tx_timer = 0;
- if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
+ if (mbufq_first(&sc->sc_snd) != NULL) {
/* NB: kick fw; the tx thread may have been preempted */
mwl_hal_txstart(sc->sc_mh, 0);
- mwl_start(ifp);
+ mwl_start(sc);
}
}
}
static void
mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq)
{
struct ieee80211_node *ni;
struct mwl_txbuf *bf;
u_int ix;
/*
* NB: this assumes output has been stopped and
* we do not need to block mwl_tx_tasklet
*/
for (ix = 0;; ix++) {
MWL_TXQ_LOCK(txq);
bf = STAILQ_FIRST(&txq->active);
if (bf == NULL) {
MWL_TXQ_UNLOCK(txq);
break;
}
STAILQ_REMOVE_HEAD(&txq->active, bf_list);
MWL_TXQ_UNLOCK(txq);
#ifdef MWL_DEBUG
if (sc->sc_debug & MWL_DEBUG_RESET) {
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
const struct mwltxrec *tr =
mtod(bf->bf_m, const struct mwltxrec *);
mwl_printtxbuf(bf, txq->qnum, ix);
ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh,
bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1);
}
#endif /* MWL_DEBUG */
bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
ni = bf->bf_node;
if (ni != NULL) {
/*
* Reclaim node reference.
*/
ieee80211_free_node(ni);
}
m_freem(bf->bf_m);
mwl_puttxbuf_tail(txq, bf);
}
}
/*
* Drain the transmit queues and reclaim resources.
*/
static void
mwl_draintxq(struct mwl_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
int i;
for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
mwl_tx_draintxq(sc, &sc->sc_txq[i]);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
sc->sc_tx_timer = 0;
}
#ifdef MWL_DIAGAPI
/*
* Reset the transmit queues to a pristine state after a fw download.
*/
static void
mwl_resettxq(struct mwl_softc *sc)
{
int i;
for (i = 0; i < MWL_NUM_TX_QUEUES; i++)
mwl_txq_reset(sc, &sc->sc_txq[i]);
}
#endif /* MWL_DIAGAPI */
/*
* Clear the transmit queues of any frames submitted for the
* specified vap. This is done when the vap is deleted so we
* don't potentially reference the vap after it is gone.
* Note we cannot remove the frames; we only reclaim the node
* reference.
*/
static void
mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap)
{
struct mwl_txq *txq;
struct mwl_txbuf *bf;
int i;
for (i = 0; i < MWL_NUM_TX_QUEUES; i++) {
txq = &sc->sc_txq[i];
MWL_TXQ_LOCK(txq);
STAILQ_FOREACH(bf, &txq->active, bf_list) {
struct ieee80211_node *ni = bf->bf_node;
if (ni != NULL && ni->ni_vap == vap) {
bf->bf_node = NULL;
ieee80211_free_node(ni);
}
}
MWL_TXQ_UNLOCK(txq);
}
}
static int
mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh,
const uint8_t *frm, const uint8_t *efrm)
{
- struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = ni->ni_ic->ic_softc;
const struct ieee80211_action *ia;
ia = (const struct ieee80211_action *) frm;
if (ia->ia_category == IEEE80211_ACTION_CAT_HT &&
ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) {
const struct ieee80211_action_ht_mimopowersave *mps =
(const struct ieee80211_action_ht_mimopowersave *) ia;
mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr,
mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA,
MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE));
return 0;
} else
return sc->sc_recv_action(ni, wh, frm, efrm);
}
static int
mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
int dialogtoken, int baparamset, int batimeout)
{
- struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = ni->ni_ic->ic_softc;
struct ieee80211vap *vap = ni->ni_vap;
struct mwl_node *mn = MWL_NODE(ni);
struct mwl_bastate *bas;
bas = tap->txa_private;
if (bas == NULL) {
const MWL_HAL_BASTREAM *sp;
/*
* Check for a free BA stream slot.
*/
#if MWL_MAXBA > 3
if (mn->mn_ba[3].bastream == NULL)
bas = &mn->mn_ba[3];
else
#endif
#if MWL_MAXBA > 2
if (mn->mn_ba[2].bastream == NULL)
bas = &mn->mn_ba[2];
else
#endif
#if MWL_MAXBA > 1
if (mn->mn_ba[1].bastream == NULL)
bas = &mn->mn_ba[1];
else
#endif
#if MWL_MAXBA > 0
if (mn->mn_ba[0].bastream == NULL)
bas = &mn->mn_ba[0];
else
#endif
{
/* sta already has max BA streams */
/* XXX assign BA stream to highest priority tid */
DPRINTF(sc, MWL_DEBUG_AMPDU,
"%s: already has max bastreams\n", __func__);
sc->sc_stats.mst_ampdu_reject++;
return 0;
}
/* NB: no held reference to ni */
sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap,
(baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0,
ni->ni_macaddr, tap->txa_tid, ni->ni_htparam,
ni, tap);
if (sp == NULL) {
/*
* No available stream, return 0 so no
* a-mpdu aggregation will be done.
*/
DPRINTF(sc, MWL_DEBUG_AMPDU,
"%s: no bastream available\n", __func__);
sc->sc_stats.mst_ampdu_nostream++;
return 0;
}
DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n",
__func__, sp);
/* NB: qos is left zero so we won't match in mwl_tx_start */
bas->bastream = sp;
tap->txa_private = bas;
}
/* fetch current seq# from the firmware; if available */
if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream,
vap->iv_opmode == IEEE80211_M_STA ? vap->iv_myaddr : ni->ni_macaddr,
&tap->txa_start) != 0)
tap->txa_start = 0;
return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout);
}
static int
mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
int code, int baparamset, int batimeout)
{
- struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = ni->ni_ic->ic_softc;
struct mwl_bastate *bas;
bas = tap->txa_private;
if (bas == NULL) {
/* XXX should not happen */
DPRINTF(sc, MWL_DEBUG_AMPDU,
"%s: no BA stream allocated, TID %d\n",
__func__, tap->txa_tid);
sc->sc_stats.mst_addba_nostream++;
return 0;
}
if (code == IEEE80211_STATUS_SUCCESS) {
struct ieee80211vap *vap = ni->ni_vap;
int bufsiz, error;
/*
* Tell the firmware to setup the BA stream;
* we know resources are available because we
* pre-allocated one before forming the request.
*/
bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ);
if (bufsiz == 0)
bufsiz = IEEE80211_AGGR_BAWMAX;
error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap,
bas->bastream, bufsiz, bufsiz, tap->txa_start);
if (error != 0) {
/*
* Setup failed, return immediately so no a-mpdu
* aggregation will be done.
*/
mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
mwl_bastream_free(bas);
tap->txa_private = NULL;
DPRINTF(sc, MWL_DEBUG_AMPDU,
"%s: create failed, error %d, bufsiz %d TID %d "
"htparam 0x%x\n", __func__, error, bufsiz,
tap->txa_tid, ni->ni_htparam);
sc->sc_stats.mst_bacreate_failed++;
return 0;
}
/* NB: cache txq to avoid ptr indirect */
mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq);
DPRINTF(sc, MWL_DEBUG_AMPDU,
"%s: bastream %p assigned to txq %d TID %d bufsiz %d "
"htparam 0x%x\n", __func__, bas->bastream,
bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam);
} else {
/*
* Other side NAK'd us; return the resources.
*/
DPRINTF(sc, MWL_DEBUG_AMPDU,
"%s: request failed with code %d, destroy bastream %p\n",
__func__, code, bas->bastream);
mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
mwl_bastream_free(bas);
tap->txa_private = NULL;
}
/* NB: firmware sends BAR so we don't need to */
return sc->sc_addba_response(ni, tap, code, baparamset, batimeout);
}
static void
mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
- struct mwl_softc *sc = ni->ni_ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = ni->ni_ic->ic_softc;
struct mwl_bastate *bas;
bas = tap->txa_private;
if (bas != NULL) {
DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n",
__func__, bas->bastream);
mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream);
mwl_bastream_free(bas);
tap->txa_private = NULL;
}
sc->sc_addba_stop(ni, tap);
}
/*
* Setup the rx data structures. This should only be
* done once or we may get out of sync with the firmware.
*/
static int
mwl_startrecv(struct mwl_softc *sc)
{
if (!sc->sc_recvsetup) {
struct mwl_rxbuf *bf, *prev;
struct mwl_rxdesc *ds;
prev = NULL;
STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
int error = mwl_rxbuf_init(sc, bf);
if (error != 0) {
DPRINTF(sc, MWL_DEBUG_RECV,
"%s: mwl_rxbuf_init failed %d\n",
__func__, error);
return error;
}
if (prev != NULL) {
ds = prev->bf_desc;
ds->pPhysNext = htole32(bf->bf_daddr);
}
prev = bf;
}
if (prev != NULL) {
ds = prev->bf_desc;
ds->pPhysNext =
htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr);
}
sc->sc_recvsetup = 1;
}
mwl_mode_init(sc); /* set filters, etc. */
return 0;
}
static MWL_HAL_APMODE
mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan)
{
MWL_HAL_APMODE mode;
if (IEEE80211_IS_CHAN_HT(chan)) {
if (vap->iv_flags_ht & IEEE80211_FHT_PUREN)
mode = AP_MODE_N_ONLY;
else if (IEEE80211_IS_CHAN_5GHZ(chan))
mode = AP_MODE_AandN;
else if (vap->iv_flags & IEEE80211_F_PUREG)
mode = AP_MODE_GandN;
else
mode = AP_MODE_BandGandN;
} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
if (vap->iv_flags & IEEE80211_F_PUREG)
mode = AP_MODE_G_ONLY;
else
mode = AP_MODE_MIXED;
} else if (IEEE80211_IS_CHAN_B(chan))
mode = AP_MODE_B_ONLY;
else if (IEEE80211_IS_CHAN_A(chan))
mode = AP_MODE_A_ONLY;
else
mode = AP_MODE_MIXED; /* XXX should not happen? */
return mode;
}
static int
mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan)
{
struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan));
}
/*
* Set/change channels.
*/
static int
mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan)
{
struct mwl_hal *mh = sc->sc_mh;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
MWL_HAL_CHANNEL hchan;
int maxtxpow;
DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n",
__func__, chan->ic_freq, chan->ic_flags);
/*
* Convert to a HAL channel description with
* the flags constrained to reflect the current
* operating mode.
*/
mwl_mapchan(&hchan, chan);
mwl_hal_intrset(mh, 0); /* disable interrupts */
#if 0
mwl_draintxq(sc); /* clear pending tx frames */
#endif
mwl_hal_setchannel(mh, &hchan);
/*
* Tx power is cap'd by the regulatory setting and
* possibly a user-set limit. We pass the min of
* these to the hal to apply them to the cal data
* for this channel.
* XXX min bound?
*/
maxtxpow = 2*chan->ic_maxregpower;
if (maxtxpow > ic->ic_txpowlimit)
maxtxpow = ic->ic_txpowlimit;
mwl_hal_settxpower(mh, &hchan, maxtxpow / 2);
/* NB: potentially change mcast/mgt rates */
mwl_setcurchanrates(sc);
/*
* Update internal state.
*/
sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq);
sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq);
if (IEEE80211_IS_CHAN_A(chan)) {
sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A);
sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A);
} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G);
sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G);
} else {
sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B);
sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B);
}
sc->sc_curchan = hchan;
mwl_hal_intrset(mh, sc->sc_imask);
return 0;
}
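The tx power clamp above works in net80211's half-dBm convention: ic_maxregpower is whole dBm, so it is doubled before being compared against ic_txpowlimit, and the smaller value is halved again before being handed to the hal. A standalone sketch of that clamp (illustrative only; the unit convention is inferred from the 2x / divide-by-2 arithmetic in the code above):

#include <stdio.h>

/*
 * Clamp a channel's regulatory power limit against the global limit,
 * comparing in half-dBm, then return whole dBm for the hal.
 */
static int
cap_txpower(int maxregpower_dbm, int txpowlimit_halfdbm)
{
	int maxtxpow = 2 * maxregpower_dbm;	/* whole dBm -> half-dBm */

	if (maxtxpow > txpowlimit_halfdbm)
		maxtxpow = txpowlimit_halfdbm;
	return (maxtxpow / 2);			/* half-dBm -> whole dBm */
}

int
main(void)
{
	printf("%d dBm\n", cap_txpower(30, 34));	/* global 17 dBm cap wins */
	printf("%d dBm\n", cap_txpower(17, 127));	/* regulatory 17 dBm wins */
	return (0);
}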
static void
mwl_scan_start(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct mwl_softc *sc = ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
static void
mwl_scan_end(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct mwl_softc *sc = ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__);
}
static void
mwl_set_channel(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct mwl_softc *sc = ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
(void) mwl_chan_set(sc, ic->ic_curchan);
}
/*
* Handle a channel switch request. We inform the firmware
* and mark the global state to suppress various actions.
* NB: we issue only one request to the fw; we may be called
* multiple times if there are multiple vap's.
*/
static void
mwl_startcsa(struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
- struct mwl_softc *sc = ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
MWL_HAL_CHANNEL hchan;
if (sc->sc_csapending)
return;
mwl_mapchan(&hchan, ic->ic_csa_newchan);
/* 1 =>'s quiet channel */
mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count);
sc->sc_csapending = 1;
}
/*
* Plumb any static WEP key for the station. This is
* necessary as we must propagate the key from the
* global key table of the vap to each sta db entry.
*/
static void
mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) ==
IEEE80211_F_PRIVACY &&
vap->iv_def_txkey != IEEE80211_KEYIX_NONE &&
vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE)
(void) mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac);
}
static int
mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi)
{
#define WME(ie) ((const struct ieee80211_wme_info *) ie)
struct ieee80211vap *vap = ni->ni_vap;
struct mwl_hal_vap *hvap;
int error;
if (vap->iv_opmode == IEEE80211_M_WDS) {
/*
* WDS vap's do not have a f/w vap; instead they piggyback
* on an AP vap and we must install the sta db entry and
* crypto state using that AP's handle (the WDS vap has none).
*/
hvap = MWL_VAP(vap)->mv_ap_hvap;
} else
hvap = MWL_VAP(vap)->mv_hvap;
error = mwl_hal_newstation(hvap, ni->ni_macaddr,
aid, staid, pi,
ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT),
ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0);
if (error == 0) {
/*
* Setup security for this station. For sta mode this is
* needed even though we do the same thing on transition to
* AUTH state because the call to mwl_hal_newstation
* clobbers the crypto state we setup.
*/
mwl_setanywepkey(vap, ni->ni_macaddr);
}
return error;
#undef WME
}
static void
mwl_setglobalkeys(struct ieee80211vap *vap)
{
struct ieee80211_key *wk;
wk = &vap->iv_nw_keys[0];
for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++)
if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
(void) mwl_key_set(vap, wk, vap->iv_myaddr);
}
/*
* Convert a legacy rate set to a firmware bitmask.
*/
static uint32_t
get_rate_bitmap(const struct ieee80211_rateset *rs)
{
uint32_t rates;
int i;
rates = 0;
for (i = 0; i < rs->rs_nrates; i++)
switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) {
case 2: rates |= 0x001; break;
case 4: rates |= 0x002; break;
case 11: rates |= 0x004; break;
case 22: rates |= 0x008; break;
case 44: rates |= 0x010; break;
case 12: rates |= 0x020; break;
case 18: rates |= 0x040; break;
case 24: rates |= 0x080; break;
case 36: rates |= 0x100; break;
case 48: rates |= 0x200; break;
case 72: rates |= 0x400; break;
case 96: rates |= 0x800; break;
case 108: rates |= 0x1000; break;
}
return rates;
}
/*
* Construct an HT firmware bitmask from an HT rate set.
*/
static uint32_t
get_htrate_bitmap(const struct ieee80211_htrateset *rs)
{
uint32_t rates;
int i;
rates = 0;
for (i = 0; i < rs->rs_nrates; i++) {
if (rs->rs_rates[i] < 16)
rates |= 1<<rs->rs_rates[i];
}
return rates;
}
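get_rate_bitmap() and get_htrate_bitmap() above flatten a rate set into the bit positions the firmware expects: legacy rates occupy fixed bits (bit 0 = 1 Mb/s through bit 12 = 54 Mb/s) while HT MCS indices below 16 map directly to their bit position. A standalone sketch with made-up rate sets (illustrative only, not driver code):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* legacy rates in 500 kb/s units: 1, 2, 5.5 and 11 Mb/s */
	const uint8_t legacy[] = { 2, 4, 11, 22 };
	const uint8_t mcs[] = { 0, 1, 2, 7 };		/* HT MCS indices */
	uint32_t rates = 0, htrates = 0;
	size_t i;

	for (i = 0; i < sizeof(legacy); i++)
		switch (legacy[i]) {		/* only the rates used here */
		case 2:  rates |= 0x001; break;
		case 4:  rates |= 0x002; break;
		case 11: rates |= 0x004; break;
		case 22: rates |= 0x008; break;
		}
	for (i = 0; i < sizeof(mcs); i++)
		if (mcs[i] < 16)
			htrates |= 1 << mcs[i];
	printf("legacy bitmap 0x%x, HT bitmap 0x%x\n", rates, htrates);
	return (0);
}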
/*
* Craft station database entry for station.
* NB: use host byte order here, the hal handles byte swapping.
*/
static MWL_HAL_PEERINFO *
mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
{
const struct ieee80211vap *vap = ni->ni_vap;
memset(pi, 0, sizeof(*pi));
pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates);
pi->CapInfo = ni->ni_capinfo;
if (ni->ni_flags & IEEE80211_NODE_HT) {
/* HT capabilities, etc */
pi->HTCapabilitiesInfo = ni->ni_htcap;
/* XXX pi.HTCapabilitiesInfo */
pi->MacHTParamInfo = ni->ni_htparam;
pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates);
pi->AddHtInfo.ControlChan = ni->ni_htctlchan;
pi->AddHtInfo.AddChan = ni->ni_ht2ndchan;
pi->AddHtInfo.OpMode = ni->ni_htopmode;
pi->AddHtInfo.stbc = ni->ni_htstbc;
/* constrain according to local configuration */
if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
if (ni->ni_chw != 40)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
}
return pi;
}
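The tail of mkpeerinfo() above intersects the peer's advertised HT capabilities with the local configuration by clearing the bits the vap is not willing to use (short GI and 40 MHz width). A standalone sketch of that masking; the bit values below are chosen for illustration and are not taken from the net80211 headers:

#include <stdio.h>
#include <stdint.h>

/* bit values chosen for illustration only */
#define HTCAP_CHWIDTH40		0x0002
#define HTCAP_SHORTGI20		0x0020
#define HTCAP_SHORTGI40		0x0040

int
main(void)
{
	uint16_t htcap = HTCAP_CHWIDTH40 | HTCAP_SHORTGI20 | HTCAP_SHORTGI40;
	int local_sgi40 = 0, local_sgi20 = 1, chan_width = 20;

	/* strip capabilities the local configuration will not use */
	if (!local_sgi40)
		htcap &= ~HTCAP_SHORTGI40;
	if (!local_sgi20)
		htcap &= ~HTCAP_SHORTGI20;
	if (chan_width != 40)
		htcap &= ~HTCAP_CHWIDTH40;
	printf("effective htcap 0x%04x\n", htcap);	/* 0x0020 */
	return (0);
}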
/*
* Re-create the local sta db entry for a vap to ensure
* up to date WME state is pushed to the firmware. Because
* this resets crypto state this must be followed by a
* reload of any keys in the global key table.
*/
static int
mwl_localstadb(struct ieee80211vap *vap)
{
#define WME(ie) ((const struct ieee80211_wme_info *) ie)
struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap;
struct ieee80211_node *bss;
MWL_HAL_PEERINFO pi;
int error;
switch (vap->iv_opmode) {
case IEEE80211_M_STA:
bss = vap->iv_bss;
error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0,
vap->iv_state == IEEE80211_S_RUN ?
mkpeerinfo(&pi, bss) : NULL,
(bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)),
bss->ni_ies.wme_ie != NULL ?
WME(bss->ni_ies.wme_ie)->wme_info : 0);
if (error == 0)
mwl_setglobalkeys(vap);
break;
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
error = mwl_hal_newstation(hvap, vap->iv_myaddr,
0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0);
if (error == 0)
mwl_setglobalkeys(vap);
break;
default:
error = 0;
break;
}
return error;
#undef WME
}
static int
mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct mwl_vap *mvp = MWL_VAP(vap);
struct mwl_hal_vap *hvap = mvp->mv_hvap;
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_node *ni = NULL;
- struct ifnet *ifp = ic->ic_ifp;
- struct mwl_softc *sc = ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
struct mwl_hal *mh = sc->sc_mh;
enum ieee80211_state ostate = vap->iv_state;
int error;
DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n",
vap->iv_ifp->if_xname, __func__,
ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
callout_stop(&sc->sc_timer);
/*
* Clear current radar detection state.
*/
if (ostate == IEEE80211_S_CAC) {
/* stop quiet mode radar detection */
mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP);
} else if (sc->sc_radarena) {
/* stop in-service radar detection */
mwl_hal_setradardetection(mh, DR_DFS_DISABLE);
sc->sc_radarena = 0;
}
/*
* Carry out per-state actions before doing net80211 work.
*/
if (nstate == IEEE80211_S_INIT) {
/* NB: only ap+sta vap's have a fw entity */
if (hvap != NULL)
mwl_hal_stop(hvap);
} else if (nstate == IEEE80211_S_SCAN) {
mwl_hal_start(hvap);
/* NB: this disables beacon frames */
mwl_hal_setinframode(hvap);
} else if (nstate == IEEE80211_S_AUTH) {
/*
* Must create a sta db entry in case a WEP key needs to
* be plumbed. This entry will be overwritten if we
* associate; otherwise it will be reclaimed on node free.
*/
ni = vap->iv_bss;
MWL_NODE(ni)->mn_hvap = hvap;
(void) mwl_peerstadb(ni, 0, 0, NULL);
} else if (nstate == IEEE80211_S_CSA) {
/* XXX move to below? */
if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_MBSS)
mwl_startcsa(vap);
} else if (nstate == IEEE80211_S_CAC) {
/* XXX move to below? */
/* stop ap xmit and enable quiet mode radar detection */
mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START);
}
/*
* Invoke the parent method to do net80211 work.
*/
error = mvp->mv_newstate(vap, nstate, arg);
/*
* Carry out work that must be done after net80211 runs;
* this work requires up to date state (e.g. iv_bss).
*/
if (error == 0 && nstate == IEEE80211_S_RUN) {
/* NB: collect bss node again, it may have changed */
ni = vap->iv_bss;
DPRINTF(sc, MWL_DEBUG_STATE,
"%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
"capinfo 0x%04x chan %d\n",
vap->iv_ifp->if_xname, __func__, vap->iv_flags,
ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo,
ieee80211_chan2ieee(ic, ic->ic_curchan));
/*
* Recreate local sta db entry to update WME/HT state.
*/
mwl_localstadb(vap);
switch (vap->iv_opmode) {
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
if (ostate == IEEE80211_S_CAC) {
/* enable in-service radar detection */
mwl_hal_setradardetection(mh,
DR_IN_SERVICE_MONITOR_START);
sc->sc_radarena = 1;
}
/*
* Allocate and setup the beacon frame
* (and related state).
*/
error = mwl_reset_vap(vap, IEEE80211_S_RUN);
if (error != 0) {
DPRINTF(sc, MWL_DEBUG_STATE,
"%s: beacon setup failed, error %d\n",
__func__, error);
goto bad;
}
/* NB: must be after setting up beacon */
mwl_hal_start(hvap);
break;
case IEEE80211_M_STA:
DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n",
vap->iv_ifp->if_xname, __func__, ni->ni_associd);
/*
* Set state now that we're associated.
*/
mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd);
mwl_setrates(vap);
mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold);
if ((vap->iv_flags & IEEE80211_F_DWDS) &&
sc->sc_ndwdsvaps++ == 0)
mwl_hal_setdwds(mh, 1);
break;
case IEEE80211_M_WDS:
DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n",
vap->iv_ifp->if_xname, __func__,
ether_sprintf(ni->ni_bssid));
mwl_seteapolformat(vap);
break;
default:
break;
}
/*
* Set CS mode according to operating channel;
* this is mostly an optimization for 5GHz.
*
* NB: must follow mwl_hal_start which resets csmode
*/
if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE);
else
mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA);
/*
* Start timer to prod firmware.
*/
if (sc->sc_ageinterval != 0)
callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz,
mwl_agestations, sc);
} else if (nstate == IEEE80211_S_SLEEP) {
/* XXX set chip in power save */
} else if ((vap->iv_flags & IEEE80211_F_DWDS) &&
--sc->sc_ndwdsvaps == 0)
mwl_hal_setdwds(mh, 0);
bad:
return error;
}
/*
* Manage station id's; these are separate from AID's
* as AID's may have values out of the range of possible
* station id's acceptable to the firmware.
*/
static int
allocstaid(struct mwl_softc *sc, int aid)
{
int staid;
if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) {
/* NB: don't use 0 */
for (staid = 1; staid < MWL_MAXSTAID; staid++)
if (isclr(sc->sc_staid, staid))
break;
} else
staid = aid;
setbit(sc->sc_staid, staid);
return staid;
}
static void
delstaid(struct mwl_softc *sc, int staid)
{
clrbit(sc->sc_staid, staid);
}
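allocstaid()/delstaid() above manage firmware station ids with a bitmap: id 0 is never used, the AID is reused when it is in range and free, and otherwise the first free id is handed out. A standalone sketch with a private bitmap and local ISSET/SETBIT/CLRBIT macros (illustrative only; like the driver, it assumes a free id exists):

#include <stdio.h>
#include <string.h>

#define MAXSTAID	64			/* mirrors MWL_MAXSTAID */
#define ISSET(m, b)	((m)[(b) / 8] & (1 << ((b) % 8)))
#define SETBIT(m, b)	((m)[(b) / 8] |= (1 << ((b) % 8)))
#define CLRBIT(m, b)	((m)[(b) / 8] &= ~(1 << ((b) % 8)))

static unsigned char staid[MAXSTAID / 8];	/* id-in-use bitmap */

static int
alloc_staid(int aid)
{
	int id = aid;

	if (!(0 < aid && aid < MAXSTAID) || ISSET(staid, aid))
		for (id = 1; id < MAXSTAID; id++)	/* never hand out id 0 */
			if (!ISSET(staid, id))
				break;
	SETBIT(staid, id);
	return (id);
}

int
main(void)
{
	int a, b, c;

	memset(staid, 0, sizeof(staid));
	a = alloc_staid(5);		/* AID 5 is free -> 5 */
	b = alloc_staid(5);		/* AID 5 now taken -> first free, 1 */
	c = alloc_staid(0);		/* AID 0 is invalid -> next free, 2 */
	printf("%d %d %d\n", a, b, c);
	CLRBIT(staid, 5);		/* the delstaid() counterpart */
	return (0);
}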
/*
* Setup driver-specific state for a newly associated node.
* Note that we're also called on a re-associate; the isnew
* param tells us if this is the first time or not.
*/
static void
mwl_newassoc(struct ieee80211_node *ni, int isnew)
{
struct ieee80211vap *vap = ni->ni_vap;
- struct mwl_softc *sc = vap->iv_ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = vap->iv_ic->ic_softc;
struct mwl_node *mn = MWL_NODE(ni);
MWL_HAL_PEERINFO pi;
uint16_t aid;
int error;
aid = IEEE80211_AID(ni->ni_associd);
if (isnew) {
mn->mn_staid = allocstaid(sc, aid);
mn->mn_hvap = MWL_VAP(vap)->mv_hvap;
} else {
mn = MWL_NODE(ni);
/* XXX reset BA stream? */
}
DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n",
__func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid);
error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni));
if (error != 0) {
DPRINTF(sc, MWL_DEBUG_NODE,
"%s: error %d creating sta db entry\n",
__func__, error);
/* XXX how to deal with error? */
}
}
/*
* Periodically poke the firmware to age out station state
* (power save queues, pending tx aggregates).
*/
static void
mwl_agestations(void *arg)
{
struct mwl_softc *sc = arg;
mwl_hal_setkeepalive(sc->sc_mh);
if (sc->sc_ageinterval != 0) /* NB: catch dynamic changes */
callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz);
}
static const struct mwl_hal_channel *
findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee)
{
int i;
for (i = 0; i < ci->nchannels; i++) {
const struct mwl_hal_channel *hc = &ci->channels[i];
if (hc->ieee == ieee)
return hc;
}
return NULL;
}
static int
mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
int nchan, struct ieee80211_channel chans[])
{
- struct mwl_softc *sc = ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
struct mwl_hal *mh = sc->sc_mh;
const MWL_HAL_CHANNELINFO *ci;
int i;
for (i = 0; i < nchan; i++) {
struct ieee80211_channel *c = &chans[i];
const struct mwl_hal_channel *hc;
if (IEEE80211_IS_CHAN_2GHZ(c)) {
mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ,
IEEE80211_IS_CHAN_HT40(c) ?
MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
} else if (IEEE80211_IS_CHAN_5GHZ(c)) {
mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ,
IEEE80211_IS_CHAN_HT40(c) ?
MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci);
} else {
- if_printf(ic->ic_ifp,
+ device_printf(sc->sc_dev,
"%s: channel %u freq %u/0x%x not 2.4/5GHz\n",
__func__, c->ic_ieee, c->ic_freq, c->ic_flags);
return EINVAL;
}
/*
* Verify channel has cal data and cap tx power.
*/
hc = findhalchannel(ci, c->ic_ieee);
if (hc != NULL) {
if (c->ic_maxpower > 2*hc->maxTxPow)
c->ic_maxpower = 2*hc->maxTxPow;
goto next;
}
if (IEEE80211_IS_CHAN_HT40(c)) {
/*
* Look for the extension channel since the
* hal table only has the primary channel.
*/
hc = findhalchannel(ci, c->ic_extieee);
if (hc != NULL) {
if (c->ic_maxpower > 2*hc->maxTxPow)
c->ic_maxpower = 2*hc->maxTxPow;
goto next;
}
}
- if_printf(ic->ic_ifp,
+ device_printf(sc->sc_dev,
"%s: no cal data for channel %u ext %u freq %u/0x%x\n",
__func__, c->ic_ieee, c->ic_extieee,
c->ic_freq, c->ic_flags);
return EINVAL;
next:
;
}
return 0;
}
#define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT|IEEE80211_CHAN_G)
#define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT|IEEE80211_CHAN_A)
static void
addchan(struct ieee80211_channel *c, int freq, int flags, int ieee, int txpow)
{
c->ic_freq = freq;
c->ic_flags = flags;
c->ic_ieee = ieee;
c->ic_minpower = 0;
c->ic_maxpower = 2*txpow;
c->ic_maxregpower = txpow;
}
static const struct ieee80211_channel *
findchannel(const struct ieee80211_channel chans[], int nchans,
int freq, int flags)
{
const struct ieee80211_channel *c;
int i;
for (i = 0; i < nchans; i++) {
c = &chans[i];
if (c->ic_freq == freq && c->ic_flags == flags)
return c;
}
return NULL;
}
static void
addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans,
const MWL_HAL_CHANNELINFO *ci, int flags)
{
struct ieee80211_channel *c;
const struct ieee80211_channel *extc;
const struct mwl_hal_channel *hc;
int i;
c = &chans[*nchans];
flags &= ~IEEE80211_CHAN_HT;
for (i = 0; i < ci->nchannels; i++) {
/*
* Each entry defines an HT40 channel pair; find the
* extension channel above and then insert the pair.
*/
hc = &ci->channels[i];
extc = findchannel(chans, *nchans, hc->freq+20,
flags | IEEE80211_CHAN_HT20);
if (extc != NULL) {
if (*nchans >= maxchans)
break;
addchan(c, hc->freq, flags | IEEE80211_CHAN_HT40U,
hc->ieee, hc->maxTxPow);
c->ic_extieee = extc->ic_ieee;
c++, (*nchans)++;
if (*nchans >= maxchans)
break;
addchan(c, extc->ic_freq, flags | IEEE80211_CHAN_HT40D,
extc->ic_ieee, hc->maxTxPow);
c->ic_extieee = hc->ieee;
c++, (*nchans)++;
}
}
}
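addht40channels() above treats each hal entry as the primary 20 MHz channel of an HT40 pair, looks for the extension channel 20 MHz higher, and then publishes the primary as HT40U and the extension as HT40D. A standalone rendering of that pairing with a made-up subset of frequencies (illustrative only):

#include <stdio.h>

int
main(void)
{
	/* made-up subset of primary channel center frequencies (MHz) */
	const int primaries[] = { 2412, 2432, 5180, 5220 };
	size_t i;

	for (i = 0; i < sizeof(primaries) / sizeof(primaries[0]); i++)
		printf("HT40 pair: %d MHz (HT40U) + %d MHz (HT40D)\n",
		    primaries[i], primaries[i] + 20);
	return (0);
}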
static void
addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans,
const MWL_HAL_CHANNELINFO *ci, int flags)
{
struct ieee80211_channel *c;
int i;
c = &chans[*nchans];
for (i = 0; i < ci->nchannels; i++) {
const struct mwl_hal_channel *hc;
hc = &ci->channels[i];
if (*nchans >= maxchans)
break;
addchan(c, hc->freq, flags, hc->ieee, hc->maxTxPow);
c++, (*nchans)++;
if (flags == IEEE80211_CHAN_G || flags == IEEE80211_CHAN_HTG) {
/* g channels have a separate b-only entry */
if (*nchans >= maxchans)
break;
c[0] = c[-1];
c[-1].ic_flags = IEEE80211_CHAN_B;
c++, (*nchans)++;
}
if (flags == IEEE80211_CHAN_HTG) {
/* HT g channels have a separate g-only entry */
if (*nchans >= maxchans)
break;
c[-1].ic_flags = IEEE80211_CHAN_G;
c[0] = c[-1];
c[0].ic_flags &= ~IEEE80211_CHAN_HT;
c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */
c++, (*nchans)++;
}
if (flags == IEEE80211_CHAN_HTA) {
/* HT a channels have a separate a-only entry */
if (*nchans >= maxchans)
break;
c[-1].ic_flags = IEEE80211_CHAN_A;
c[0] = c[-1];
c[0].ic_flags &= ~IEEE80211_CHAN_HT;
c[0].ic_flags |= IEEE80211_CHAN_HT20; /* HT20 */
c++, (*nchans)++;
}
}
}
static void
getchannels(struct mwl_softc *sc, int maxchans, int *nchans,
struct ieee80211_channel chans[])
{
const MWL_HAL_CHANNELINFO *ci;
/*
* Use the channel info from the hal to craft the
* channel list. Note that we pass back an unsorted
* list; the caller is required to sort it for us
* (if desired).
*/
*nchans = 0;
if (mwl_hal_getchannelinfo(sc->sc_mh,
MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
if (mwl_hal_getchannelinfo(sc->sc_mh,
MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0)
addchannels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
if (mwl_hal_getchannelinfo(sc->sc_mh,
MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG);
if (mwl_hal_getchannelinfo(sc->sc_mh,
MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0)
addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA);
}
static void
mwl_getradiocaps(struct ieee80211com *ic,
int maxchans, int *nchans, struct ieee80211_channel chans[])
{
- struct mwl_softc *sc = ic->ic_ifp->if_softc;
+ struct mwl_softc *sc = ic->ic_softc;
getchannels(sc, maxchans, nchans, chans);
}
static int
mwl_getchannels(struct mwl_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
/*
* Use the channel info from the hal to craft the
* channel list for net80211. Note that we pass up
* an unsorted list; net80211 will sort it for us.
*/
memset(ic->ic_channels, 0, sizeof(ic->ic_channels));
ic->ic_nchans = 0;
getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels);
ic->ic_regdomain.regdomain = SKU_DEBUG;
ic->ic_regdomain.country = CTRY_DEFAULT;
ic->ic_regdomain.location = 'I';
ic->ic_regdomain.isocc[0] = ' '; /* XXX? */
ic->ic_regdomain.isocc[1] = ' ';
return (ic->ic_nchans == 0 ? EIO : 0);
}
#undef IEEE80211_CHAN_HTA
#undef IEEE80211_CHAN_HTG
#ifdef MWL_DEBUG
static void
mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix)
{
const struct mwl_rxdesc *ds = bf->bf_desc;
uint32_t status = le32toh(ds->Status);
printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n"
" STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n",
ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext),
le32toh(ds->pPhysBuffData), ds->RxControl,
ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ?
"" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !",
ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel,
ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2));
}
static void
mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix)
{
const struct mwl_txdesc *ds = bf->bf_desc;
uint32_t status = le32toh(ds->Status);
printf("Q%u[%3u]", qnum, ix);
printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr);
printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n",
le32toh(ds->pPhysNext),
le32toh(ds->PktPtr), le16toh(ds->PktLen), status,
status & EAGLE_TXD_STATUS_USED ?
"" : (status & 3) != 0 ? " *" : " !");
printf(" RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n",
ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl),
le32toh(ds->SapPktInfo), le16toh(ds->Format));
#if MWL_TXDESC > 1
printf(" MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n"
, le32toh(ds->multiframes)
, le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1])
, le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3])
, le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5])
);
printf(" DATA:%08x %08x %08x %08x %08x %08x\n"
, le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1])
, le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3])
, le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5])
);
#endif
#if 0
{ const uint8_t *cp = (const uint8_t *) ds;
int i;
for (i = 0; i < sizeof(struct mwl_txdesc); i++) {
printf("%02x ", cp[i]);
if (((i+1) % 16) == 0)
printf("\n");
}
printf("\n");
}
#endif
}
#endif /* MWL_DEBUG */
#if 0
static void
mwl_txq_dump(struct mwl_txq *txq)
{
struct mwl_txbuf *bf;
int i = 0;
MWL_TXQ_LOCK(txq);
STAILQ_FOREACH(bf, &txq->active, bf_list) {
struct mwl_txdesc *ds = bf->bf_desc;
MWL_TXDESC_SYNC(txq, ds,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
#ifdef MWL_DEBUG
mwl_printtxbuf(bf, txq->qnum, i);
#endif
i++;
}
MWL_TXQ_UNLOCK(txq);
}
#endif
static void
mwl_watchdog(void *arg)
{
- struct mwl_softc *sc;
- struct ifnet *ifp;
+ struct mwl_softc *sc = arg;
- sc = arg;
callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc);
if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0)
return;
- ifp = sc->sc_ifp;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) {
+ if (sc->sc_running && !sc->sc_invalid) {
if (mwl_hal_setkeepalive(sc->sc_mh))
- if_printf(ifp, "transmit timeout (firmware hung?)\n");
+ device_printf(sc->sc_dev,
+ "transmit timeout (firmware hung?)\n");
else
- if_printf(ifp, "transmit timeout\n");
+ device_printf(sc->sc_dev,
+ "transmit timeout\n");
#if 0
- mwl_reset(ifp);
+ mwl_reset(sc);
mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/
#endif
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
sc->sc_stats.mst_watchdog++;
}
}
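mwl_watchdog() above re-arms itself every second and only acts once sc_tx_timer, armed to 5 by mwl_tx_start() and cleared on tx completion, counts down to zero. A standalone sketch of just that countdown (illustrative only, not driver code):

#include <stdio.h>

static int tx_timer;			/* counterpart of sc_tx_timer */

/* called once per "second", like the watchdog callout */
static void
watchdog_tick(void)
{
	if (tx_timer == 0 || --tx_timer > 0)
		return;			/* nothing pending or not yet expired */
	printf("transmit timeout\n");
}

int
main(void)
{
	int i;

	tx_timer = 5;			/* armed when a frame is queued */
	for (i = 0; i < 6; i++)
		watchdog_tick();	/* fires on the fifth tick */
	return (0);
}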
#ifdef MWL_DIAGAPI
/*
* Diagnostic interface to the HAL. This is used by various
* tools to do things like retrieve register contents for
* debugging. The mechanism is intentionally opaque so that
* it can change frequently w/o concern for compatibility.
*/
static int
mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md)
{
struct mwl_hal *mh = sc->sc_mh;
u_int id = md->md_id & MWL_DIAG_ID;
void *indata = NULL;
void *outdata = NULL;
u_int32_t insize = md->md_in_size;
u_int32_t outsize = md->md_out_size;
int error = 0;
if (md->md_id & MWL_DIAG_IN) {
/*
* Copy in data.
*/
indata = malloc(insize, M_TEMP, M_NOWAIT);
if (indata == NULL) {
error = ENOMEM;
goto bad;
}
error = copyin(md->md_in_data, indata, insize);
if (error)
goto bad;
}
if (md->md_id & MWL_DIAG_DYN) {
/*
* Allocate a buffer for the results (otherwise the HAL
* returns a pointer to a buffer where we can read the
* results). Note that we depend on the HAL leaving this
* pointer for us to use below in reclaiming the buffer;
* may want to be more defensive.
*/
outdata = malloc(outsize, M_TEMP, M_NOWAIT);
if (outdata == NULL) {
error = ENOMEM;
goto bad;
}
}
if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) {
if (outsize < md->md_out_size)
md->md_out_size = outsize;
if (outdata != NULL)
error = copyout(outdata, md->md_out_data,
md->md_out_size);
} else {
error = EINVAL;
}
bad:
if ((md->md_id & MWL_DIAG_IN) && indata != NULL)
free(indata, M_TEMP);
if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL)
free(outdata, M_TEMP);
return error;
}
static int
mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md)
{
struct mwl_hal *mh = sc->sc_mh;
int error;
MWL_LOCK_ASSERT(sc);
if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) {
device_printf(sc->sc_dev, "unable to load firmware\n");
return EIO;
}
if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) {
device_printf(sc->sc_dev, "unable to fetch h/w specs\n");
return EIO;
}
error = mwl_setupdma(sc);
if (error != 0) {
/* NB: mwl_setupdma prints a msg */
return error;
}
/*
* Reset tx/rx data structures; after reload we must
* re-start the driver's notion of the next xmit/recv.
*/
mwl_draintxq(sc); /* clear pending frames */
mwl_resettxq(sc); /* rebuild tx q lists */
sc->sc_rxnext = NULL; /* force rx to start at the list head */
return 0;
}
#endif /* MWL_DIAGAPI */
-static int
-mwl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+mwl_parent(struct ieee80211com *ic)
{
-#define IS_RUNNING(ifp) \
- ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
- struct mwl_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *)data;
- int error = 0, startall;
+ struct mwl_softc *sc = ic->ic_softc;
+ int startall = 0;
- switch (cmd) {
- case SIOCSIFFLAGS:
- MWL_LOCK(sc);
- startall = 0;
- if (IS_RUNNING(ifp)) {
+ MWL_LOCK(sc);
+ if (ic->ic_nrunning > 0) {
+ if (sc->sc_running) {
/*
* To avoid rescanning another access point,
* do not call mwl_init() here. Instead,
* only reflect promisc mode settings.
*/
mwl_mode_init(sc);
- } else if (ifp->if_flags & IFF_UP) {
+ } else {
/*
* Beware of being called during attach/detach
* to reset promiscuous mode. In that case we
* will still be marked UP but not RUNNING.
* However trying to re-init the interface
* is the wrong thing to do as we've already
* torn down much of our state. There's
* probably a better way to deal with this.
*/
if (!sc->sc_invalid) {
- mwl_init_locked(sc); /* XXX lose error */
+ mwl_init(sc); /* XXX lose error */
startall = 1;
}
- } else
- mwl_stop_locked(ifp, 1);
- MWL_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
- break;
+ }
+ } else
+ mwl_stop(sc);
+ MWL_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
+}
+
+static int
+mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
+{
+ struct mwl_softc *sc = ic->ic_softc;
+ struct ifreq *ifr = data;
+ int error = 0;
+
+ switch (cmd) {
case SIOCGMVSTATS:
mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats);
+#if 0
/* NB: embed these numbers to get a consistent view */
sc->sc_stats.mst_tx_packets =
ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS);
sc->sc_stats.mst_rx_packets =
ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS);
+#endif
/*
* NB: Drop the softc lock in case of a page fault;
* we'll accept any potential inconsistency in the
* statistics. The alternative is to copy the data
* to a local structure.
*/
- return copyout(&sc->sc_stats,
- ifr->ifr_data, sizeof (sc->sc_stats));
+ return (copyout(&sc->sc_stats,
+ ifr->ifr_data, sizeof (sc->sc_stats)));
#ifdef MWL_DIAGAPI
case SIOCGMVDIAG:
/* XXX check privs */
return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr);
case SIOCGMVRESET:
/* XXX check privs */
MWL_LOCK(sc);
error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr);
MWL_UNLOCK(sc);
break;
#endif /* MWL_DIAGAPI */
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
default:
- error = EINVAL;
+ error = ENOTTY;
break;
}
- return error;
-#undef IS_RUNNING
+ return (error);
}
#ifdef MWL_DEBUG
static int
mwl_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
struct mwl_softc *sc = arg1;
int debug, error;
debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24);
error = sysctl_handle_int(oidp, &debug, 0, req);
if (error || !req->newptr)
return error;
mwl_hal_setdebug(sc->sc_mh, debug >> 24);
sc->sc_debug = debug & 0x00ffffff;
return 0;
}
#endif /* MWL_DEBUG */
static void
mwl_sysctlattach(struct mwl_softc *sc)
{
#ifdef MWL_DEBUG
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
sc->sc_debug = mwl_debug;
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
mwl_sysctl_debug, "I", "control debugging printfs");
#endif
}
/*
* Announce various information on device/driver attach.
*/
static void
mwl_announce(struct mwl_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- if_printf(ifp, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
+ device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n",
sc->sc_hwspecs.hwVersion,
(sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff,
(sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff,
(sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff,
(sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff,
sc->sc_hwspecs.regionCode);
sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber;
if (bootverbose) {
int i;
for (i = 0; i <= WME_AC_VO; i++) {
struct mwl_txq *txq = sc->sc_ac2q[i];
- if_printf(ifp, "Use hw queue %u for %s traffic\n",
+ device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n",
txq->qnum, ieee80211_wme_acnames[i]);
}
}
if (bootverbose || mwl_rxdesc != MWL_RXDESC)
- if_printf(ifp, "using %u rx descriptors\n", mwl_rxdesc);
+ device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc);
if (bootverbose || mwl_rxbuf != MWL_RXBUF)
- if_printf(ifp, "using %u rx buffers\n", mwl_rxbuf);
+ device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf);
if (bootverbose || mwl_txbuf != MWL_TXBUF)
- if_printf(ifp, "using %u tx buffers\n", mwl_txbuf);
+ device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf);
if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh))
- if_printf(ifp, "multi-bss support\n");
+ device_printf(sc->sc_dev, "multi-bss support\n");
#ifdef MWL_TX_NODROP
if (bootverbose)
- if_printf(ifp, "no tx drop\n");
+ device_printf(sc->sc_dev, "no tx drop\n");
#endif
}
Index: head/sys/dev/mwl/if_mwl_pci.c
===================================================================
--- head/sys/dev/mwl/if_mwl_pci.c (revision 287196)
+++ head/sys/dev/mwl/if_mwl_pci.c (revision 287197)
@@ -1,292 +1,293 @@
/*-
* Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
* Copyright (c) 2007-2009 Marvell Semiconductor, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
#ifdef __FreeBSD__
__FBSDID("$FreeBSD$");
#endif
/*
* PCI front-end for the Marvell Wireless LAN controller driver.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
+#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_arp.h>
#include <net/route.h>
#include <net80211/ieee80211_var.h>
#include <dev/mwl/if_mwlvar.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
/*
* PCI glue.
*/
struct mwl_pci_softc {
struct mwl_softc sc_sc;
struct resource *sc_sr0; /* BAR0 memory resource */
struct resource *sc_sr1; /* BAR1 memory resource */
struct resource *sc_irq; /* irq resource */
void *sc_ih; /* interrupt handler */
};
#define BS_BAR0 0x10
#define BS_BAR1 0x14
struct mwl_pci_ident {
uint16_t vendor;
uint16_t device;
const char *name;
};
static const struct mwl_pci_ident mwl_pci_ids[] = {
{ 0x11ab, 0x2a02, "Marvell 88W8363" },
{ 0x11ab, 0x2a03, "Marvell 88W8363" },
{ 0x11ab, 0x2a0a, "Marvell 88W8363" },
{ 0x11ab, 0x2a0b, "Marvell 88W8363" },
{ 0x11ab, 0x2a0c, "Marvell 88W8363" },
{ 0x11ab, 0x2a21, "Marvell 88W8363" },
{ 0x11ab, 0x2a24, "Marvell 88W8363" },
{ 0, 0, NULL }
};
const static struct mwl_pci_ident *
mwl_pci_lookup(int vendor, int device)
{
const struct mwl_pci_ident *ident;
for (ident = mwl_pci_ids; ident->name != NULL; ident++)
if (vendor == ident->vendor && device == ident->device)
return ident;
return NULL;
}
static int
mwl_pci_probe(device_t dev)
{
const struct mwl_pci_ident *ident;
ident = mwl_pci_lookup(pci_get_vendor(dev), pci_get_device(dev));
if (ident != NULL) {
device_set_desc(dev, ident->name);
return BUS_PROBE_DEFAULT;
}
return ENXIO;
}
static int
mwl_pci_attach(device_t dev)
{
struct mwl_pci_softc *psc = device_get_softc(dev);
struct mwl_softc *sc = &psc->sc_sc;
int rid, error = ENXIO;
sc->sc_dev = dev;
pci_enable_busmaster(dev);
/*
* Setup memory-mapping of PCI registers.
*/
rid = BS_BAR0;
psc->sc_sr0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (psc->sc_sr0 == NULL) {
device_printf(dev, "cannot map BAR0 register space\n");
goto bad;
}
rid = BS_BAR1;
psc->sc_sr1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (psc->sc_sr1 == NULL) {
device_printf(dev, "cannot map BAR1 register space\n");
goto bad1;
}
sc->sc_invalid = 1;
/*
* Arrange interrupt line.
*/
rid = 0;
psc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_SHAREABLE|RF_ACTIVE);
if (psc->sc_irq == NULL) {
device_printf(dev, "could not map interrupt\n");
goto bad2;
}
if (bus_setup_intr(dev, psc->sc_irq,
INTR_TYPE_NET | INTR_MPSAFE,
NULL, mwl_intr, sc, &psc->sc_ih)) {
device_printf(dev, "could not establish interrupt\n");
goto bad3;
}
/*
* Setup DMA descriptor area.
*/
if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, 0, /* alignment, bounds */
BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
BUS_SPACE_MAXSIZE, /* maxsize */
MWL_TXDESC, /* nsegments */
BUS_SPACE_MAXSIZE, /* maxsegsize */
0, /* flags */
NULL, /* lockfunc */
NULL, /* lockarg */
&sc->sc_dmat)) {
device_printf(dev, "cannot allocate DMA tag\n");
goto bad4;
}
/*
* Finish off the attach.
*/
MWL_LOCK_INIT(sc);
sc->sc_io0t = rman_get_bustag(psc->sc_sr0);
sc->sc_io0h = rman_get_bushandle(psc->sc_sr0);
sc->sc_io1t = rman_get_bustag(psc->sc_sr1);
sc->sc_io1h = rman_get_bushandle(psc->sc_sr1);
if (mwl_attach(pci_get_device(dev), sc) == 0)
return (0);
MWL_LOCK_DESTROY(sc);
bus_dma_tag_destroy(sc->sc_dmat);
bad4:
bus_teardown_intr(dev, psc->sc_irq, psc->sc_ih);
bad3:
bus_release_resource(dev, SYS_RES_IRQ, 0, psc->sc_irq);
bad2:
bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR1, psc->sc_sr1);
bad1:
bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR0, psc->sc_sr0);
bad:
return (error);
}
static int
mwl_pci_detach(device_t dev)
{
struct mwl_pci_softc *psc = device_get_softc(dev);
struct mwl_softc *sc = &psc->sc_sc;
/* check if device was removed */
sc->sc_invalid = !bus_child_present(dev);
mwl_detach(sc);
bus_generic_detach(dev);
bus_teardown_intr(dev, psc->sc_irq, psc->sc_ih);
bus_release_resource(dev, SYS_RES_IRQ, 0, psc->sc_irq);
bus_dma_tag_destroy(sc->sc_dmat);
bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR1, psc->sc_sr1);
bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR0, psc->sc_sr0);
MWL_LOCK_DESTROY(sc);
return (0);
}
static int
mwl_pci_shutdown(device_t dev)
{
struct mwl_pci_softc *psc = device_get_softc(dev);
mwl_shutdown(&psc->sc_sc);
return (0);
}
static int
mwl_pci_suspend(device_t dev)
{
struct mwl_pci_softc *psc = device_get_softc(dev);
mwl_suspend(&psc->sc_sc);
return (0);
}
static int
mwl_pci_resume(device_t dev)
{
struct mwl_pci_softc *psc = device_get_softc(dev);
pci_enable_busmaster(dev);
mwl_resume(&psc->sc_sc);
return (0);
}
static device_method_t mwl_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, mwl_pci_probe),
DEVMETHOD(device_attach, mwl_pci_attach),
DEVMETHOD(device_detach, mwl_pci_detach),
DEVMETHOD(device_shutdown, mwl_pci_shutdown),
DEVMETHOD(device_suspend, mwl_pci_suspend),
DEVMETHOD(device_resume, mwl_pci_resume),
{ 0,0 }
};
static driver_t mwl_pci_driver = {
"mwl",
mwl_pci_methods,
sizeof (struct mwl_pci_softc)
};
static devclass_t mwl_devclass;
DRIVER_MODULE(mwl, pci, mwl_pci_driver, mwl_devclass, 0, 0);
MODULE_VERSION(mwl, 1);
MODULE_DEPEND(mwl, wlan, 1, 1, 1); /* 802.11 media layer */
MODULE_DEPEND(mwl, firmware, 1, 1, 1);
Index: head/sys/dev/mwl/if_mwlvar.h
===================================================================
--- head/sys/dev/mwl/if_mwlvar.h (revision 287196)
+++ head/sys/dev/mwl/if_mwlvar.h (revision 287197)
@@ -1,361 +1,363 @@
/*-
* Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
* Copyright (c) 2007-2009 Marvell Semiconductor, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
* $FreeBSD$
*/
/*
* Definitions for the Marvell 88W8363 Wireless LAN controller.
*/
#ifndef _DEV_MWL_MVVAR_H
#define _DEV_MWL_MVVAR_H
#include <sys/endian.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/mwl/mwlhal.h>
#include <dev/mwl/mwlreg.h>
#include <dev/mwl/if_mwlioctl.h>
#ifndef MWL_TXBUF
#define MWL_TXBUF 256 /* number of TX descriptors/buffers */
#endif
#ifndef MWL_TXACKBUF
#define MWL_TXACKBUF (MWL_TXBUF/2) /* number of TX ACK desc's/buffers */
#endif
#ifndef MWL_RXDESC
#define MWL_RXDESC 256 /* number of RX descriptors */
#endif
#ifndef MWL_RXBUF
#define MWL_RXBUF ((5*MWL_RXDESC)/2)/* number of RX dma buffers */
#endif
#ifndef MWL_MAXBA
#define MWL_MAXBA 2 /* max BA streams/sta */
#endif
#ifdef MWL_SGDMA_SUPPORT
#define MWL_TXDESC 6 /* max tx descriptors/segments */
#else
#define MWL_TXDESC 1 /* max tx descriptors/segments */
#endif
#ifndef MWL_AGGR_SIZE
#define MWL_AGGR_SIZE 3839 /* max tx aggregation size */
#endif
#define MWL_AGEINTERVAL 1 /* poke f/w every sec to age q's */
#define MWL_MAXSTAID 64 /* max of 64 stations */
/*
* DMA state for tx/rx descriptors.
*/
/*
* Software backed version of tx/rx descriptors. We keep
* the software state out of the h/w descriptor structure
* so that it may be allocated in uncached memory w/o paying
* a performance hit.
*/
struct mwl_txbuf {
STAILQ_ENTRY(mwl_txbuf) bf_list;
void *bf_desc; /* h/w descriptor */
bus_addr_t bf_daddr; /* physical addr of desc */
bus_dmamap_t bf_dmamap; /* DMA map for descriptors */
int bf_nseg;
bus_dma_segment_t bf_segs[MWL_TXDESC];
struct mbuf *bf_m;
struct ieee80211_node *bf_node;
struct mwl_txq *bf_txq; /* backpointer to tx q/ring */
};
typedef STAILQ_HEAD(, mwl_txbuf) mwl_txbufhead;
/*
* Common "base class" for tx/rx descriptor resources
* allocated using the bus dma api.
*/
struct mwl_descdma {
const char* dd_name;
void *dd_desc; /* descriptors */
bus_addr_t dd_desc_paddr; /* physical addr of dd_desc */
bus_size_t dd_desc_len; /* size of dd_desc */
bus_dma_segment_t dd_dseg;
int dd_dnseg; /* number of segments */
bus_dma_tag_t dd_dmat; /* bus DMA tag */
bus_dmamap_t dd_dmamap; /* DMA map for descriptors */
void *dd_bufptr; /* associated buffers */
};
/*
* TX/RX ring definitions. There are 4 tx rings, one
* per AC, and 1 rx ring. Note carefully that transmit
* descriptors are treated as a contiguous chunk and the
* firmware pre-fetches descriptors. This means that we
* must preserve order when moving descriptors between
* the active+free lists; otherwise we may stall transmit.
*/
struct mwl_txq {
struct mwl_descdma dma; /* bus dma resources */
struct mtx lock; /* tx q lock */
char name[12]; /* e.g. "mwl0_txq4" */
int qnum; /* f/w q number */
int txpri; /* f/w tx priority */
int nfree; /* # buffers on free list */
mwl_txbufhead free; /* queue of free buffers */
mwl_txbufhead active; /* queue of active buffers */
};
#define MWL_TXQ_LOCK_INIT(_sc, _tq) do { \
snprintf((_tq)->name, sizeof((_tq)->name), "%s_txq%u", \
device_get_nameunit((_sc)->sc_dev), (_tq)->qnum); \
mtx_init(&(_tq)->lock, (_tq)->name, NULL, MTX_DEF); \
} while (0)
#define MWL_TXQ_LOCK_DESTROY(_tq) mtx_destroy(&(_tq)->lock)
#define MWL_TXQ_LOCK(_tq) mtx_lock(&(_tq)->lock)
#define MWL_TXQ_UNLOCK(_tq) mtx_unlock(&(_tq)->lock)
#define MWL_TXQ_LOCK_ASSERT(_tq) mtx_assert(&(_tq)->lock, MA_OWNED)
#define MWL_TXDESC_SYNC(txq, ds, how) do { \
bus_dmamap_sync((txq)->dma.dd_dmat, (txq)->dma.dd_dmamap, how); \
} while(0)
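/*
 * Editor's note: a minimal sketch, not part of this commit, illustrating the
 * order-preserving rule described above.  Completed buffers must be appended
 * to the tail of the free list (never pushed onto the head) so that free-list
 * order matches the contiguous descriptor ring the firmware prefetches from.
 * The helper name is hypothetical.
 */
static __inline__ struct mwl_txbuf *
mwl_txq_reclaim_one_sketch(struct mwl_txq *txq)
{
	struct mwl_txbuf *bf;

	MWL_TXQ_LOCK(txq);
	bf = STAILQ_FIRST(&txq->active);	/* oldest active buffer */
	if (bf != NULL) {
		STAILQ_REMOVE_HEAD(&txq->active, bf_list);
		STAILQ_INSERT_TAIL(&txq->free, bf, bf_list);	/* append */
		txq->nfree++;
	}
	MWL_TXQ_UNLOCK(txq);
	return (bf);
}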
/*
* RX dma buffers that are not in use are kept on a list.
*/
struct mwl_jumbo {
SLIST_ENTRY(mwl_jumbo) next;
};
typedef SLIST_HEAD(, mwl_jumbo) mwl_jumbohead;
#define MWL_JUMBO_DATA2BUF(_data) ((struct mwl_jumbo *)(_data))
#define MWL_JUMBO_BUF2DATA(_buf) ((uint8_t *)(_buf))
#define MWL_JUMBO_OFFSET(_sc, _data) \
(((const uint8_t *)(_data)) - (const uint8_t *)((_sc)->sc_rxmem))
#define MWL_JUMBO_DMA_ADDR(_sc, _data) \
((_sc)->sc_rxmem_paddr + MWL_JUMBO_OFFSET(_sc, _data))
struct mwl_rxbuf {
STAILQ_ENTRY(mwl_rxbuf) bf_list;
void *bf_desc; /* h/w descriptor */
bus_addr_t bf_daddr; /* physical addr of desc */
uint8_t *bf_data; /* rx data area */
};
typedef STAILQ_HEAD(, mwl_rxbuf) mwl_rxbufhead;
#define MWL_RXDESC_SYNC(sc, ds, how) do { \
bus_dmamap_sync((sc)->sc_rxdma.dd_dmat, (sc)->sc_rxdma.dd_dmamap, how);\
} while (0)
/*
* BA stream state. One of these is set up for each stream
* allocated/created for use. We pre-allocate the h/w stream
* before sending the ADDBA request, then complete the setup when
* the ADDBA response (success) arrives. The completed state is set up
* to optimize the fast path in mwl_txstart--we precalculate
* the QoS control bits in the outbound frame and use those
* to identify which BA stream to use (assigning the h/w q to
* the TxPriority field of the descriptor).
*
* NB: Each station may have at most MWL_MAXBA streams at one time.
*/
struct mwl_bastate {
uint16_t qos; /* QoS ctl for BA stream */
uint8_t txq; /* h/w q for BA stream */
const MWL_HAL_BASTREAM *bastream; /* A-MPDU BA stream */
};
static __inline__ void
mwl_bastream_setup(struct mwl_bastate *bas, int tid, int txq)
{
bas->txq = txq;
bas->qos = htole16(tid | IEEE80211_QOS_ACKPOLICY_BA);
}
static __inline__ void
mwl_bastream_free(struct mwl_bastate *bas)
{
bas->qos = 0;
bas->bastream = NULL;
/* NB: don't need to clear txq */
}
/*
* Check the QoS control bits from an outbound frame against the
* value calculated when a BA stream is set up (above). We need
* to match the TID and also the ACK policy so we only match AMPDU
* frames. The bits from the frame are assumed to be in network byte
* order, hence the potential byte swap.
*/
static __inline__ int
mwl_bastream_match(const struct mwl_bastate *bas, uint16_t qos)
{
return (qos & htole16(IEEE80211_QOS_TID|IEEE80211_QOS_ACKPOLICY)) ==
bas->qos;
}
/* driver-specific node state */
struct mwl_node {
struct ieee80211_node mn_node; /* base class */
struct mwl_ant_info mn_ai; /* antenna info */
uint32_t mn_avgrssi; /* average rssi over all rx frames */
uint16_t mn_staid; /* firmware station id */
struct mwl_bastate mn_ba[MWL_MAXBA];
struct mwl_hal_vap *mn_hvap; /* hal vap handle */
};
#define MWL_NODE(ni) ((struct mwl_node *)(ni))
#define MWL_NODE_CONST(ni) ((const struct mwl_node *)(ni))
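/*
 * Editor's note: an illustrative sketch, not part of this commit.  It shows
 * how the precalculated qos/txq pairs above are intended to be used on the
 * transmit fast path: compare the frame's QoS control field against each of
 * the node's BA streams and, on a match, steer the frame to that stream's
 * h/w queue.  The helper name is hypothetical.
 */
static __inline__ int
mwl_bastream_txq_sketch(const struct mwl_node *mn, uint16_t qos)
{
	int i;

	for (i = 0; i < MWL_MAXBA; i++) {
		if (mwl_bastream_match(&mn->mn_ba[i], qos))
			return (mn->mn_ba[i].txq);
	}
	return (-1);		/* no BA stream; use the normal WME queue */
}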
/*
* Driver-specific vap state.
*/
struct mwl_vap {
struct ieee80211vap mv_vap; /* base class */
struct mwl_hal_vap *mv_hvap; /* hal vap handle */
struct mwl_hal_vap *mv_ap_hvap; /* ap hal vap handle for wds */
uint16_t mv_last_ps_sta; /* last count of ps sta's */
uint16_t mv_eapolformat; /* fixed tx rate for EAPOL */
int (*mv_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
int (*mv_set_tim)(struct ieee80211_node *, int);
};
#define MWL_VAP(vap) ((struct mwl_vap *)(vap))
#define MWL_VAP_CONST(vap) ((const struct mwl_vap *)(vap))
struct mwl_softc {
- struct ifnet *sc_ifp; /* interface common */
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
struct mwl_stats sc_stats; /* interface statistics */
int sc_debug;
device_t sc_dev;
bus_dma_tag_t sc_dmat; /* bus DMA tag */
bus_space_handle_t sc_io0h; /* BAR 0 */
bus_space_tag_t sc_io0t;
bus_space_handle_t sc_io1h; /* BAR 1 */
bus_space_tag_t sc_io1t;
struct mtx sc_mtx; /* master lock (recursive) */
struct taskqueue *sc_tq; /* private task queue */
struct callout sc_watchdog;
int sc_tx_timer;
- unsigned int sc_invalid : 1, /* disable hardware accesses */
+ unsigned int sc_running : 1,
+ sc_invalid : 1, /* disable hardware accesses */
sc_recvsetup:1, /* recv setup */
sc_csapending:1,/* 11h channel switch pending */
sc_radarena : 1,/* radar detection enabled */
sc_rxblocked: 1;/* rx waiting for dma buffers */
struct mwl_hal *sc_mh; /* h/w access layer */
struct mwl_hal_vap *sc_hvap; /* hal vap handle */
struct mwl_hal_hwspec sc_hwspecs; /* h/w capabilities */
uint32_t sc_fwrelease; /* release # of loaded f/w */
struct mwl_hal_txrxdma sc_hwdma; /* h/w dma setup */
uint32_t sc_imask; /* interrupt mask copy */
enum ieee80211_phymode sc_curmode;
u_int16_t sc_curaid; /* current association id */
u_int8_t sc_curbssid[IEEE80211_ADDR_LEN];
MWL_HAL_CHANNEL sc_curchan;
MWL_HAL_TXRATE_HANDLING sc_txratehandling;
u_int16_t sc_rxantenna; /* rx antenna */
u_int16_t sc_txantenna; /* tx antenna */
uint8_t sc_napvaps; /* # ap mode vaps */
uint8_t sc_nwdsvaps; /* # wds mode vaps */
uint8_t sc_nstavaps; /* # sta mode vaps */
uint8_t sc_ndwdsvaps; /* # sta mode dwds vaps */
uint8_t sc_nbssid0; /* # vap's using base mac */
uint32_t sc_bssidmask; /* bssid mask */
void (*sc_recv_mgmt)(struct ieee80211com *,
struct mbuf *,
struct ieee80211_node *,
int, int, int, u_int32_t);
int (*sc_newstate)(struct ieee80211com *,
enum ieee80211_state, int);
void (*sc_node_cleanup)(struct ieee80211_node *);
void (*sc_node_drain)(struct ieee80211_node *);
int (*sc_recv_action)(struct ieee80211_node *,
const struct ieee80211_frame *,
const uint8_t *, const uint8_t *);
int (*sc_addba_request)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *,
int dialogtoken, int baparamset,
int batimeout);
int (*sc_addba_response)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *,
int status, int baparamset,
int batimeout);
void (*sc_addba_stop)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *);
struct mwl_descdma sc_rxdma; /* rx bus dma resources */
mwl_rxbufhead sc_rxbuf; /* rx buffers */
struct mwl_rxbuf *sc_rxnext; /* next rx buffer to process */
struct task sc_rxtask; /* rx int processing */
void *sc_rxmem; /* rx dma buffer pool */
bus_dma_tag_t sc_rxdmat; /* rx bus DMA tag */
bus_size_t sc_rxmemsize; /* rx dma buffer pool size */
bus_dmamap_t sc_rxmap; /* map for rx dma buffers */
bus_addr_t sc_rxmem_paddr; /* physical addr of sc_rxmem */
mwl_jumbohead sc_rxfree; /* list of free dma buffers */
int sc_nrxfree; /* # buffers on rx free list */
struct mtx sc_rxlock; /* lock on sc_rxfree */
struct mwl_txq sc_txq[MWL_NUM_TX_QUEUES];
struct mwl_txq *sc_ac2q[5]; /* WME AC -> h/w q map */
struct mbuf *sc_aggrq; /* aggregation q */
struct task sc_txtask; /* tx int processing */
struct task sc_bawatchdogtask;/* BA watchdog processing */
struct task sc_radartask; /* radar detect processing */
struct task sc_chanswitchtask;/* chan switch processing */
uint8_t sc_staid[MWL_MAXSTAID/NBBY];
int sc_ageinterval;
struct callout sc_timer; /* periodic work */
struct mwl_tx_radiotap_header sc_tx_th;
struct mwl_rx_radiotap_header sc_rx_th;
};
#define MWL_LOCK_INIT(_sc) \
mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
NULL, MTX_DEF | MTX_RECURSE)
#define MWL_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx)
#define MWL_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define MWL_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define MWL_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define MWL_RXFREE_INIT(_sc) \
mtx_init(&(_sc)->sc_rxlock, device_get_nameunit((_sc)->sc_dev), \
NULL, MTX_DEF)
#define MWL_RXFREE_DESTROY(_sc) mtx_destroy(&(_sc)->sc_rxlock)
#define MWL_RXFREE_LOCK(_sc) mtx_lock(&(_sc)->sc_rxlock)
#define MWL_RXFREE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_rxlock)
#define MWL_RXFREE_ASSERT(_sc) mtx_assert(&(_sc)->sc_rxlock, MA_OWNED)
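/*
 * Editor's note: a minimal sketch, not part of this commit, showing the
 * intended use of the sc_rxfree list and its lock: rx dma buffers that are
 * not attached to a descriptor sit on the SLIST and are handed out under
 * MWL_RXFREE_LOCK.  The helper name is hypothetical.
 */
static __inline__ uint8_t *
mwl_rxfree_get_sketch(struct mwl_softc *sc)
{
	struct mwl_jumbo *buf;

	MWL_RXFREE_LOCK(sc);
	buf = SLIST_FIRST(&sc->sc_rxfree);
	if (buf != NULL) {
		SLIST_REMOVE_HEAD(&sc->sc_rxfree, next);
		sc->sc_nrxfree--;
	}
	MWL_RXFREE_UNLOCK(sc);
	return (buf != NULL ? MWL_JUMBO_BUF2DATA(buf) : NULL);
}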
int mwl_attach(u_int16_t, struct mwl_softc *);
int mwl_detach(struct mwl_softc *);
void mwl_resume(struct mwl_softc *);
void mwl_suspend(struct mwl_softc *);
void mwl_shutdown(void *);
void mwl_intr(void *);
#endif /* _DEV_MWL_MVVAR_H */
Index: head/sys/dev/ral/if_ral_pci.c
===================================================================
--- head/sys/dev/ral/if_ral_pci.c (revision 287196)
+++ head/sys/dev/ral/if_ral_pci.c (revision 287197)
@@ -1,319 +1,320 @@
/*-
* Copyright (c) 2005, 2006
* Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* PCI/Cardbus front-end for the Ralink RT2560/RT2561/RT2561S/RT2661 driver.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
+#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/route.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/ral/rt2560var.h>
#include <dev/ral/rt2661var.h>
#include <dev/ral/rt2860var.h>
MODULE_DEPEND(ral, pci, 1, 1, 1);
MODULE_DEPEND(ral, firmware, 1, 1, 1);
MODULE_DEPEND(ral, wlan, 1, 1, 1);
MODULE_DEPEND(ral, wlan_amrr, 1, 1, 1);
static int ral_msi_disable;
TUNABLE_INT("hw.ral.msi_disable", &ral_msi_disable);
struct ral_pci_ident {
uint16_t vendor;
uint16_t device;
const char *name;
};
static const struct ral_pci_ident ral_pci_ids[] = {
{ 0x1432, 0x7708, "Edimax RT2860" },
{ 0x1432, 0x7711, "Edimax RT3591" },
{ 0x1432, 0x7722, "Edimax RT3591" },
{ 0x1432, 0x7727, "Edimax RT2860" },
{ 0x1432, 0x7728, "Edimax RT2860" },
{ 0x1432, 0x7738, "Edimax RT2860" },
{ 0x1432, 0x7748, "Edimax RT2860" },
{ 0x1432, 0x7758, "Edimax RT2860" },
{ 0x1432, 0x7768, "Edimax RT2860" },
{ 0x1462, 0x891a, "MSI RT3090" },
{ 0x1814, 0x0201, "Ralink Technology RT2560" },
{ 0x1814, 0x0301, "Ralink Technology RT2561S" },
{ 0x1814, 0x0302, "Ralink Technology RT2561" },
{ 0x1814, 0x0401, "Ralink Technology RT2661" },
{ 0x1814, 0x0601, "Ralink Technology RT2860" },
{ 0x1814, 0x0681, "Ralink Technology RT2890" },
{ 0x1814, 0x0701, "Ralink Technology RT2760" },
{ 0x1814, 0x0781, "Ralink Technology RT2790" },
{ 0x1814, 0x3060, "Ralink Technology RT3060" },
{ 0x1814, 0x3062, "Ralink Technology RT3062" },
{ 0x1814, 0x3090, "Ralink Technology RT3090" },
{ 0x1814, 0x3091, "Ralink Technology RT3091" },
{ 0x1814, 0x3092, "Ralink Technology RT3092" },
{ 0x1814, 0x3390, "Ralink Technology RT3390" },
{ 0x1814, 0x3562, "Ralink Technology RT3562" },
{ 0x1814, 0x3592, "Ralink Technology RT3592" },
{ 0x1814, 0x3593, "Ralink Technology RT3593" },
{ 0x1814, 0x5360, "Ralink Technology RT5390" },
{ 0x1814, 0x5362, "Ralink Technology RT5392" },
{ 0x1814, 0x5390, "Ralink Technology RT5390" },
{ 0x1814, 0x5392, "Ralink Technology RT5392" },
{ 0x1814, 0x539a, "Ralink Technology RT5390" },
{ 0x1814, 0x539f, "Ralink Technology RT5390" },
{ 0x1a3b, 0x1059, "AWT RT2890" },
{ 0, 0, NULL }
};
static const struct ral_opns {
int (*attach)(device_t, int);
int (*detach)(void *);
void (*shutdown)(void *);
void (*suspend)(void *);
void (*resume)(void *);
void (*intr)(void *);
} ral_rt2560_opns = {
rt2560_attach,
rt2560_detach,
rt2560_stop,
rt2560_stop,
rt2560_resume,
rt2560_intr
}, ral_rt2661_opns = {
rt2661_attach,
rt2661_detach,
rt2661_shutdown,
rt2661_suspend,
rt2661_resume,
rt2661_intr
}, ral_rt2860_opns = {
rt2860_attach,
rt2860_detach,
rt2860_shutdown,
rt2860_suspend,
rt2860_resume,
rt2860_intr
};
struct ral_pci_softc {
union {
struct rt2560_softc sc_rt2560;
struct rt2661_softc sc_rt2661;
struct rt2860_softc sc_rt2860;
} u;
const struct ral_opns *sc_opns;
struct resource *irq;
struct resource *mem;
void *sc_ih;
};
static int ral_pci_probe(device_t);
static int ral_pci_attach(device_t);
static int ral_pci_detach(device_t);
static int ral_pci_shutdown(device_t);
static int ral_pci_suspend(device_t);
static int ral_pci_resume(device_t);
static device_method_t ral_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, ral_pci_probe),
DEVMETHOD(device_attach, ral_pci_attach),
DEVMETHOD(device_detach, ral_pci_detach),
DEVMETHOD(device_shutdown, ral_pci_shutdown),
DEVMETHOD(device_suspend, ral_pci_suspend),
DEVMETHOD(device_resume, ral_pci_resume),
DEVMETHOD_END
};
static driver_t ral_pci_driver = {
"ral",
ral_pci_methods,
sizeof (struct ral_pci_softc)
};
static devclass_t ral_devclass;
DRIVER_MODULE(ral, pci, ral_pci_driver, ral_devclass, NULL, NULL);
static int
ral_pci_probe(device_t dev)
{
const struct ral_pci_ident *ident;
for (ident = ral_pci_ids; ident->name != NULL; ident++) {
if (pci_get_vendor(dev) == ident->vendor &&
pci_get_device(dev) == ident->device) {
device_set_desc(dev, ident->name);
return (BUS_PROBE_DEFAULT);
}
}
return ENXIO;
}
static int
ral_pci_attach(device_t dev)
{
struct ral_pci_softc *psc = device_get_softc(dev);
struct rt2560_softc *sc = &psc->u.sc_rt2560;
int count, error, rid;
pci_enable_busmaster(dev);
switch (pci_get_device(dev)) {
case 0x0201:
psc->sc_opns = &ral_rt2560_opns;
break;
case 0x0301:
case 0x0302:
case 0x0401:
psc->sc_opns = &ral_rt2661_opns;
break;
default:
psc->sc_opns = &ral_rt2860_opns;
break;
}
rid = PCIR_BAR(0);
psc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (psc->mem == NULL) {
device_printf(dev, "could not allocate memory resource\n");
return ENXIO;
}
sc->sc_st = rman_get_bustag(psc->mem);
sc->sc_sh = rman_get_bushandle(psc->mem);
sc->sc_invalid = 1;
rid = 0;
if (ral_msi_disable == 0) {
count = 1;
if (pci_alloc_msi(dev, &count) == 0)
rid = 1;
}
psc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
(rid != 0 ? 0 : RF_SHAREABLE));
if (psc->irq == NULL) {
device_printf(dev, "could not allocate interrupt resource\n");
pci_release_msi(dev);
bus_release_resource(dev, SYS_RES_MEMORY,
rman_get_rid(psc->mem), psc->mem);
return ENXIO;
}
error = (*psc->sc_opns->attach)(dev, pci_get_device(dev));
if (error != 0) {
(void)ral_pci_detach(dev);
return error;
}
/*
* Hook our interrupt after all initialization is complete.
*/
error = bus_setup_intr(dev, psc->irq, INTR_TYPE_NET | INTR_MPSAFE,
NULL, psc->sc_opns->intr, psc, &psc->sc_ih);
if (error != 0) {
device_printf(dev, "could not set up interrupt\n");
(void)ral_pci_detach(dev);
return error;
}
sc->sc_invalid = 0;
return 0;
}
static int
ral_pci_detach(device_t dev)
{
struct ral_pci_softc *psc = device_get_softc(dev);
struct rt2560_softc *sc = &psc->u.sc_rt2560;
/* check if device was removed */
sc->sc_invalid = !bus_child_present(dev);
if (psc->sc_ih != NULL)
bus_teardown_intr(dev, psc->irq, psc->sc_ih);
(*psc->sc_opns->detach)(psc);
bus_generic_detach(dev);
bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(psc->irq),
psc->irq);
pci_release_msi(dev);
bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(psc->mem),
psc->mem);
return 0;
}
static int
ral_pci_shutdown(device_t dev)
{
struct ral_pci_softc *psc = device_get_softc(dev);
(*psc->sc_opns->shutdown)(psc);
return 0;
}
static int
ral_pci_suspend(device_t dev)
{
struct ral_pci_softc *psc = device_get_softc(dev);
(*psc->sc_opns->suspend)(psc);
return 0;
}
static int
ral_pci_resume(device_t dev)
{
struct ral_pci_softc *psc = device_get_softc(dev);
(*psc->sc_opns->resume)(psc);
return 0;
}
Index: head/sys/dev/ral/rt2560.c
===================================================================
--- head/sys/dev/ral/rt2560.c (revision 287196)
+++ head/sys/dev/ral/rt2560.c (revision 287197)
@@ -1,2827 +1,2756 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2005, 2006
* Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
* Ralink Technology RT2560 chipset driver
* http://www.ralinktech.com/
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <dev/ral/rt2560reg.h>
#include <dev/ral/rt2560var.h>
#define RT2560_RSSI(sc, rssi) \
((rssi) > (RT2560_NOISE_FLOOR + (sc)->rssi_corr) ? \
((rssi) - RT2560_NOISE_FLOOR - (sc)->rssi_corr) : 0)
#define RAL_DEBUG
#ifdef RAL_DEBUG
#define DPRINTF(sc, fmt, ...) do { \
if (sc->sc_debug > 0) \
printf(fmt, __VA_ARGS__); \
} while (0)
#define DPRINTFN(sc, n, fmt, ...) do { \
if (sc->sc_debug >= (n)) \
printf(fmt, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, n, fmt, ...)
#endif
static struct ieee80211vap *rt2560_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode,
int, const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void rt2560_vap_delete(struct ieee80211vap *);
static void rt2560_dma_map_addr(void *, bus_dma_segment_t *, int,
int);
static int rt2560_alloc_tx_ring(struct rt2560_softc *,
struct rt2560_tx_ring *, int);
static void rt2560_reset_tx_ring(struct rt2560_softc *,
struct rt2560_tx_ring *);
static void rt2560_free_tx_ring(struct rt2560_softc *,
struct rt2560_tx_ring *);
static int rt2560_alloc_rx_ring(struct rt2560_softc *,
struct rt2560_rx_ring *, int);
static void rt2560_reset_rx_ring(struct rt2560_softc *,
struct rt2560_rx_ring *);
static void rt2560_free_rx_ring(struct rt2560_softc *,
struct rt2560_rx_ring *);
static int rt2560_newstate(struct ieee80211vap *,
enum ieee80211_state, int);
static uint16_t rt2560_eeprom_read(struct rt2560_softc *, uint8_t);
static void rt2560_encryption_intr(struct rt2560_softc *);
static void rt2560_tx_intr(struct rt2560_softc *);
static void rt2560_prio_intr(struct rt2560_softc *);
static void rt2560_decryption_intr(struct rt2560_softc *);
static void rt2560_rx_intr(struct rt2560_softc *);
static void rt2560_beacon_update(struct ieee80211vap *, int item);
static void rt2560_beacon_expire(struct rt2560_softc *);
static void rt2560_wakeup_expire(struct rt2560_softc *);
static void rt2560_scan_start(struct ieee80211com *);
static void rt2560_scan_end(struct ieee80211com *);
static void rt2560_set_channel(struct ieee80211com *);
static void rt2560_setup_tx_desc(struct rt2560_softc *,
struct rt2560_tx_desc *, uint32_t, int, int, int,
bus_addr_t);
static int rt2560_tx_bcn(struct rt2560_softc *, struct mbuf *,
struct ieee80211_node *);
static int rt2560_tx_mgt(struct rt2560_softc *, struct mbuf *,
struct ieee80211_node *);
static int rt2560_tx_data(struct rt2560_softc *, struct mbuf *,
struct ieee80211_node *);
-static void rt2560_start_locked(struct ifnet *);
-static void rt2560_start(struct ifnet *);
+static int rt2560_transmit(struct ieee80211com *, struct mbuf *);
+static void rt2560_start(struct rt2560_softc *);
static void rt2560_watchdog(void *);
-static int rt2560_ioctl(struct ifnet *, u_long, caddr_t);
+static void rt2560_parent(struct ieee80211com *);
static void rt2560_bbp_write(struct rt2560_softc *, uint8_t,
uint8_t);
static uint8_t rt2560_bbp_read(struct rt2560_softc *, uint8_t);
static void rt2560_rf_write(struct rt2560_softc *, uint8_t,
uint32_t);
static void rt2560_set_chan(struct rt2560_softc *,
struct ieee80211_channel *);
#if 0
static void rt2560_disable_rf_tune(struct rt2560_softc *);
#endif
static void rt2560_enable_tsf_sync(struct rt2560_softc *);
static void rt2560_enable_tsf(struct rt2560_softc *);
static void rt2560_update_plcp(struct rt2560_softc *);
static void rt2560_update_slot(struct ieee80211com *);
static void rt2560_set_basicrates(struct rt2560_softc *,
const struct ieee80211_rateset *);
static void rt2560_update_led(struct rt2560_softc *, int, int);
static void rt2560_set_bssid(struct rt2560_softc *, const uint8_t *);
-static void rt2560_set_macaddr(struct rt2560_softc *, uint8_t *);
+static void rt2560_set_macaddr(struct rt2560_softc *,
+ const uint8_t *);
static void rt2560_get_macaddr(struct rt2560_softc *, uint8_t *);
static void rt2560_update_promisc(struct ieee80211com *);
static const char *rt2560_get_rf(int);
static void rt2560_read_config(struct rt2560_softc *);
static int rt2560_bbp_init(struct rt2560_softc *);
static void rt2560_set_txantenna(struct rt2560_softc *, int);
static void rt2560_set_rxantenna(struct rt2560_softc *, int);
static void rt2560_init_locked(struct rt2560_softc *);
static void rt2560_init(void *);
static void rt2560_stop_locked(struct rt2560_softc *);
static int rt2560_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static const struct {
uint32_t reg;
uint32_t val;
} rt2560_def_mac[] = {
RT2560_DEF_MAC
};
static const struct {
uint8_t reg;
uint8_t val;
} rt2560_def_bbp[] = {
RT2560_DEF_BBP
};
static const uint32_t rt2560_rf2522_r2[] = RT2560_RF2522_R2;
static const uint32_t rt2560_rf2523_r2[] = RT2560_RF2523_R2;
static const uint32_t rt2560_rf2524_r2[] = RT2560_RF2524_R2;
static const uint32_t rt2560_rf2525_r2[] = RT2560_RF2525_R2;
static const uint32_t rt2560_rf2525_hi_r2[] = RT2560_RF2525_HI_R2;
static const uint32_t rt2560_rf2525e_r2[] = RT2560_RF2525E_R2;
static const uint32_t rt2560_rf2526_r2[] = RT2560_RF2526_R2;
static const uint32_t rt2560_rf2526_hi_r2[] = RT2560_RF2526_HI_R2;
static const struct {
uint8_t chan;
uint32_t r1, r2, r4;
} rt2560_rf5222[] = {
RT2560_RF5222
};
int
rt2560_attach(device_t dev, int id)
{
struct rt2560_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic;
- struct ifnet *ifp;
- int error;
+ struct ieee80211com *ic = &sc->sc_ic;
uint8_t bands;
- uint8_t macaddr[IEEE80211_ADDR_LEN];
+ int error;
sc->sc_dev = dev;
mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF | MTX_RECURSE);
callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
/* retrieve RT2560 rev. no */
sc->asic_rev = RAL_READ(sc, RT2560_CSR0);
/* retrieve RF rev. no and various other things from EEPROM */
rt2560_read_config(sc);
device_printf(dev, "MAC/BBP RT2560 (rev 0x%02x), RF %s\n",
sc->asic_rev, rt2560_get_rf(sc->rf_rev));
/*
* Allocate Tx and Rx rings.
*/
error = rt2560_alloc_tx_ring(sc, &sc->txq, RT2560_TX_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Tx ring\n");
goto fail1;
}
error = rt2560_alloc_tx_ring(sc, &sc->atimq, RT2560_ATIM_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate ATIM ring\n");
goto fail2;
}
error = rt2560_alloc_tx_ring(sc, &sc->prioq, RT2560_PRIO_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Prio ring\n");
goto fail3;
}
error = rt2560_alloc_tx_ring(sc, &sc->bcnq, RT2560_BEACON_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Beacon ring\n");
goto fail4;
}
error = rt2560_alloc_rx_ring(sc, &sc->rxq, RT2560_RX_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Rx ring\n");
goto fail5;
}
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "can not if_alloc()\n");
- goto fail6;
- }
- ic = ifp->if_l2com;
-
/* retrieve MAC address */
- rt2560_get_macaddr(sc, macaddr);
+ rt2560_get_macaddr(sc, ic->ic_macaddr);
- ifp->if_softc = sc;
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = rt2560_init;
- ifp->if_ioctl = rt2560_ioctl;
- ifp->if_start = rt2560_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA /* station mode */
| IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
| IEEE80211_C_HOSTAP /* hostap mode */
| IEEE80211_C_MONITOR /* monitor mode */
| IEEE80211_C_AHDEMO /* adhoc demo mode */
| IEEE80211_C_WDS /* 4-address traffic works */
| IEEE80211_C_MBSS /* mesh point link mode */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_WPA /* capable of WPA1+WPA2 */
| IEEE80211_C_BGSCAN /* capable of bg scanning */
#ifdef notyet
| IEEE80211_C_TXFRAG /* handle tx frags */
#endif
;
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
if (sc->rf_rev == RT2560_RF_5222)
setbit(&bands, IEEE80211_MODE_11A);
ieee80211_init_channels(ic, NULL, &bands);
- ieee80211_ifattach(ic, macaddr);
+ ieee80211_ifattach(ic);
ic->ic_raw_xmit = rt2560_raw_xmit;
ic->ic_updateslot = rt2560_update_slot;
ic->ic_update_promisc = rt2560_update_promisc;
ic->ic_scan_start = rt2560_scan_start;
ic->ic_scan_end = rt2560_scan_end;
ic->ic_set_channel = rt2560_set_channel;
ic->ic_vap_create = rt2560_vap_create;
ic->ic_vap_delete = rt2560_vap_delete;
+ ic->ic_parent = rt2560_parent;
+ ic->ic_transmit = rt2560_transmit;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
RT2560_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
RT2560_RX_RADIOTAP_PRESENT);
/*
* Add a few sysctl knobs.
*/
#ifdef RAL_DEBUG
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs");
#endif
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"txantenna", CTLFLAG_RW, &sc->tx_ant, 0, "tx antenna (0=auto)");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"rxantenna", CTLFLAG_RW, &sc->rx_ant, 0, "rx antenna (0=auto)");
if (bootverbose)
ieee80211_announce(ic);
return 0;
-fail6: rt2560_free_rx_ring(sc, &sc->rxq);
fail5: rt2560_free_tx_ring(sc, &sc->bcnq);
fail4: rt2560_free_tx_ring(sc, &sc->prioq);
fail3: rt2560_free_tx_ring(sc, &sc->atimq);
fail2: rt2560_free_tx_ring(sc, &sc->txq);
fail1: mtx_destroy(&sc->sc_mtx);
return ENXIO;
}
int
rt2560_detach(void *xsc)
{
struct rt2560_softc *sc = xsc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
rt2560_stop(sc);
ieee80211_ifdetach(ic);
+ mbufq_drain(&sc->sc_snd);
rt2560_free_tx_ring(sc, &sc->txq);
rt2560_free_tx_ring(sc, &sc->atimq);
rt2560_free_tx_ring(sc, &sc->prioq);
rt2560_free_tx_ring(sc, &sc->bcnq);
rt2560_free_rx_ring(sc, &sc->rxq);
- if_free(ifp);
-
mtx_destroy(&sc->sc_mtx);
return 0;
}
static struct ieee80211vap *
rt2560_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
- struct ifnet *ifp = ic->ic_ifp;
+ struct rt2560_softc *sc = ic->ic_softc;
struct rt2560_vap *rvp;
struct ieee80211vap *vap;
switch (opmode) {
case IEEE80211_M_STA:
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
case IEEE80211_M_MONITOR:
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
/* XXXRP: TBD */
if (!TAILQ_EMPTY(&ic->ic_vaps)) {
- if_printf(ifp, "only 1 vap supported\n");
+ device_printf(sc->sc_dev, "only 1 vap supported\n");
return NULL;
}
if (opmode == IEEE80211_M_STA)
flags |= IEEE80211_CLONE_NOBEACONS;
break;
case IEEE80211_M_WDS:
if (TAILQ_EMPTY(&ic->ic_vaps) ||
ic->ic_opmode != IEEE80211_M_HOSTAP) {
- if_printf(ifp, "wds only supported in ap mode\n");
+ device_printf(sc->sc_dev,
+ "wds only supported in ap mode\n");
return NULL;
}
/*
* Silently remove any request for a unique
* bssid; WDS vap's always share the local
* mac address.
*/
flags &= ~IEEE80211_CLONE_BSSID;
break;
default:
- if_printf(ifp, "unknown opmode %d\n", opmode);
+ device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
return NULL;
}
- rvp = (struct rt2560_vap *) malloc(sizeof(struct rt2560_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (rvp == NULL)
- return NULL;
+ rvp = malloc(sizeof(struct rt2560_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &rvp->ral_vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
/* override state transition machine */
rvp->ral_newstate = vap->iv_newstate;
vap->iv_newstate = rt2560_newstate;
vap->iv_update_beacon = rt2560_beacon_update;
ieee80211_ratectl_init(vap);
/* complete setup */
- ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status);
+ ieee80211_vap_attach(vap, ieee80211_media_change,
+ ieee80211_media_status, mac);
if (TAILQ_FIRST(&ic->ic_vaps) == vap)
ic->ic_opmode = opmode;
return vap;
}
static void
rt2560_vap_delete(struct ieee80211vap *vap)
{
struct rt2560_vap *rvp = RT2560_VAP(vap);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(rvp, M_80211_VAP);
}
void
rt2560_resume(void *xsc)
{
struct rt2560_softc *sc = xsc;
- struct ifnet *ifp = sc->sc_ifp;
- if (ifp->if_flags & IFF_UP)
+ if (sc->sc_ic.ic_nrunning > 0)
rt2560_init(sc);
}
static void
rt2560_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
if (error != 0)
return;
KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
*(bus_addr_t *)arg = segs[0].ds_addr;
}
static int
rt2560_alloc_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring,
int count)
{
int i, error;
ring->count = count;
ring->queued = 0;
ring->cur = ring->next = 0;
ring->cur_encrypt = ring->next_encrypt = 0;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
count * RT2560_TX_DESC_SIZE, 1, count * RT2560_TX_DESC_SIZE,
0, NULL, NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc,
count * RT2560_TX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr,
0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
ring->data = malloc(count * sizeof (struct rt2560_tx_data), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (ring->data == NULL) {
device_printf(sc->sc_dev, "could not allocate soft data\n");
error = ENOMEM;
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
MCLBYTES, RT2560_MAX_SCATTER, MCLBYTES, 0, NULL, NULL,
&ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create data DMA tag\n");
goto fail;
}
for (i = 0; i < count; i++) {
error = bus_dmamap_create(ring->data_dmat, 0,
&ring->data[i].map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
}
return 0;
fail: rt2560_free_tx_ring(sc, ring);
return error;
}
static void
rt2560_reset_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring)
{
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
int i;
for (i = 0; i < ring->count; i++) {
desc = &ring->desc[i];
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
if (data->ni != NULL) {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
desc->flags = 0;
}
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
ring->queued = 0;
ring->cur = ring->next = 0;
ring->cur_encrypt = ring->next_encrypt = 0;
}
static void
rt2560_free_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring)
{
struct rt2560_tx_data *data;
int i;
if (ring->desc != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
if (ring->data != NULL) {
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->ni != NULL)
ieee80211_free_node(data->ni);
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
free(ring->data, M_DEVBUF);
}
if (ring->data_dmat != NULL)
bus_dma_tag_destroy(ring->data_dmat);
}
static int
rt2560_alloc_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring,
int count)
{
struct rt2560_rx_desc *desc;
struct rt2560_rx_data *data;
bus_addr_t physaddr;
int i, error;
ring->count = count;
ring->cur = ring->next = 0;
ring->cur_decrypt = 0;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
count * RT2560_RX_DESC_SIZE, 1, count * RT2560_RX_DESC_SIZE,
0, NULL, NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc,
count * RT2560_RX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr,
0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
ring->data = malloc(count * sizeof (struct rt2560_rx_data), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (ring->data == NULL) {
device_printf(sc->sc_dev, "could not allocate soft data\n");
error = ENOMEM;
goto fail;
}
/*
* Pre-allocate Rx buffers and populate Rx ring.
*/
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create data DMA tag\n");
goto fail;
}
for (i = 0; i < count; i++) {
desc = &sc->rxq.desc[i];
data = &sc->rxq.data[i];
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (data->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
error = ENOMEM;
goto fail;
}
error = bus_dmamap_load(ring->data_dmat, data->map,
mtod(data->m, void *), MCLBYTES, rt2560_dma_map_addr,
&physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not load rx buf DMA map");
goto fail;
}
desc->flags = htole32(RT2560_RX_BUSY);
desc->physaddr = htole32(physaddr);
}
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
return 0;
fail: rt2560_free_rx_ring(sc, ring);
return error;
}
static void
rt2560_reset_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring)
{
int i;
for (i = 0; i < ring->count; i++) {
ring->desc[i].flags = htole32(RT2560_RX_BUSY);
ring->data[i].drop = 0;
}
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
ring->cur = ring->next = 0;
ring->cur_decrypt = 0;
}
static void
rt2560_free_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring)
{
struct rt2560_rx_data *data;
int i;
if (ring->desc != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
if (ring->data != NULL) {
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
free(ring->data, M_DEVBUF);
}
if (ring->data_dmat != NULL)
bus_dma_tag_destroy(ring->data_dmat);
}
static int
rt2560_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct rt2560_vap *rvp = RT2560_VAP(vap);
- struct ifnet *ifp = vap->iv_ic->ic_ifp;
- struct rt2560_softc *sc = ifp->if_softc;
+ struct rt2560_softc *sc = vap->iv_ic->ic_softc;
int error;
if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) {
/* abort TSF synchronization */
RAL_WRITE(sc, RT2560_CSR14, 0);
/* turn association led off */
rt2560_update_led(sc, 0, 0);
}
error = rvp->ral_newstate(vap, nstate, arg);
if (error == 0 && nstate == IEEE80211_S_RUN) {
struct ieee80211_node *ni = vap->iv_bss;
struct mbuf *m;
if (vap->iv_opmode != IEEE80211_M_MONITOR) {
rt2560_update_plcp(sc);
rt2560_set_basicrates(sc, &ni->ni_rates);
rt2560_set_bssid(sc, ni->ni_bssid);
}
if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_IBSS ||
vap->iv_opmode == IEEE80211_M_MBSS) {
m = ieee80211_beacon_alloc(ni, &rvp->ral_bo);
if (m == NULL) {
- if_printf(ifp, "could not allocate beacon\n");
+ device_printf(sc->sc_dev,
+ "could not allocate beacon\n");
return ENOBUFS;
}
ieee80211_ref_node(ni);
error = rt2560_tx_bcn(sc, m, ni);
if (error != 0)
return error;
}
/* turn association led on */
rt2560_update_led(sc, 1, 0);
if (vap->iv_opmode != IEEE80211_M_MONITOR)
rt2560_enable_tsf_sync(sc);
else
rt2560_enable_tsf(sc);
}
return error;
}
/*
* Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or
* 93C66).
*/
static uint16_t
rt2560_eeprom_read(struct rt2560_softc *sc, uint8_t addr)
{
uint32_t tmp;
uint16_t val;
int n;
/* clock C once before the first command */
RT2560_EEPROM_CTL(sc, 0);
RT2560_EEPROM_CTL(sc, RT2560_S);
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C);
RT2560_EEPROM_CTL(sc, RT2560_S);
/* write start bit (1) */
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D);
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C);
/* write READ opcode (10) */
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D);
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C);
RT2560_EEPROM_CTL(sc, RT2560_S);
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C);
/* write address (A5-A0 or A7-A0) */
n = (RAL_READ(sc, RT2560_CSR21) & RT2560_93C46) ? 5 : 7;
for (; n >= 0; n--) {
RT2560_EEPROM_CTL(sc, RT2560_S |
(((addr >> n) & 1) << RT2560_SHIFT_D));
RT2560_EEPROM_CTL(sc, RT2560_S |
(((addr >> n) & 1) << RT2560_SHIFT_D) | RT2560_C);
}
RT2560_EEPROM_CTL(sc, RT2560_S);
/* read data Q15-Q0 */
val = 0;
for (n = 15; n >= 0; n--) {
RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C);
tmp = RAL_READ(sc, RT2560_CSR21);
val |= ((tmp & RT2560_Q) >> RT2560_SHIFT_Q) << n;
RT2560_EEPROM_CTL(sc, RT2560_S);
}
RT2560_EEPROM_CTL(sc, 0);
/* clear Chip Select and clock C */
RT2560_EEPROM_CTL(sc, RT2560_S);
RT2560_EEPROM_CTL(sc, 0);
RT2560_EEPROM_CTL(sc, RT2560_C);
return val;
}
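/*
 * Editor's note: an illustrative sketch, not part of this commit.  The EEPROM
 * is addressed in 16-bit words; a caller such as rt2560_read_config() fetches
 * a word and then picks apart its two bytes.  The helper name is hypothetical.
 */
static __inline void
rt2560_eeprom_bytes_sketch(struct rt2560_softc *sc, uint8_t word_addr,
    uint8_t *lo, uint8_t *hi)
{
	uint16_t val = rt2560_eeprom_read(sc, word_addr);

	*lo = val & 0xff;		/* low byte of the 16-bit word */
	*hi = (val >> 8) & 0xff;	/* high byte of the 16-bit word */
}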
/*
* Some frames were processed by the hardware cipher engine and are ready for
* transmission.
*/
static void
rt2560_encryption_intr(struct rt2560_softc *sc)
{
struct rt2560_tx_desc *desc;
int hw;
/* retrieve last descriptor index processed by cipher engine */
hw = RAL_READ(sc, RT2560_SECCSR1) - sc->txq.physaddr;
hw /= RT2560_TX_DESC_SIZE;
bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map,
BUS_DMASYNC_POSTREAD);
while (sc->txq.next_encrypt != hw) {
if (sc->txq.next_encrypt == sc->txq.cur_encrypt) {
printf("hw encrypt %d, cur_encrypt %d\n", hw,
sc->txq.cur_encrypt);
break;
}
desc = &sc->txq.desc[sc->txq.next_encrypt];
if ((le32toh(desc->flags) & RT2560_TX_BUSY) ||
(le32toh(desc->flags) & RT2560_TX_CIPHER_BUSY))
break;
/* for TKIP, swap eiv field to fix a bug in ASIC */
if ((le32toh(desc->flags) & RT2560_TX_CIPHER_MASK) ==
RT2560_TX_CIPHER_TKIP)
desc->eiv = bswap32(desc->eiv);
/* mark the frame ready for transmission */
desc->flags |= htole32(RT2560_TX_VALID);
desc->flags |= htole32(RT2560_TX_BUSY);
DPRINTFN(sc, 15, "encryption done idx=%u\n",
sc->txq.next_encrypt);
sc->txq.next_encrypt =
(sc->txq.next_encrypt + 1) % RT2560_TX_RING_COUNT;
}
bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map,
BUS_DMASYNC_PREWRITE);
/* kick Tx */
RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_TX);
}
static void
rt2560_tx_intr(struct rt2560_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
struct mbuf *m;
- uint32_t flags;
- int retrycnt;
struct ieee80211vap *vap;
struct ieee80211_node *ni;
+ uint32_t flags;
+ int retrycnt, status;
bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map,
BUS_DMASYNC_POSTREAD);
for (;;) {
desc = &sc->txq.desc[sc->txq.next];
data = &sc->txq.data[sc->txq.next];
flags = le32toh(desc->flags);
if ((flags & RT2560_TX_BUSY) ||
(flags & RT2560_TX_CIPHER_BUSY) ||
!(flags & RT2560_TX_VALID))
break;
m = data->m;
ni = data->ni;
vap = ni->ni_vap;
switch (flags & RT2560_TX_RESULT_MASK) {
case RT2560_TX_SUCCESS:
retrycnt = 0;
DPRINTFN(sc, 10, "%s\n", "data frame sent successfully");
if (data->rix != IEEE80211_FIXED_RATE_NONE)
ieee80211_ratectl_tx_complete(vap, ni,
IEEE80211_RATECTL_TX_SUCCESS,
&retrycnt, NULL);
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
+ status = 0;
break;
case RT2560_TX_SUCCESS_RETRY:
retrycnt = RT2560_TX_RETRYCNT(flags);
DPRINTFN(sc, 9, "data frame sent after %u retries\n",
retrycnt);
if (data->rix != IEEE80211_FIXED_RATE_NONE)
ieee80211_ratectl_tx_complete(vap, ni,
IEEE80211_RATECTL_TX_SUCCESS,
&retrycnt, NULL);
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
+ status = 0;
break;
case RT2560_TX_FAIL_RETRY:
retrycnt = RT2560_TX_RETRYCNT(flags);
DPRINTFN(sc, 9, "data frame failed after %d retries\n",
retrycnt);
if (data->rix != IEEE80211_FIXED_RATE_NONE)
ieee80211_ratectl_tx_complete(vap, ni,
IEEE80211_RATECTL_TX_FAILURE,
&retrycnt, NULL);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ status = 1;
break;
case RT2560_TX_FAIL_INVALID:
case RT2560_TX_FAIL_OTHER:
default:
device_printf(sc->sc_dev, "sending data frame failed "
"0x%08x\n", flags);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ status = 1;
}
bus_dmamap_sync(sc->txq.data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->txq.data_dmat, data->map);
- m_freem(m);
- data->m = NULL;
- ieee80211_free_node(data->ni);
+
+ ieee80211_tx_complete(ni, m, status);
data->ni = NULL;
+ data->m = NULL;
/* descriptor is no longer valid */
desc->flags &= ~htole32(RT2560_TX_VALID);
DPRINTFN(sc, 15, "tx done idx=%u\n", sc->txq.next);
sc->txq.queued--;
sc->txq.next = (sc->txq.next + 1) % RT2560_TX_RING_COUNT;
}
bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map,
BUS_DMASYNC_PREWRITE);
if (sc->prioq.queued == 0 && sc->txq.queued == 0)
sc->sc_tx_timer = 0;
- if (sc->txq.queued < RT2560_TX_RING_COUNT - 1) {
- sc->sc_flags &= ~RT2560_F_DATA_OACTIVE;
- if ((sc->sc_flags &
- (RT2560_F_DATA_OACTIVE | RT2560_F_PRIO_OACTIVE)) == 0)
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- rt2560_start_locked(ifp);
- }
+ if (sc->txq.queued < RT2560_TX_RING_COUNT - 1)
+ rt2560_start(sc);
}
static void
rt2560_prio_intr(struct rt2560_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
struct ieee80211_node *ni;
struct mbuf *m;
int flags;
bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map,
BUS_DMASYNC_POSTREAD);
for (;;) {
desc = &sc->prioq.desc[sc->prioq.next];
data = &sc->prioq.data[sc->prioq.next];
flags = le32toh(desc->flags);
if ((flags & RT2560_TX_BUSY) || (flags & RT2560_TX_VALID) == 0)
break;
switch (flags & RT2560_TX_RESULT_MASK) {
case RT2560_TX_SUCCESS:
DPRINTFN(sc, 10, "%s\n", "mgt frame sent successfully");
break;
case RT2560_TX_SUCCESS_RETRY:
DPRINTFN(sc, 9, "mgt frame sent after %u retries\n",
(flags >> 5) & 0x7);
break;
case RT2560_TX_FAIL_RETRY:
DPRINTFN(sc, 9, "%s\n",
"sending mgt frame failed (too much retries)");
break;
case RT2560_TX_FAIL_INVALID:
case RT2560_TX_FAIL_OTHER:
default:
device_printf(sc->sc_dev, "sending mgt frame failed "
"0x%08x\n", flags);
break;
}
bus_dmamap_sync(sc->prioq.data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->prioq.data_dmat, data->map);
m = data->m;
data->m = NULL;
ni = data->ni;
data->ni = NULL;
/* descriptor is no longer valid */
desc->flags &= ~htole32(RT2560_TX_VALID);
DPRINTFN(sc, 15, "prio done idx=%u\n", sc->prioq.next);
sc->prioq.queued--;
sc->prioq.next = (sc->prioq.next + 1) % RT2560_PRIO_RING_COUNT;
if (m->m_flags & M_TXCB)
ieee80211_process_callback(ni, m,
(flags & RT2560_TX_RESULT_MASK) &~
(RT2560_TX_SUCCESS | RT2560_TX_SUCCESS_RETRY));
m_freem(m);
ieee80211_free_node(ni);
}
bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map,
BUS_DMASYNC_PREWRITE);
if (sc->prioq.queued == 0 && sc->txq.queued == 0)
sc->sc_tx_timer = 0;
- if (sc->prioq.queued < RT2560_PRIO_RING_COUNT) {
- sc->sc_flags &= ~RT2560_F_PRIO_OACTIVE;
- if ((sc->sc_flags &
- (RT2560_F_DATA_OACTIVE | RT2560_F_PRIO_OACTIVE)) == 0)
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- rt2560_start_locked(ifp);
- }
+ if (sc->prioq.queued < RT2560_PRIO_RING_COUNT)
+ rt2560_start(sc);
}
/*
* Some frames were processed by the hardware cipher engine and are ready for
* handoff to the IEEE802.11 layer.
*/
static void
rt2560_decryption_intr(struct rt2560_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct rt2560_rx_desc *desc;
struct rt2560_rx_data *data;
bus_addr_t physaddr;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct mbuf *mnew, *m;
int hw, error;
int8_t rssi, nf;
/* retrieve last descriptor index processed by cipher engine */
hw = RAL_READ(sc, RT2560_SECCSR0) - sc->rxq.physaddr;
hw /= RT2560_RX_DESC_SIZE;
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_POSTREAD);
for (; sc->rxq.cur_decrypt != hw;) {
desc = &sc->rxq.desc[sc->rxq.cur_decrypt];
data = &sc->rxq.data[sc->rxq.cur_decrypt];
if ((le32toh(desc->flags) & RT2560_RX_BUSY) ||
(le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY))
break;
if (data->drop) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
if ((le32toh(desc->flags) & RT2560_RX_CIPHER_MASK) != 0 &&
(le32toh(desc->flags) & RT2560_RX_ICV_ERROR)) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
/*
* Try to allocate a new mbuf for this ring element and load it
* before processing the current mbuf. If the ring element
* cannot be loaded, drop the received packet and reuse the old
* mbuf. In the unlikely case that the old mbuf can't be
* reloaded either, explicitly panic.
*/
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->rxq.data_dmat, data->map);
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(mnew, void *), MCLBYTES, rt2560_dma_map_addr,
&physaddr, 0);
if (error != 0) {
m_freem(mnew);
/* try to reload the old mbuf */
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(data->m, void *), MCLBYTES,
rt2560_dma_map_addr, &physaddr, 0);
if (error != 0) {
/* very unlikely that it will fail... */
panic("%s: could not load old rx mbuf",
device_get_name(sc->sc_dev));
}
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
/*
* New mbuf successfully loaded, update Rx ring and continue
* processing.
*/
m = data->m;
data->m = mnew;
desc->physaddr = htole32(physaddr);
/* finalize mbuf */
- m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len =
(le32toh(desc->flags) >> 16) & 0xfff;
rssi = RT2560_RSSI(sc, desc->rssi);
nf = RT2560_NOISE_FLOOR;
if (ieee80211_radiotap_active(ic)) {
struct rt2560_rx_radiotap_header *tap = &sc->sc_rxtap;
uint32_t tsf_lo, tsf_hi;
/* get timestamp (low and high 32 bits) */
tsf_hi = RAL_READ(sc, RT2560_CSR17);
tsf_lo = RAL_READ(sc, RT2560_CSR16);
tap->wr_tsf =
htole64(((uint64_t)tsf_hi << 32) | tsf_lo);
tap->wr_flags = 0;
tap->wr_rate = ieee80211_plcp2rate(desc->rate,
(desc->flags & htole32(RT2560_RX_OFDM)) ?
IEEE80211_T_OFDM : IEEE80211_T_CCK);
tap->wr_antenna = sc->rx_ant;
tap->wr_antsignal = nf + rssi;
tap->wr_antnoise = nf;
}
sc->sc_flags |= RT2560_F_INPUT_RUNNING;
RAL_UNLOCK(sc);
wh = mtod(m, struct ieee80211_frame *);
ni = ieee80211_find_rxnode(ic,
(struct ieee80211_frame_min *)wh);
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi, nf);
RAL_LOCK(sc);
sc->sc_flags &= ~RT2560_F_INPUT_RUNNING;
skip: desc->flags = htole32(RT2560_RX_BUSY);
DPRINTFN(sc, 15, "decryption done idx=%u\n", sc->rxq.cur_decrypt);
sc->rxq.cur_decrypt =
(sc->rxq.cur_decrypt + 1) % RT2560_RX_RING_COUNT;
}
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_PREWRITE);
}
/*
* Some frames were received. Pass them to the hardware cipher engine before
* sending them to the 802.11 layer.
*/
static void
rt2560_rx_intr(struct rt2560_softc *sc)
{
struct rt2560_rx_desc *desc;
struct rt2560_rx_data *data;
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_POSTREAD);
for (;;) {
desc = &sc->rxq.desc[sc->rxq.cur];
data = &sc->rxq.data[sc->rxq.cur];
if ((le32toh(desc->flags) & RT2560_RX_BUSY) ||
(le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY))
break;
data->drop = 0;
if ((le32toh(desc->flags) & RT2560_RX_PHY_ERROR) ||
(le32toh(desc->flags) & RT2560_RX_CRC_ERROR)) {
/*
* This should not happen since we did not request
* to receive those frames when we filled RXCSR0.
*/
DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n",
le32toh(desc->flags));
data->drop = 1;
}
if (((le32toh(desc->flags) >> 16) & 0xfff) > MCLBYTES) {
DPRINTFN(sc, 5, "%s\n", "bad length");
data->drop = 1;
}
/* mark the frame for decryption */
desc->flags |= htole32(RT2560_RX_CIPHER_BUSY);
DPRINTFN(sc, 15, "rx done idx=%u\n", sc->rxq.cur);
sc->rxq.cur = (sc->rxq.cur + 1) % RT2560_RX_RING_COUNT;
}
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_PREWRITE);
/* kick decrypt */
RAL_WRITE(sc, RT2560_SECCSR0, RT2560_KICK_DECRYPT);
}
static void
rt2560_beacon_update(struct ieee80211vap *vap, int item)
{
struct rt2560_vap *rvp = RT2560_VAP(vap);
struct ieee80211_beacon_offsets *bo = &rvp->ral_bo;
setbit(bo->bo_flags, item);
}
/*
* This function is called periodically in IBSS mode when a new beacon must be
* sent out.
*/
static void
rt2560_beacon_expire(struct rt2560_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct rt2560_vap *rvp = RT2560_VAP(vap);
struct rt2560_tx_data *data;
if (ic->ic_opmode != IEEE80211_M_IBSS &&
ic->ic_opmode != IEEE80211_M_HOSTAP &&
ic->ic_opmode != IEEE80211_M_MBSS)
return;
data = &sc->bcnq.data[sc->bcnq.next];
/*
* Don't send beacon if bsschan isn't set
*/
if (data->ni == NULL)
return;
bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->bcnq.data_dmat, data->map);
/* XXX 1 =>'s mcast frames which means all PS sta's will wakeup! */
ieee80211_beacon_update(data->ni, &rvp->ral_bo, data->m, 1);
rt2560_tx_bcn(sc, data->m, data->ni);
DPRINTFN(sc, 15, "%s", "beacon expired\n");
sc->bcnq.next = (sc->bcnq.next + 1) % RT2560_BEACON_RING_COUNT;
}
/* ARGSUSED */
static void
rt2560_wakeup_expire(struct rt2560_softc *sc)
{
DPRINTFN(sc, 2, "%s", "wakeup expired\n");
}
void
rt2560_intr(void *arg)
{
struct rt2560_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
uint32_t r;
RAL_LOCK(sc);
/* disable interrupts */
RAL_WRITE(sc, RT2560_CSR8, 0xffffffff);
/* don't re-enable interrupts if we're shutting down */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!(sc->sc_flags & RT2560_F_RUNNING)) {
RAL_UNLOCK(sc);
return;
}
r = RAL_READ(sc, RT2560_CSR7);
RAL_WRITE(sc, RT2560_CSR7, r);
if (r & RT2560_BEACON_EXPIRE)
rt2560_beacon_expire(sc);
if (r & RT2560_WAKEUP_EXPIRE)
rt2560_wakeup_expire(sc);
if (r & RT2560_ENCRYPTION_DONE)
rt2560_encryption_intr(sc);
if (r & RT2560_TX_DONE)
rt2560_tx_intr(sc);
if (r & RT2560_PRIO_DONE)
rt2560_prio_intr(sc);
if (r & RT2560_DECRYPTION_DONE)
rt2560_decryption_intr(sc);
if (r & RT2560_RX_DONE) {
rt2560_rx_intr(sc);
rt2560_encryption_intr(sc);
}
/* re-enable interrupts */
RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK);
RAL_UNLOCK(sc);
}
#define RAL_SIFS 10 /* us */
#define RT2560_TXRX_TURNAROUND 10 /* us */
static uint8_t
rt2560_plcp_signal(int rate)
{
switch (rate) {
/* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
case 12: return 0xb;
case 18: return 0xf;
case 24: return 0xa;
case 36: return 0xe;
case 48: return 0x9;
case 72: return 0xd;
case 96: return 0x8;
case 108: return 0xc;
/* CCK rates (NB: not IEEE std, device-specific) */
case 2: return 0x0;
case 4: return 0x1;
case 11: return 0x2;
case 22: return 0x3;
}
return 0xff; /* XXX unsupported/unknown rate */
}
static void
rt2560_setup_tx_desc(struct rt2560_softc *sc, struct rt2560_tx_desc *desc,
uint32_t flags, int len, int rate, int encrypt, bus_addr_t physaddr)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t plcp_length;
int remainder;
desc->flags = htole32(flags);
desc->flags |= htole32(len << 16);
desc->physaddr = htole32(physaddr);
desc->wme = htole16(
RT2560_AIFSN(2) |
RT2560_LOGCWMIN(3) |
RT2560_LOGCWMAX(8));
/* setup PLCP fields */
desc->plcp_signal = rt2560_plcp_signal(rate);
desc->plcp_service = 4;
len += IEEE80211_CRC_LEN;
if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) {
desc->flags |= htole32(RT2560_TX_OFDM);
plcp_length = len & 0xfff;
desc->plcp_length_hi = plcp_length >> 6;
desc->plcp_length_lo = plcp_length & 0x3f;
} else {
plcp_length = (16 * len + rate - 1) / rate;
if (rate == 22) {
remainder = (16 * len) % 22;
if (remainder != 0 && remainder < 7)
desc->plcp_service |= RT2560_PLCP_LENGEXT;
}
desc->plcp_length_hi = plcp_length >> 8;
desc->plcp_length_lo = plcp_length & 0xff;
if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
desc->plcp_signal |= 0x08;
}
if (!encrypt)
desc->flags |= htole32(RT2560_TX_VALID);
desc->flags |= encrypt ? htole32(RT2560_TX_CIPHER_BUSY)
: htole32(RT2560_TX_BUSY);
}
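
The CCK branch above encodes the frame's airtime in the PLCP LENGTH field: with rate expressed in 500 kbps units, 16 * len / rate is the transmit time of len bytes in microseconds, rounded up, and at 11 Mbps a length-extension service bit is additionally set for certain remainders. The following standalone sketch (userspace only, not driver code; PLCP_LENGEXT is an arbitrary stand-in for the driver's RT2560_PLCP_LENGEXT flag) reproduces that arithmetic for a 1504-byte PSDU.

#include <stdio.h>

#define PLCP_LENGEXT	0x80	/* stand-in; the driver uses RT2560_PLCP_LENGEXT */

/* Mirror of the CCK PLCP math in rt2560_setup_tx_desc() above. */
static unsigned int
cck_plcp_length(unsigned int len, unsigned int rate, unsigned int *service)
{
        unsigned int plcp_length, remainder;

        plcp_length = (16 * len + rate - 1) / rate;     /* ceil(16 * len / rate) */
        if (rate == 22) {                               /* 11 Mbps */
                remainder = (16 * len) % 22;
                if (remainder != 0 && remainder < 7)
                        *service |= PLCP_LENGEXT;
        }
        return (plcp_length);
}

int
main(void)
{
        unsigned int service = 4;       /* the driver initializes plcp_service to 4 */
        unsigned int us = cck_plcp_length(1504, 22, &service);

        /* prints "LENGTH=1094 us, LENGEXT=no" */
        printf("LENGTH=%u us, LENGEXT=%s\n", us,
            (service & PLCP_LENGEXT) ? "yes" : "no");
        return (0);
}
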
static int
rt2560_tx_bcn(struct rt2560_softc *sc, struct mbuf *m0,
struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
bus_dma_segment_t segs[RT2560_MAX_SCATTER];
int nsegs, rate, error;
desc = &sc->bcnq.desc[sc->bcnq.cur];
data = &sc->bcnq.data[sc->bcnq.cur];
/* XXX maybe a separate beacon rate? */
rate = vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)].mgmtrate;
error = bus_dmamap_load_mbuf_sg(sc->bcnq.data_dmat, data->map, m0,
segs, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
tap->wt_antenna = sc->tx_ant;
ieee80211_radiotap_tx(vap, m0);
}
data->m = m0;
data->ni = ni;
rt2560_setup_tx_desc(sc, desc, RT2560_TX_IFS_NEWBACKOFF |
RT2560_TX_TIMESTAMP, m0->m_pkthdr.len, rate, 0, segs->ds_addr);
DPRINTFN(sc, 10, "sending beacon frame len=%u idx=%u rate=%u\n",
m0->m_pkthdr.len, sc->bcnq.cur, rate);
bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->bcnq.desc_dmat, sc->bcnq.desc_map,
BUS_DMASYNC_PREWRITE);
sc->bcnq.cur = (sc->bcnq.cur + 1) % RT2560_BEACON_RING_COUNT;
return 0;
}
static int
rt2560_tx_mgt(struct rt2560_softc *sc, struct mbuf *m0,
struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
struct ieee80211_frame *wh;
struct ieee80211_key *k;
bus_dma_segment_t segs[RT2560_MAX_SCATTER];
uint16_t dur;
uint32_t flags = 0;
int nsegs, rate, error;
desc = &sc->prioq.desc[sc->prioq.cur];
data = &sc->prioq.data[sc->prioq.cur];
rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate;
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
}
error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0,
segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
tap->wt_antenna = sc->tx_ant;
ieee80211_radiotap_tx(vap, m0);
}
data->m = m0;
data->ni = ni;
/* management frames are not taken into account for amrr */
data->rix = IEEE80211_FIXED_RATE_NONE;
wh = mtod(m0, struct ieee80211_frame *);
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= RT2560_TX_ACK;
dur = ieee80211_ack_duration(ic->ic_rt,
rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE);
*(uint16_t *)wh->i_dur = htole16(dur);
/* tell hardware to add timestamp for probe responses */
if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
IEEE80211_FC0_TYPE_MGT &&
(wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
IEEE80211_FC0_SUBTYPE_PROBE_RESP)
flags |= RT2560_TX_TIMESTAMP;
}
rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 0,
segs->ds_addr);
bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map,
BUS_DMASYNC_PREWRITE);
DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n",
m0->m_pkthdr.len, sc->prioq.cur, rate);
/* kick prio */
sc->prioq.queued++;
sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT;
RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO);
return 0;
}
static int
rt2560_sendprot(struct rt2560_softc *sc,
const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate)
{
struct ieee80211com *ic = ni->ni_ic;
const struct ieee80211_frame *wh;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
struct mbuf *mprot;
int protrate, ackrate, pktlen, flags, isshort, error;
uint16_t dur;
bus_dma_segment_t segs[RT2560_MAX_SCATTER];
int nsegs;
KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY,
("protection %d", prot));
wh = mtod(m, const struct ieee80211_frame *);
pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN;
protrate = ieee80211_ctl_rate(ic->ic_rt, rate);
ackrate = ieee80211_ack_rate(ic->ic_rt, rate);
isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0;
dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort)
+ ieee80211_ack_duration(ic->ic_rt, rate, isshort);
flags = RT2560_TX_MORE_FRAG;
if (prot == IEEE80211_PROT_RTSCTS) {
/* NB: CTS is the same size as an ACK */
dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort);
flags |= RT2560_TX_ACK;
mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur);
} else {
mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur);
}
if (mprot == NULL) {
/* XXX stat + msg */
return ENOBUFS;
}
desc = &sc->txq.desc[sc->txq.cur_encrypt];
data = &sc->txq.data[sc->txq.cur_encrypt];
error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map,
mprot, segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not map mbuf (error %d)\n", error);
m_freem(mprot);
return error;
}
data->m = mprot;
data->ni = ieee80211_ref_node(ni);
/* ctl frames are not taken into account for amrr */
data->rix = IEEE80211_FIXED_RATE_NONE;
rt2560_setup_tx_desc(sc, desc, flags, mprot->m_pkthdr.len, protrate, 1,
segs->ds_addr);
bus_dmamap_sync(sc->txq.data_dmat, data->map,
BUS_DMASYNC_PREWRITE);
sc->txq.queued++;
sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT;
return 0;
}
static int
rt2560_tx_raw(struct rt2560_softc *sc, struct mbuf *m0,
struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
bus_dma_segment_t segs[RT2560_MAX_SCATTER];
uint32_t flags;
int nsegs, rate, error;
desc = &sc->prioq.desc[sc->prioq.cur];
data = &sc->prioq.data[sc->prioq.cur];
rate = params->ibp_rate0;
if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
/* XXX fall back to mcast/mgmt rate? */
m_freem(m0);
return EINVAL;
}
flags = 0;
if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
flags |= RT2560_TX_ACK;
if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) {
error = rt2560_sendprot(sc, m0, ni,
params->ibp_flags & IEEE80211_BPF_RTS ?
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY,
rate);
if (error) {
m_freem(m0);
return error;
}
flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS;
}
error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0,
segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
tap->wt_antenna = sc->tx_ant;
ieee80211_radiotap_tx(ni->ni_vap, m0);
}
data->m = m0;
data->ni = ni;
/* XXX need to set up the descriptor ourselves */
rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len,
rate, (params->ibp_flags & IEEE80211_BPF_CRYPTO) != 0,
segs->ds_addr);
bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map,
BUS_DMASYNC_PREWRITE);
DPRINTFN(sc, 10, "sending raw frame len=%u idx=%u rate=%u\n",
m0->m_pkthdr.len, sc->prioq.cur, rate);
/* kick prio */
sc->prioq.queued++;
sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT;
RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO);
return 0;
}
static int
rt2560_tx_data(struct rt2560_softc *sc, struct mbuf *m0,
struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
struct ieee80211_frame *wh;
const struct ieee80211_txparam *tp;
struct ieee80211_key *k;
struct mbuf *mnew;
bus_dma_segment_t segs[RT2560_MAX_SCATTER];
uint16_t dur;
uint32_t flags;
int nsegs, rate, error;
wh = mtod(m0, struct ieee80211_frame *);
tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
rate = tp->mcastrate;
} else if (m0->m_flags & M_EAPOL) {
rate = tp->mgmtrate;
} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
rate = tp->ucastrate;
} else {
(void) ieee80211_ratectl_rate(ni, NULL, 0);
rate = ni->ni_txrate;
}
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
flags = 0;
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
int prot = IEEE80211_PROT_NONE;
if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
prot = IEEE80211_PROT_RTSCTS;
else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM)
prot = ic->ic_protmode;
if (prot != IEEE80211_PROT_NONE) {
error = rt2560_sendprot(sc, m0, ni, prot, rate);
if (error) {
m_freem(m0);
return error;
}
flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS;
}
}
data = &sc->txq.data[sc->txq.cur_encrypt];
desc = &sc->txq.desc[sc->txq.cur_encrypt];
error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, m0,
segs, &nsegs, 0);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (error != 0) {
mnew = m_defrag(m0, M_NOWAIT);
if (mnew == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");
m_freem(m0);
return ENOBUFS;
}
m0 = mnew;
error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map,
m0, segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not map mbuf (error %d)\n", error);
m_freem(m0);
return error;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
tap->wt_antenna = sc->tx_ant;
ieee80211_radiotap_tx(vap, m0);
}
data->m = m0;
data->ni = ni;
/* remember link conditions for rate adaptation algorithm */
if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) {
data->rix = ni->ni_txrate;
/* XXX probably need last rssi value and not avg */
data->rssi = ic->ic_node_getrssi(ni);
} else
data->rix = IEEE80211_FIXED_RATE_NONE;
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= RT2560_TX_ACK;
dur = ieee80211_ack_duration(ic->ic_rt,
rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE);
*(uint16_t *)wh->i_dur = htole16(dur);
}
rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 1,
segs->ds_addr);
bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map,
BUS_DMASYNC_PREWRITE);
DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n",
m0->m_pkthdr.len, sc->txq.cur_encrypt, rate);
/* kick encrypt */
sc->txq.queued++;
sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT;
RAL_WRITE(sc, RT2560_SECCSR1, RT2560_KICK_ENCRYPT);
return 0;
}
+static int
+rt2560_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct rt2560_softc *sc = ic->ic_softc;
+ int error;
+
+ RAL_LOCK(sc);
+ if ((sc->sc_flags & RT2560_F_RUNNING) == 0) {
+ RAL_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ RAL_UNLOCK(sc);
+ return (error);
+ }
+ rt2560_start(sc);
+ RAL_UNLOCK(sc);
+
+ return (0);
+}
+
static void
-rt2560_start_locked(struct ifnet *ifp)
+rt2560_start(struct rt2560_softc *sc)
{
- struct rt2560_softc *sc = ifp->if_softc;
- struct mbuf *m;
struct ieee80211_node *ni;
+ struct mbuf *m;
RAL_LOCK_ASSERT(sc);
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
- if (sc->txq.queued >= RT2560_TX_RING_COUNT - 1) {
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- sc->sc_flags |= RT2560_F_DATA_OACTIVE;
- break;
- }
+ while (sc->txq.queued < RT2560_TX_RING_COUNT - 1 &&
+ (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
if (rt2560_tx_data(sc, m, ni) != 0) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
break;
}
-
sc->sc_tx_timer = 5;
}
}
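
The two functions above replace the old ifnet if_start path: net80211 now calls ic_transmit(), the driver parks the mbuf on its own sc_snd queue, and rt2560_start() drains that queue only while TX ring slots remain, so the OACTIVE flags removed elsewhere in this diff are no longer needed. Below is a userspace model of that hand-off with stand-in types (struct pkt for an mbuf, a hand-rolled list for struct mbufq, TX_RING_COUNT chosen arbitrarily); it is illustrative only, not kernel code.

#include <errno.h>
#include <stdio.h>
#include <stddef.h>

#define TX_RING_COUNT	8		/* arbitrary stand-in ring size */

struct pkt { struct pkt *next; };	/* stand-in for struct mbuf */

struct softc {
        int running;			/* stand-in for RT2560_F_RUNNING */
        int queued;			/* descriptors handed to the ring */
        struct pkt *snd_head, **snd_tail;	/* stand-in for sc_snd (mbufq) */
};

static void
drv_start(struct softc *sc)
{
        struct pkt *p;

        /* drain the software queue while the TX ring has room */
        while (sc->queued < TX_RING_COUNT - 1 &&
            (p = sc->snd_head) != NULL) {
                if ((sc->snd_head = p->next) == NULL)
                        sc->snd_tail = &sc->snd_head;
                sc->queued++;		/* "load p onto the TX ring" */
        }
}

static int
drv_transmit(struct softc *sc, struct pkt *p)
{
        if (!sc->running)
                return (ENXIO);
        p->next = NULL;			/* mbufq_enqueue() equivalent */
        *sc->snd_tail = p;
        sc->snd_tail = &p->next;
        drv_start(sc);
        return (0);
}

int
main(void)
{
        struct softc sc = { .running = 1, .queued = 0 };
        struct pkt p1, p2;

        sc.snd_head = NULL;
        sc.snd_tail = &sc.snd_head;
        drv_transmit(&sc, &p1);
        drv_transmit(&sc, &p2);
        printf("packets on ring: %d\n", sc.queued);	/* prints 2 */
        return (0);
}
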
static void
-rt2560_start(struct ifnet *ifp)
-{
- struct rt2560_softc *sc = ifp->if_softc;
-
- RAL_LOCK(sc);
- rt2560_start_locked(ifp);
- RAL_UNLOCK(sc);
-}
-
-static void
rt2560_watchdog(void *arg)
{
struct rt2560_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
RAL_LOCK_ASSERT(sc);
- KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
+ KASSERT(sc->sc_flags & RT2560_F_RUNNING, ("not running"));
if (sc->sc_invalid) /* card ejected */
return;
rt2560_encryption_intr(sc);
rt2560_tx_intr(sc);
if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) {
- if_printf(ifp, "device timeout\n");
+ device_printf(sc->sc_dev, "device timeout\n");
rt2560_init_locked(sc);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
/* NB: callout is reset in rt2560_init() */
return;
}
callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc);
}
-static int
-rt2560_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+rt2560_parent(struct ieee80211com *ic)
{
- struct rt2560_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *) data;
- int error = 0, startall = 0;
+ struct rt2560_softc *sc = ic->ic_softc;
+ int startall = 0;
- switch (cmd) {
- case SIOCSIFFLAGS:
- RAL_LOCK(sc);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
- rt2560_init_locked(sc);
- startall = 1;
- } else
- rt2560_update_promisc(ic);
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- rt2560_stop_locked(sc);
- }
- RAL_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
- }
- return error;
+ RAL_LOCK(sc);
+ if (ic->ic_nrunning > 0) {
+ if ((sc->sc_flags & RT2560_F_RUNNING) == 0) {
+ rt2560_init_locked(sc);
+ startall = 1;
+ } else
+ rt2560_update_promisc(ic);
+ } else if (sc->sc_flags & RT2560_F_RUNNING)
+ rt2560_stop_locked(sc);
+ RAL_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
}
static void
rt2560_bbp_write(struct rt2560_softc *sc, uint8_t reg, uint8_t val)
{
uint32_t tmp;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not write to BBP\n");
return;
}
tmp = RT2560_BBP_WRITE | RT2560_BBP_BUSY | reg << 8 | val;
RAL_WRITE(sc, RT2560_BBPCSR, tmp);
DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val);
}
static uint8_t
rt2560_bbp_read(struct rt2560_softc *sc, uint8_t reg)
{
uint32_t val;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not read from BBP\n");
return 0;
}
val = RT2560_BBP_BUSY | reg << 8;
RAL_WRITE(sc, RT2560_BBPCSR, val);
for (ntries = 0; ntries < 100; ntries++) {
val = RAL_READ(sc, RT2560_BBPCSR);
if (!(val & RT2560_BBP_BUSY))
return val & 0xff;
DELAY(1);
}
device_printf(sc->sc_dev, "could not read from BBP\n");
return 0;
}
static void
rt2560_rf_write(struct rt2560_softc *sc, uint8_t reg, uint32_t val)
{
uint32_t tmp;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2560_RFCSR) & RT2560_RF_BUSY))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not write to RF\n");
return;
}
tmp = RT2560_RF_BUSY | RT2560_RF_20BIT | (val & 0xfffff) << 2 |
(reg & 0x3);
RAL_WRITE(sc, RT2560_RFCSR, tmp);
/* remember last written value in sc */
sc->rf_regs[reg] = val;
DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff);
}
static void
rt2560_set_chan(struct rt2560_softc *sc, struct ieee80211_channel *c)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint8_t power, tmp;
u_int i, chan;
chan = ieee80211_chan2ieee(ic, c);
KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan));
if (IEEE80211_IS_CHAN_2GHZ(c))
power = min(sc->txpow[chan - 1], 31);
else
power = 31;
/* adjust txpower using ifconfig settings */
power -= (100 - ic->ic_txpowlimit) / 8;
DPRINTFN(sc, 2, "setting channel to %u, txpower to %u\n", chan, power);
switch (sc->rf_rev) {
case RT2560_RF_2522:
rt2560_rf_write(sc, RAL_RF1, 0x00814);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2522_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
break;
case RT2560_RF_2523:
rt2560_rf_write(sc, RAL_RF1, 0x08804);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2523_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x38044);
rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
break;
case RT2560_RF_2524:
rt2560_rf_write(sc, RAL_RF1, 0x0c808);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2524_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
break;
case RT2560_RF_2525:
rt2560_rf_write(sc, RAL_RF1, 0x08808);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_hi_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
rt2560_rf_write(sc, RAL_RF1, 0x08808);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
break;
case RT2560_RF_2525E:
rt2560_rf_write(sc, RAL_RF1, 0x08808);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525e_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282);
break;
case RT2560_RF_2526:
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_hi_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
rt2560_rf_write(sc, RAL_RF1, 0x08804);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_r2[chan - 1]);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
break;
/* dual-band RF */
case RT2560_RF_5222:
for (i = 0; rt2560_rf5222[i].chan != chan; i++);
rt2560_rf_write(sc, RAL_RF1, rt2560_rf5222[i].r1);
rt2560_rf_write(sc, RAL_RF2, rt2560_rf5222[i].r2);
rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
rt2560_rf_write(sc, RAL_RF4, rt2560_rf5222[i].r4);
break;
default:
printf("unknown ral rev=%d\n", sc->rf_rev);
}
/* XXX */
if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
/* set Japan filter bit for channel 14 */
tmp = rt2560_bbp_read(sc, 70);
tmp &= ~RT2560_JAPAN_FILTER;
if (chan == 14)
tmp |= RT2560_JAPAN_FILTER;
rt2560_bbp_write(sc, 70, tmp);
/* clear CRC errors */
RAL_READ(sc, RT2560_CNT0);
}
}
static void
rt2560_set_channel(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct rt2560_softc *sc = ifp->if_softc;
+ struct rt2560_softc *sc = ic->ic_softc;
RAL_LOCK(sc);
rt2560_set_chan(sc, ic->ic_curchan);
RAL_UNLOCK(sc);
}
#if 0
/*
* Disable RF auto-tuning.
*/
static void
rt2560_disable_rf_tune(struct rt2560_softc *sc)
{
uint32_t tmp;
if (sc->rf_rev != RT2560_RF_2523) {
tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE;
rt2560_rf_write(sc, RAL_RF1, tmp);
}
tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE;
rt2560_rf_write(sc, RAL_RF3, tmp);
DPRINTFN(sc, 2, "%s", "disabling RF autotune\n");
}
#endif
/*
* Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF
* synchronization.
*/
static void
rt2560_enable_tsf_sync(struct rt2560_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint16_t logcwmin, preload;
uint32_t tmp;
/* first, disable TSF synchronization */
RAL_WRITE(sc, RT2560_CSR14, 0);
tmp = 16 * vap->iv_bss->ni_intval;
RAL_WRITE(sc, RT2560_CSR12, tmp);
RAL_WRITE(sc, RT2560_CSR13, 0);
logcwmin = 5;
preload = (vap->iv_opmode == IEEE80211_M_STA) ? 384 : 1024;
tmp = logcwmin << 16 | preload;
RAL_WRITE(sc, RT2560_BCNOCSR, tmp);
/* finally, enable TSF synchronization */
tmp = RT2560_ENABLE_TSF | RT2560_ENABLE_TBCN;
if (ic->ic_opmode == IEEE80211_M_STA)
tmp |= RT2560_ENABLE_TSF_SYNC(1);
else
tmp |= RT2560_ENABLE_TSF_SYNC(2) |
RT2560_ENABLE_BEACON_GENERATOR;
RAL_WRITE(sc, RT2560_CSR14, tmp);
DPRINTF(sc, "%s", "enabling TSF synchronization\n");
}
static void
rt2560_enable_tsf(struct rt2560_softc *sc)
{
RAL_WRITE(sc, RT2560_CSR14, 0);
RAL_WRITE(sc, RT2560_CSR14,
RT2560_ENABLE_TSF_SYNC(2) | RT2560_ENABLE_TSF);
}
static void
rt2560_update_plcp(struct rt2560_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
/* no short preamble for 1Mbps */
RAL_WRITE(sc, RT2560_PLCP1MCSR, 0x00700400);
if (!(ic->ic_flags & IEEE80211_F_SHPREAMBLE)) {
/* values taken from the reference driver */
RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380401);
RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x00150402);
RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b8403);
} else {
/* same values as above, OR'ed with 0x8 */
RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380409);
RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x0015040a);
RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b840b);
}
DPRINTF(sc, "updating PLCP for %s preamble\n",
(ic->ic_flags & IEEE80211_F_SHPREAMBLE) ? "short" : "long");
}
/*
* This function can be called by ieee80211_set_shortslottime(). Refer to
* IEEE Std 802.11-1999 pp. 85 for how these values are computed.
*/
static void
rt2560_update_slot(struct ieee80211com *ic)
{
struct rt2560_softc *sc = ic->ic_softc;
uint8_t slottime;
uint16_t tx_sifs, tx_pifs, tx_difs, eifs;
uint32_t tmp;
#ifndef FORCE_SLOTTIME
slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20;
#else
/*
* Setting slot time according to "short slot time" capability
* in beacon/probe_resp seems to cause problems acknowledging
* certain AP's data frames transmitted at CCK/DS rates: the
* problematic AP keeps retransmitting data frames, probably
* because MAC level acks are not received by hardware.
* So we cheat a little bit here by claiming we are capable of
* "short slot time" but setting hardware slot time to the normal
* slot time. ral(4) does not seem to have trouble receiving
* frames transmitted using short slot time even if hardware
* slot time is set to normal slot time. If we didn't use this
* trick, we would have to claim that short slot time is not
* supported; this would give relatively poor RX performance
* (-1Mb~-2Mb lower) and the _whole_ BSS would stop using short
* slot time.
*/
slottime = 20;
#endif
/* update the MAC slot boundaries */
tx_sifs = RAL_SIFS - RT2560_TXRX_TURNAROUND;
tx_pifs = tx_sifs + slottime;
tx_difs = tx_sifs + 2 * slottime;
eifs = (ic->ic_curmode == IEEE80211_MODE_11B) ? 364 : 60;
tmp = RAL_READ(sc, RT2560_CSR11);
tmp = (tmp & ~0x1f00) | slottime << 8;
RAL_WRITE(sc, RT2560_CSR11, tmp);
tmp = tx_pifs << 16 | tx_sifs;
RAL_WRITE(sc, RT2560_CSR18, tmp);
tmp = eifs << 16 | tx_difs;
RAL_WRITE(sc, RT2560_CSR19, tmp);
DPRINTF(sc, "setting slottime to %uus\n", slottime);
}
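
For reference, plugging the constants defined earlier in this file (RAL_SIFS = 10 us, RT2560_TXRX_TURNAROUND = 10 us) into the boundaries programmed above gives tx_sifs = 0 for either slot time, and the PIFS/DIFS offsets simply track the slot time; eifs is 364 us in 11b mode and 60 us otherwise. A standalone check of those numbers (not driver code):

#include <stdio.h>

int
main(void)
{
        const unsigned int sifs = 10, turnaround = 10;	/* RAL_SIFS, RT2560_TXRX_TURNAROUND */
        const unsigned int slottimes[] = { 9, 20 };	/* short and long slot */

        for (int i = 0; i < 2; i++) {
                unsigned int slottime = slottimes[i];
                unsigned int tx_sifs = sifs - turnaround;
                unsigned int tx_pifs = tx_sifs + slottime;
                unsigned int tx_difs = tx_sifs + 2 * slottime;

                /* prints "slot= 9us: sifs=0 pifs=9 difs=18"
                 * and    "slot=20us: sifs=0 pifs=20 difs=40" */
                printf("slot=%2uus: sifs=%u pifs=%u difs=%u\n",
                    slottime, tx_sifs, tx_pifs, tx_difs);
        }
        return (0);
}
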
static void
rt2560_set_basicrates(struct rt2560_softc *sc,
const struct ieee80211_rateset *rs)
{
#define RV(r) ((r) & IEEE80211_RATE_VAL)
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t mask = 0;
uint8_t rate;
int i;
for (i = 0; i < rs->rs_nrates; i++) {
rate = rs->rs_rates[i];
if (!(rate & IEEE80211_RATE_BASIC))
continue;
mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt, RV(rate));
}
RAL_WRITE(sc, RT2560_ARSP_PLCP_1, mask);
DPRINTF(sc, "Setting basic rate mask to 0x%x\n", mask);
#undef RV
}
static void
rt2560_update_led(struct rt2560_softc *sc, int led1, int led2)
{
uint32_t tmp;
/* set ON period to 70ms and OFF period to 30ms */
tmp = led1 << 16 | led2 << 17 | 70 << 8 | 30;
RAL_WRITE(sc, RT2560_LEDCSR, tmp);
}
static void
rt2560_set_bssid(struct rt2560_softc *sc, const uint8_t *bssid)
{
uint32_t tmp;
tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24;
RAL_WRITE(sc, RT2560_CSR5, tmp);
tmp = bssid[4] | bssid[5] << 8;
RAL_WRITE(sc, RT2560_CSR6, tmp);
DPRINTF(sc, "setting BSSID to %6D\n", bssid, ":");
}
static void
-rt2560_set_macaddr(struct rt2560_softc *sc, uint8_t *addr)
+rt2560_set_macaddr(struct rt2560_softc *sc, const uint8_t *addr)
{
uint32_t tmp;
tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24;
RAL_WRITE(sc, RT2560_CSR3, tmp);
tmp = addr[4] | addr[5] << 8;
RAL_WRITE(sc, RT2560_CSR4, tmp);
DPRINTF(sc, "setting MAC address to %6D\n", addr, ":");
}
static void
rt2560_get_macaddr(struct rt2560_softc *sc, uint8_t *addr)
{
uint32_t tmp;
tmp = RAL_READ(sc, RT2560_CSR3);
addr[0] = tmp & 0xff;
addr[1] = (tmp >> 8) & 0xff;
addr[2] = (tmp >> 16) & 0xff;
addr[3] = (tmp >> 24);
tmp = RAL_READ(sc, RT2560_CSR4);
addr[4] = tmp & 0xff;
addr[5] = (tmp >> 8) & 0xff;
}
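
rt2560_set_macaddr() and rt2560_get_macaddr() pack the six address bytes little-endian into the CSR3/CSR4 register pair. A standalone round-trip of that packing (the address below is arbitrary, chosen only for illustration):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
        const uint8_t addr[6] = { 0x00, 0x0c, 0x43, 0x12, 0x34, 0x56 };	/* arbitrary */
        uint8_t out[6];
        uint32_t csr3, csr4;

        /* same shifts as rt2560_set_macaddr() */
        csr3 = addr[0] | addr[1] << 8 | addr[2] << 16 | (uint32_t)addr[3] << 24;
        csr4 = addr[4] | addr[5] << 8;

        /* same shifts as rt2560_get_macaddr() */
        out[0] = csr3 & 0xff;
        out[1] = (csr3 >> 8) & 0xff;
        out[2] = (csr3 >> 16) & 0xff;
        out[3] = csr3 >> 24;
        out[4] = csr4 & 0xff;
        out[5] = (csr4 >> 8) & 0xff;

        printf("CSR3=0x%08x CSR4=0x%08x roundtrip=%s\n",
            (unsigned)csr3, (unsigned)csr4,
            memcmp(addr, out, sizeof(addr)) == 0 ? "ok" : "mismatch");
        return (0);
}
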
static void
rt2560_update_promisc(struct ieee80211com *ic)
{
struct rt2560_softc *sc = ic->ic_softc;
uint32_t tmp;
tmp = RAL_READ(sc, RT2560_RXCSR0);
tmp &= ~RT2560_DROP_NOT_TO_ME;
- if (!(ic->ic_ifp->if_flags & IFF_PROMISC))
+ if (ic->ic_promisc == 0)
tmp |= RT2560_DROP_NOT_TO_ME;
RAL_WRITE(sc, RT2560_RXCSR0, tmp);
DPRINTF(sc, "%s promiscuous mode\n",
- (ic->ic_ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
+ (ic->ic_promisc > 0) ? "entering" : "leaving");
}
static const char *
rt2560_get_rf(int rev)
{
switch (rev) {
case RT2560_RF_2522: return "RT2522";
case RT2560_RF_2523: return "RT2523";
case RT2560_RF_2524: return "RT2524";
case RT2560_RF_2525: return "RT2525";
case RT2560_RF_2525E: return "RT2525e";
case RT2560_RF_2526: return "RT2526";
case RT2560_RF_5222: return "RT5222";
default: return "unknown";
}
}
static void
rt2560_read_config(struct rt2560_softc *sc)
{
uint16_t val;
int i;
val = rt2560_eeprom_read(sc, RT2560_EEPROM_CONFIG0);
sc->rf_rev = (val >> 11) & 0x7;
sc->hw_radio = (val >> 10) & 0x1;
sc->led_mode = (val >> 6) & 0x7;
sc->rx_ant = (val >> 4) & 0x3;
sc->tx_ant = (val >> 2) & 0x3;
sc->nb_ant = val & 0x3;
/* read default values for BBP registers */
for (i = 0; i < 16; i++) {
val = rt2560_eeprom_read(sc, RT2560_EEPROM_BBP_BASE + i);
if (val == 0 || val == 0xffff)
continue;
sc->bbp_prom[i].reg = val >> 8;
sc->bbp_prom[i].val = val & 0xff;
}
/* read Tx power for all b/g channels */
for (i = 0; i < 14 / 2; i++) {
val = rt2560_eeprom_read(sc, RT2560_EEPROM_TXPOWER + i);
sc->txpow[i * 2] = val & 0xff;
sc->txpow[i * 2 + 1] = val >> 8;
}
for (i = 0; i < 14; ++i) {
if (sc->txpow[i] > 31)
sc->txpow[i] = 24;
}
val = rt2560_eeprom_read(sc, RT2560_EEPROM_CALIBRATE);
if ((val & 0xff) == 0xff)
sc->rssi_corr = RT2560_DEFAULT_RSSI_CORR;
else
sc->rssi_corr = val & 0xff;
DPRINTF(sc, "rssi correction %d, calibrate 0x%02x\n",
sc->rssi_corr, val);
}
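
rt2560_read_config() unpacks several configuration fields from the single 16-bit CONFIG0 EEPROM word. A standalone decode using the same bit layout (the example value is arbitrary, not a real EEPROM dump):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        uint16_t val = 0x1a64;				/* arbitrary example word */
        unsigned int rf_rev   = (val >> 11) & 0x7;	/* RF revision */
        unsigned int hw_radio = (val >> 10) & 0x1;	/* hardware radio switch */
        unsigned int led_mode = (val >> 6) & 0x7;	/* LED mode */
        unsigned int rx_ant   = (val >> 4) & 0x3;	/* default RX antenna */
        unsigned int tx_ant   = (val >> 2) & 0x3;	/* default TX antenna */
        unsigned int nb_ant   = val & 0x3;		/* number of antennas */

        printf("rf_rev=%u hw_radio=%u led_mode=%u rx_ant=%u tx_ant=%u nb_ant=%u\n",
            rf_rev, hw_radio, led_mode, rx_ant, tx_ant, nb_ant);
        return (0);
}
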
static void
rt2560_scan_start(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct rt2560_softc *sc = ifp->if_softc;
+ struct rt2560_softc *sc = ic->ic_softc;
/* abort TSF synchronization */
RAL_WRITE(sc, RT2560_CSR14, 0);
- rt2560_set_bssid(sc, ifp->if_broadcastaddr);
+ rt2560_set_bssid(sc, ieee80211broadcastaddr);
}
static void
rt2560_scan_end(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct rt2560_softc *sc = ifp->if_softc;
+ struct rt2560_softc *sc = ic->ic_softc;
struct ieee80211vap *vap = ic->ic_scan->ss_vap;
rt2560_enable_tsf_sync(sc);
/* XXX keep local copy */
rt2560_set_bssid(sc, vap->iv_bss->ni_bssid);
}
static int
rt2560_bbp_init(struct rt2560_softc *sc)
{
#define N(a) (sizeof (a) / sizeof ((a)[0]))
int i, ntries;
/* wait for BBP to be ready */
for (ntries = 0; ntries < 100; ntries++) {
if (rt2560_bbp_read(sc, RT2560_BBP_VERSION) != 0)
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "timeout waiting for BBP\n");
return EIO;
}
/* initialize BBP registers to default values */
for (i = 0; i < N(rt2560_def_bbp); i++) {
rt2560_bbp_write(sc, rt2560_def_bbp[i].reg,
rt2560_def_bbp[i].val);
}
/* initialize BBP registers to values stored in EEPROM */
for (i = 0; i < 16; i++) {
if (sc->bbp_prom[i].reg == 0 && sc->bbp_prom[i].val == 0)
break;
rt2560_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val);
}
rt2560_bbp_write(sc, 17, 0x48); /* XXX restore bbp17 */
return 0;
#undef N
}
static void
rt2560_set_txantenna(struct rt2560_softc *sc, int antenna)
{
uint32_t tmp;
uint8_t tx;
tx = rt2560_bbp_read(sc, RT2560_BBP_TX) & ~RT2560_BBP_ANTMASK;
if (antenna == 1)
tx |= RT2560_BBP_ANTA;
else if (antenna == 2)
tx |= RT2560_BBP_ANTB;
else
tx |= RT2560_BBP_DIVERSITY;
/* need to force I/Q flip for RF 2525e, 2526 and 5222 */
if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526 ||
sc->rf_rev == RT2560_RF_5222)
tx |= RT2560_BBP_FLIPIQ;
rt2560_bbp_write(sc, RT2560_BBP_TX, tx);
/* update values for CCK and OFDM in BBPCSR1 */
tmp = RAL_READ(sc, RT2560_BBPCSR1) & ~0x00070007;
tmp |= (tx & 0x7) << 16 | (tx & 0x7);
RAL_WRITE(sc, RT2560_BBPCSR1, tmp);
}
static void
rt2560_set_rxantenna(struct rt2560_softc *sc, int antenna)
{
uint8_t rx;
rx = rt2560_bbp_read(sc, RT2560_BBP_RX) & ~RT2560_BBP_ANTMASK;
if (antenna == 1)
rx |= RT2560_BBP_ANTA;
else if (antenna == 2)
rx |= RT2560_BBP_ANTB;
else
rx |= RT2560_BBP_DIVERSITY;
/* need to force no I/Q flip for RF 2525e and 2526 */
if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526)
rx &= ~RT2560_BBP_FLIPIQ;
rt2560_bbp_write(sc, RT2560_BBP_RX, rx);
}
static void
rt2560_init_locked(struct rt2560_softc *sc)
{
#define N(a) (sizeof (a) / sizeof ((a)[0]))
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t tmp;
int i;
RAL_LOCK_ASSERT(sc);
rt2560_stop_locked(sc);
/* setup tx rings */
tmp = RT2560_PRIO_RING_COUNT << 24 |
RT2560_ATIM_RING_COUNT << 16 |
RT2560_TX_RING_COUNT << 8 |
RT2560_TX_DESC_SIZE;
/* rings must be initialized in this exact order */
RAL_WRITE(sc, RT2560_TXCSR2, tmp);
RAL_WRITE(sc, RT2560_TXCSR3, sc->txq.physaddr);
RAL_WRITE(sc, RT2560_TXCSR5, sc->prioq.physaddr);
RAL_WRITE(sc, RT2560_TXCSR4, sc->atimq.physaddr);
RAL_WRITE(sc, RT2560_TXCSR6, sc->bcnq.physaddr);
/* setup rx ring */
tmp = RT2560_RX_RING_COUNT << 8 | RT2560_RX_DESC_SIZE;
RAL_WRITE(sc, RT2560_RXCSR1, tmp);
RAL_WRITE(sc, RT2560_RXCSR2, sc->rxq.physaddr);
/* initialize MAC registers to default values */
for (i = 0; i < N(rt2560_def_mac); i++)
RAL_WRITE(sc, rt2560_def_mac[i].reg, rt2560_def_mac[i].val);
- rt2560_set_macaddr(sc, IF_LLADDR(ifp));
+ rt2560_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);
/* set basic rate set (will be updated later) */
RAL_WRITE(sc, RT2560_ARSP_PLCP_1, 0x153);
rt2560_update_slot(ic);
rt2560_update_plcp(sc);
rt2560_update_led(sc, 0, 0);
RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC);
RAL_WRITE(sc, RT2560_CSR1, RT2560_HOST_READY);
if (rt2560_bbp_init(sc) != 0) {
rt2560_stop_locked(sc);
return;
}
rt2560_set_txantenna(sc, sc->tx_ant);
rt2560_set_rxantenna(sc, sc->rx_ant);
/* set default BSS channel */
rt2560_set_chan(sc, ic->ic_curchan);
/* kick Rx */
tmp = RT2560_DROP_PHY_ERROR | RT2560_DROP_CRC_ERROR;
if (ic->ic_opmode != IEEE80211_M_MONITOR) {
tmp |= RT2560_DROP_CTL | RT2560_DROP_VERSION_ERROR;
if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
ic->ic_opmode != IEEE80211_M_MBSS)
tmp |= RT2560_DROP_TODS;
- if (!(ifp->if_flags & IFF_PROMISC))
+ if (ic->ic_promisc == 0)
tmp |= RT2560_DROP_NOT_TO_ME;
}
RAL_WRITE(sc, RT2560_RXCSR0, tmp);
/* clear old FCS and Rx FIFO errors */
RAL_READ(sc, RT2560_CNT0);
RAL_READ(sc, RT2560_CNT4);
/* clear any pending interrupts */
RAL_WRITE(sc, RT2560_CSR7, 0xffffffff);
/* enable interrupts */
RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_flags |= RT2560_F_RUNNING;
callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc);
#undef N
}
static void
rt2560_init(void *priv)
{
struct rt2560_softc *sc = priv;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
RAL_LOCK(sc);
rt2560_init_locked(sc);
RAL_UNLOCK(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->sc_flags & RT2560_F_RUNNING)
ieee80211_start_all(ic); /* start all vap's */
}
static void
rt2560_stop_locked(struct rt2560_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
volatile int *flags = &sc->sc_flags;
RAL_LOCK_ASSERT(sc);
while (*flags & RT2560_F_INPUT_RUNNING)
msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10);
callout_stop(&sc->watchdog_ch);
sc->sc_tx_timer = 0;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ if (sc->sc_flags & RT2560_F_RUNNING) {
+ sc->sc_flags &= ~RT2560_F_RUNNING;
/* abort Tx */
RAL_WRITE(sc, RT2560_TXCSR0, RT2560_ABORT_TX);
/* disable Rx */
RAL_WRITE(sc, RT2560_RXCSR0, RT2560_DISABLE_RX);
/* reset ASIC (imply reset BBP) */
RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC);
RAL_WRITE(sc, RT2560_CSR1, 0);
/* disable interrupts */
RAL_WRITE(sc, RT2560_CSR8, 0xffffffff);
/* reset Tx and Rx rings */
rt2560_reset_tx_ring(sc, &sc->txq);
rt2560_reset_tx_ring(sc, &sc->atimq);
rt2560_reset_tx_ring(sc, &sc->prioq);
rt2560_reset_tx_ring(sc, &sc->bcnq);
rt2560_reset_rx_ring(sc, &sc->rxq);
}
- sc->sc_flags &= ~(RT2560_F_PRIO_OACTIVE | RT2560_F_DATA_OACTIVE);
}
void
rt2560_stop(void *arg)
{
struct rt2560_softc *sc = arg;
RAL_LOCK(sc);
rt2560_stop_locked(sc);
RAL_UNLOCK(sc);
}
static int
rt2560_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
- struct rt2560_softc *sc = ifp->if_softc;
+ struct rt2560_softc *sc = ic->ic_softc;
RAL_LOCK(sc);
/* prevent management frames from being sent if we're not ready */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!(sc->sc_flags & RT2560_F_RUNNING)) {
RAL_UNLOCK(sc);
m_freem(m);
ieee80211_free_node(ni);
return ENETDOWN;
}
if (sc->prioq.queued >= RT2560_PRIO_RING_COUNT) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- sc->sc_flags |= RT2560_F_PRIO_OACTIVE;
RAL_UNLOCK(sc);
m_freem(m);
ieee80211_free_node(ni);
return ENOBUFS; /* XXX */
}
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
-
if (params == NULL) {
/*
* Legacy path; interpret frame contents to decide
* precisely how to send the frame.
*/
if (rt2560_tx_mgt(sc, m, ni) != 0)
goto bad;
} else {
/*
* Caller supplied explicit parameters to use in
* sending the frame.
*/
if (rt2560_tx_raw(sc, m, ni, params))
goto bad;
}
sc->sc_tx_timer = 5;
RAL_UNLOCK(sc);
return 0;
bad:
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
RAL_UNLOCK(sc);
return EIO; /* XXX */
}
Index: head/sys/dev/ral/rt2560var.h
===================================================================
--- head/sys/dev/ral/rt2560var.h (revision 287196)
+++ head/sys/dev/ral/rt2560var.h (revision 287197)
@@ -1,168 +1,167 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2005, 2006
* Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
struct rt2560_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint64_t wr_tsf;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_antsignal;
int8_t wr_antnoise;
uint8_t wr_antenna;
};
#define RT2560_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_TSFT) | \
(1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_ANTENNA) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE))
struct rt2560_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint8_t wt_rate;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
uint8_t wt_antenna;
};
#define RT2560_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_ANTENNA))
struct rt2560_tx_data {
bus_dmamap_t map;
struct mbuf *m;
struct ieee80211_node *ni;
uint8_t rix;
int8_t rssi;
};
struct rt2560_tx_ring {
bus_dma_tag_t desc_dmat;
bus_dma_tag_t data_dmat;
bus_dmamap_t desc_map;
bus_addr_t physaddr;
struct rt2560_tx_desc *desc;
struct rt2560_tx_data *data;
int count;
int queued;
int cur;
int next;
int cur_encrypt;
int next_encrypt;
};
struct rt2560_rx_data {
bus_dmamap_t map;
struct mbuf *m;
int drop;
};
struct rt2560_rx_ring {
bus_dma_tag_t desc_dmat;
bus_dma_tag_t data_dmat;
bus_dmamap_t desc_map;
bus_addr_t physaddr;
struct rt2560_rx_desc *desc;
struct rt2560_rx_data *data;
int count;
int cur;
int next;
int cur_decrypt;
};
struct rt2560_vap {
struct ieee80211vap ral_vap;
struct ieee80211_beacon_offsets ral_bo;
int (*ral_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define RT2560_VAP(vap) ((struct rt2560_vap *)(vap))
struct rt2560_softc {
- struct ifnet *sc_ifp;
+ struct ieee80211com sc_ic;
+ struct mtx sc_mtx;
+ struct mbufq sc_snd;
device_t sc_dev;
bus_space_tag_t sc_st;
bus_space_handle_t sc_sh;
- struct mtx sc_mtx;
-
struct callout watchdog_ch;
int sc_tx_timer;
int sc_invalid;
int sc_debug;
/*
* The same in both up to here
* ------------------------------------------------
*/
uint32_t asic_rev;
uint32_t eeprom_rev;
uint8_t rf_rev;
uint8_t rssi_corr;
struct rt2560_tx_ring txq;
struct rt2560_tx_ring prioq;
struct rt2560_tx_ring atimq;
struct rt2560_tx_ring bcnq;
struct rt2560_rx_ring rxq;
uint32_t rf_regs[4];
uint8_t txpow[14];
struct {
uint8_t reg;
uint8_t val;
} bbp_prom[16];
int led_mode;
int hw_radio;
int rx_ant;
int tx_ant;
int nb_ant;
struct rt2560_rx_radiotap_header sc_rxtap;
int sc_rxtap_len;
struct rt2560_tx_radiotap_header sc_txtap;
int sc_txtap_len;
#define RT2560_F_INPUT_RUNNING 0x1
-#define RT2560_F_PRIO_OACTIVE 0x2
-#define RT2560_F_DATA_OACTIVE 0x4
+#define RT2560_F_RUNNING 0x2
int sc_flags;
};
int rt2560_attach(device_t, int);
int rt2560_detach(void *);
void rt2560_stop(void *);
void rt2560_resume(void *);
void rt2560_intr(void *);
#define RAL_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define RAL_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
#define RAL_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
Index: head/sys/dev/ral/rt2661.c
===================================================================
--- head/sys/dev/ral/rt2661.c (revision 287196)
+++ head/sys/dev/ral/rt2661.c (revision 287197)
@@ -1,2846 +1,2783 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2006
* Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
* Ralink Technology RT2561, RT2561S and RT2661 chipset driver
* http://www.ralinktech.com/
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <dev/ral/rt2661reg.h>
#include <dev/ral/rt2661var.h>
#define RAL_DEBUG
#ifdef RAL_DEBUG
#define DPRINTF(sc, fmt, ...) do { \
if (sc->sc_debug > 0) \
printf(fmt, __VA_ARGS__); \
} while (0)
#define DPRINTFN(sc, n, fmt, ...) do { \
if (sc->sc_debug >= (n)) \
printf(fmt, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(sc, fmt, ...)
#define DPRINTFN(sc, n, fmt, ...)
#endif
static struct ieee80211vap *rt2661_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode,
int, const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void rt2661_vap_delete(struct ieee80211vap *);
static void rt2661_dma_map_addr(void *, bus_dma_segment_t *, int,
int);
static int rt2661_alloc_tx_ring(struct rt2661_softc *,
struct rt2661_tx_ring *, int);
static void rt2661_reset_tx_ring(struct rt2661_softc *,
struct rt2661_tx_ring *);
static void rt2661_free_tx_ring(struct rt2661_softc *,
struct rt2661_tx_ring *);
static int rt2661_alloc_rx_ring(struct rt2661_softc *,
struct rt2661_rx_ring *, int);
static void rt2661_reset_rx_ring(struct rt2661_softc *,
struct rt2661_rx_ring *);
static void rt2661_free_rx_ring(struct rt2661_softc *,
struct rt2661_rx_ring *);
static int rt2661_newstate(struct ieee80211vap *,
enum ieee80211_state, int);
static uint16_t rt2661_eeprom_read(struct rt2661_softc *, uint8_t);
static void rt2661_rx_intr(struct rt2661_softc *);
static void rt2661_tx_intr(struct rt2661_softc *);
static void rt2661_tx_dma_intr(struct rt2661_softc *,
struct rt2661_tx_ring *);
static void rt2661_mcu_beacon_expire(struct rt2661_softc *);
static void rt2661_mcu_wakeup(struct rt2661_softc *);
static void rt2661_mcu_cmd_intr(struct rt2661_softc *);
static void rt2661_scan_start(struct ieee80211com *);
static void rt2661_scan_end(struct ieee80211com *);
static void rt2661_set_channel(struct ieee80211com *);
static void rt2661_setup_tx_desc(struct rt2661_softc *,
struct rt2661_tx_desc *, uint32_t, uint16_t, int,
int, const bus_dma_segment_t *, int, int);
static int rt2661_tx_data(struct rt2661_softc *, struct mbuf *,
struct ieee80211_node *, int);
static int rt2661_tx_mgt(struct rt2661_softc *, struct mbuf *,
struct ieee80211_node *);
-static void rt2661_start_locked(struct ifnet *);
-static void rt2661_start(struct ifnet *);
+static int rt2661_transmit(struct ieee80211com *, struct mbuf *);
+static void rt2661_start(struct rt2661_softc *);
static int rt2661_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void rt2661_watchdog(void *);
-static int rt2661_ioctl(struct ifnet *, u_long, caddr_t);
+static void rt2661_parent(struct ieee80211com *);
static void rt2661_bbp_write(struct rt2661_softc *, uint8_t,
uint8_t);
static uint8_t rt2661_bbp_read(struct rt2661_softc *, uint8_t);
static void rt2661_rf_write(struct rt2661_softc *, uint8_t,
uint32_t);
static int rt2661_tx_cmd(struct rt2661_softc *, uint8_t,
uint16_t);
static void rt2661_select_antenna(struct rt2661_softc *);
static void rt2661_enable_mrr(struct rt2661_softc *);
static void rt2661_set_txpreamble(struct rt2661_softc *);
static void rt2661_set_basicrates(struct rt2661_softc *,
const struct ieee80211_rateset *);
static void rt2661_select_band(struct rt2661_softc *,
struct ieee80211_channel *);
static void rt2661_set_chan(struct rt2661_softc *,
struct ieee80211_channel *);
static void rt2661_set_bssid(struct rt2661_softc *,
const uint8_t *);
static void rt2661_set_macaddr(struct rt2661_softc *,
const uint8_t *);
static void rt2661_update_promisc(struct ieee80211com *);
static int rt2661_wme_update(struct ieee80211com *) __unused;
static void rt2661_update_slot(struct ieee80211com *);
static const char *rt2661_get_rf(int);
static void rt2661_read_eeprom(struct rt2661_softc *,
uint8_t macaddr[IEEE80211_ADDR_LEN]);
static int rt2661_bbp_init(struct rt2661_softc *);
static void rt2661_init_locked(struct rt2661_softc *);
static void rt2661_init(void *);
static void rt2661_stop_locked(struct rt2661_softc *);
static void rt2661_stop(void *);
static int rt2661_load_microcode(struct rt2661_softc *);
#ifdef notyet
static void rt2661_rx_tune(struct rt2661_softc *);
static void rt2661_radar_start(struct rt2661_softc *);
static int rt2661_radar_stop(struct rt2661_softc *);
#endif
static int rt2661_prepare_beacon(struct rt2661_softc *,
struct ieee80211vap *);
static void rt2661_enable_tsf_sync(struct rt2661_softc *);
static void rt2661_enable_tsf(struct rt2661_softc *);
static int rt2661_get_rssi(struct rt2661_softc *, uint8_t);
static const struct {
uint32_t reg;
uint32_t val;
} rt2661_def_mac[] = {
RT2661_DEF_MAC
};
static const struct {
uint8_t reg;
uint8_t val;
} rt2661_def_bbp[] = {
RT2661_DEF_BBP
};
static const struct rfprog {
uint8_t chan;
uint32_t r1, r2, r3, r4;
} rt2661_rf5225_1[] = {
RT2661_RF5225_1
}, rt2661_rf5225_2[] = {
RT2661_RF5225_2
};
int
rt2661_attach(device_t dev, int id)
{
struct rt2661_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic;
- struct ifnet *ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t val;
int error, ac, ntries;
uint8_t bands;
- uint8_t macaddr[IEEE80211_ADDR_LEN];
sc->sc_id = id;
sc->sc_dev = dev;
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "can not if_alloc()\n");
- return ENOMEM;
- }
- ic = ifp->if_l2com;
-
mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF | MTX_RECURSE);
callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
/* wait for NIC to initialize */
for (ntries = 0; ntries < 1000; ntries++) {
if ((val = RAL_READ(sc, RT2661_MAC_CSR0)) != 0)
break;
DELAY(1000);
}
if (ntries == 1000) {
device_printf(sc->sc_dev,
"timeout waiting for NIC to initialize\n");
error = EIO;
goto fail1;
}
/* retrieve RF rev. no and various other things from EEPROM */
- rt2661_read_eeprom(sc, macaddr);
+ rt2661_read_eeprom(sc, ic->ic_macaddr);
device_printf(dev, "MAC/BBP RT%X, RF %s\n", val,
rt2661_get_rf(sc->rf_rev));
/*
* Allocate Tx and Rx rings.
*/
for (ac = 0; ac < 4; ac++) {
error = rt2661_alloc_tx_ring(sc, &sc->txq[ac],
RT2661_TX_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev,
"could not allocate Tx ring %d\n", ac);
goto fail2;
}
}
error = rt2661_alloc_tx_ring(sc, &sc->mgtq, RT2661_MGT_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Mgt ring\n");
goto fail2;
}
error = rt2661_alloc_rx_ring(sc, &sc->rxq, RT2661_RX_RING_COUNT);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Rx ring\n");
goto fail3;
}
- ifp->if_softc = sc;
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = rt2661_init;
- ifp->if_ioctl = rt2661_ioctl;
- ifp->if_start = rt2661_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA /* station mode */
| IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
| IEEE80211_C_HOSTAP /* hostap mode */
| IEEE80211_C_MONITOR /* monitor mode */
| IEEE80211_C_AHDEMO /* adhoc demo mode */
| IEEE80211_C_WDS /* 4-address traffic works */
| IEEE80211_C_MBSS /* mesh point link mode */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_WPA /* capable of WPA1+WPA2 */
| IEEE80211_C_BGSCAN /* capable of bg scanning */
#ifdef notyet
| IEEE80211_C_TXFRAG /* handle tx frags */
| IEEE80211_C_WME /* 802.11e */
#endif
;
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
if (sc->rf_rev == RT2661_RF_5225 || sc->rf_rev == RT2661_RF_5325)
setbit(&bands, IEEE80211_MODE_11A);
ieee80211_init_channels(ic, NULL, &bands);
- ieee80211_ifattach(ic, macaddr);
+ ieee80211_ifattach(ic);
#if 0
ic->ic_wme.wme_update = rt2661_wme_update;
#endif
ic->ic_scan_start = rt2661_scan_start;
ic->ic_scan_end = rt2661_scan_end;
ic->ic_set_channel = rt2661_set_channel;
ic->ic_updateslot = rt2661_update_slot;
ic->ic_update_promisc = rt2661_update_promisc;
ic->ic_raw_xmit = rt2661_raw_xmit;
-
+ ic->ic_transmit = rt2661_transmit;
+ ic->ic_parent = rt2661_parent;
ic->ic_vap_create = rt2661_vap_create;
ic->ic_vap_delete = rt2661_vap_delete;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
RT2661_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
RT2661_RX_RADIOTAP_PRESENT);
#ifdef RAL_DEBUG
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs");
#endif
if (bootverbose)
ieee80211_announce(ic);
return 0;
fail3: rt2661_free_tx_ring(sc, &sc->mgtq);
fail2: while (--ac >= 0)
rt2661_free_tx_ring(sc, &sc->txq[ac]);
fail1: mtx_destroy(&sc->sc_mtx);
- if_free(ifp);
return error;
}
int
rt2661_detach(void *xsc)
{
struct rt2661_softc *sc = xsc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
RAL_LOCK(sc);
rt2661_stop_locked(sc);
RAL_UNLOCK(sc);
ieee80211_ifdetach(ic);
+ mbufq_drain(&sc->sc_snd);
rt2661_free_tx_ring(sc, &sc->txq[0]);
rt2661_free_tx_ring(sc, &sc->txq[1]);
rt2661_free_tx_ring(sc, &sc->txq[2]);
rt2661_free_tx_ring(sc, &sc->txq[3]);
rt2661_free_tx_ring(sc, &sc->mgtq);
rt2661_free_rx_ring(sc, &sc->rxq);
- if_free(ifp);
-
mtx_destroy(&sc->sc_mtx);
return 0;
}
static struct ieee80211vap *
rt2661_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
- struct ifnet *ifp = ic->ic_ifp;
+ struct rt2661_softc *sc = ic->ic_softc;
struct rt2661_vap *rvp;
struct ieee80211vap *vap;
switch (opmode) {
case IEEE80211_M_STA:
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
case IEEE80211_M_MONITOR:
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
/* XXXRP: TBD */
if (!TAILQ_EMPTY(&ic->ic_vaps)) {
- if_printf(ifp, "only 1 vap supported\n");
+ device_printf(sc->sc_dev, "only 1 vap supported\n");
return NULL;
}
if (opmode == IEEE80211_M_STA)
flags |= IEEE80211_CLONE_NOBEACONS;
break;
case IEEE80211_M_WDS:
if (TAILQ_EMPTY(&ic->ic_vaps) ||
ic->ic_opmode != IEEE80211_M_HOSTAP) {
- if_printf(ifp, "wds only supported in ap mode\n");
+ device_printf(sc->sc_dev,
+ "wds only supported in ap mode\n");
return NULL;
}
/*
* Silently remove any request for a unique
* bssid; WDS vap's always share the local
* mac address.
*/
flags &= ~IEEE80211_CLONE_BSSID;
break;
default:
- if_printf(ifp, "unknown opmode %d\n", opmode);
+ device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
return NULL;
}
- rvp = (struct rt2661_vap *) malloc(sizeof(struct rt2661_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (rvp == NULL)
- return NULL;
+ rvp = malloc(sizeof(struct rt2661_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &rvp->ral_vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
/* override state transition machine */
rvp->ral_newstate = vap->iv_newstate;
vap->iv_newstate = rt2661_newstate;
#if 0
vap->iv_update_beacon = rt2661_beacon_update;
#endif
ieee80211_ratectl_init(vap);
/* complete setup */
- ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status);
+ ieee80211_vap_attach(vap, ieee80211_media_change,
+ ieee80211_media_status, mac);
if (TAILQ_FIRST(&ic->ic_vaps) == vap)
ic->ic_opmode = opmode;
return vap;
}
static void
rt2661_vap_delete(struct ieee80211vap *vap)
{
struct rt2661_vap *rvp = RT2661_VAP(vap);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(rvp, M_80211_VAP);
}
void
rt2661_shutdown(void *xsc)
{
struct rt2661_softc *sc = xsc;
rt2661_stop(sc);
}
void
rt2661_suspend(void *xsc)
{
struct rt2661_softc *sc = xsc;
rt2661_stop(sc);
}
void
rt2661_resume(void *xsc)
{
struct rt2661_softc *sc = xsc;
- struct ifnet *ifp = sc->sc_ifp;
- if (ifp->if_flags & IFF_UP)
+ if (sc->sc_ic.ic_nrunning > 0)
rt2661_init(sc);
}
static void
rt2661_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
if (error != 0)
return;
KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
*(bus_addr_t *)arg = segs[0].ds_addr;
}
static int
rt2661_alloc_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring,
int count)
{
int i, error;
ring->count = count;
ring->queued = 0;
ring->cur = ring->next = ring->stat = 0;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
count * RT2661_TX_DESC_SIZE, 1, count * RT2661_TX_DESC_SIZE,
0, NULL, NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc,
count * RT2661_TX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr,
0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
ring->data = malloc(count * sizeof (struct rt2661_tx_data), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (ring->data == NULL) {
device_printf(sc->sc_dev, "could not allocate soft data\n");
error = ENOMEM;
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
RT2661_MAX_SCATTER, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create data DMA tag\n");
goto fail;
}
for (i = 0; i < count; i++) {
error = bus_dmamap_create(ring->data_dmat, 0,
&ring->data[i].map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
}
return 0;
fail: rt2661_free_tx_ring(sc, ring);
return error;
}
static void
rt2661_reset_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring)
{
struct rt2661_tx_desc *desc;
struct rt2661_tx_data *data;
int i;
for (i = 0; i < ring->count; i++) {
desc = &ring->desc[i];
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
if (data->ni != NULL) {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
desc->flags = 0;
}
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
ring->queued = 0;
ring->cur = ring->next = ring->stat = 0;
}
static void
rt2661_free_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring)
{
struct rt2661_tx_data *data;
int i;
if (ring->desc != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
if (ring->data != NULL) {
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->ni != NULL)
ieee80211_free_node(data->ni);
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
free(ring->data, M_DEVBUF);
}
if (ring->data_dmat != NULL)
bus_dma_tag_destroy(ring->data_dmat);
}
static int
rt2661_alloc_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring,
int count)
{
struct rt2661_rx_desc *desc;
struct rt2661_rx_data *data;
bus_addr_t physaddr;
int i, error;
ring->count = count;
ring->cur = ring->next = 0;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
count * RT2661_RX_DESC_SIZE, 1, count * RT2661_RX_DESC_SIZE,
0, NULL, NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc,
count * RT2661_RX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr,
0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
ring->data = malloc(count * sizeof (struct rt2661_rx_data), M_DEVBUF,
M_NOWAIT | M_ZERO);
if (ring->data == NULL) {
device_printf(sc->sc_dev, "could not allocate soft data\n");
error = ENOMEM;
goto fail;
}
/*
* Pre-allocate Rx buffers and populate Rx ring.
*/
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create data DMA tag\n");
goto fail;
}
for (i = 0; i < count; i++) {
desc = &sc->rxq.desc[i];
data = &sc->rxq.data[i];
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (data->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
error = ENOMEM;
goto fail;
}
error = bus_dmamap_load(ring->data_dmat, data->map,
mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr,
&physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not load rx buf DMA map\n");
goto fail;
}
desc->flags = htole32(RT2661_RX_BUSY);
desc->physaddr = htole32(physaddr);
}
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
return 0;
fail: rt2661_free_rx_ring(sc, ring);
return error;
}
static void
rt2661_reset_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring)
{
int i;
for (i = 0; i < ring->count; i++)
ring->desc[i].flags = htole32(RT2661_RX_BUSY);
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
ring->cur = ring->next = 0;
}
static void
rt2661_free_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring)
{
struct rt2661_rx_data *data;
int i;
if (ring->desc != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
if (ring->data != NULL) {
for (i = 0; i < ring->count; i++) {
data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
free(ring->data, M_DEVBUF);
}
if (ring->data_dmat != NULL)
bus_dma_tag_destroy(ring->data_dmat);
}
static int
rt2661_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct rt2661_vap *rvp = RT2661_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
- struct rt2661_softc *sc = ic->ic_ifp->if_softc;
+ struct rt2661_softc *sc = ic->ic_softc;
int error;
if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) {
uint32_t tmp;
/* abort TSF synchronization */
tmp = RAL_READ(sc, RT2661_TXRX_CSR9);
RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0x00ffffff);
}
error = rvp->ral_newstate(vap, nstate, arg);
if (error == 0 && nstate == IEEE80211_S_RUN) {
struct ieee80211_node *ni = vap->iv_bss;
if (vap->iv_opmode != IEEE80211_M_MONITOR) {
rt2661_enable_mrr(sc);
rt2661_set_txpreamble(sc);
rt2661_set_basicrates(sc, &ni->ni_rates);
rt2661_set_bssid(sc, ni->ni_bssid);
}
if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_IBSS ||
vap->iv_opmode == IEEE80211_M_MBSS) {
error = rt2661_prepare_beacon(sc, vap);
if (error != 0)
return error;
}
if (vap->iv_opmode != IEEE80211_M_MONITOR)
rt2661_enable_tsf_sync(sc);
else
rt2661_enable_tsf(sc);
}
return error;
}
/*
* Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or
* 93C66).
*/
static uint16_t
rt2661_eeprom_read(struct rt2661_softc *sc, uint8_t addr)
{
uint32_t tmp;
uint16_t val;
int n;
/* clock C once before the first command */
RT2661_EEPROM_CTL(sc, 0);
RT2661_EEPROM_CTL(sc, RT2661_S);
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C);
RT2661_EEPROM_CTL(sc, RT2661_S);
/* write start bit (1) */
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D);
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C);
/* write READ opcode (10) */
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D);
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C);
RT2661_EEPROM_CTL(sc, RT2661_S);
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C);
/* write address (A5-A0 or A7-A0) */
n = (RAL_READ(sc, RT2661_E2PROM_CSR) & RT2661_93C46) ? 5 : 7;
for (; n >= 0; n--) {
RT2661_EEPROM_CTL(sc, RT2661_S |
(((addr >> n) & 1) << RT2661_SHIFT_D));
RT2661_EEPROM_CTL(sc, RT2661_S |
(((addr >> n) & 1) << RT2661_SHIFT_D) | RT2661_C);
}
RT2661_EEPROM_CTL(sc, RT2661_S);
/* read data Q15-Q0 */
val = 0;
for (n = 15; n >= 0; n--) {
RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C);
tmp = RAL_READ(sc, RT2661_E2PROM_CSR);
val |= ((tmp & RT2661_Q) >> RT2661_SHIFT_Q) << n;
RT2661_EEPROM_CTL(sc, RT2661_S);
}
RT2661_EEPROM_CTL(sc, 0);
/* clear Chip Select and clock C */
RT2661_EEPROM_CTL(sc, RT2661_S);
RT2661_EEPROM_CTL(sc, 0);
RT2661_EEPROM_CTL(sc, RT2661_C);
return val;
}
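The bit-banging above follows the standard three-wire (Microwire) READ sequence: a start bit, the 10 opcode, then the address sent MSB first, six bits for a 93C46 and eight for a 93C66 in x16 organization. The userland sketch below only mirrors that framing so the bit order can be checked outside the driver; microwire_read_cmd() and the sample address are illustrative, not driver API.

#include <stdio.h>
#include <stdint.h>

/*
 * Bits clocked out on the D line for a Microwire READ: start bit (1),
 * opcode (1, 0), then the address MSB first.  nbits is 6 for a 93C46
 * and 8 for a 93C66, matching the n = 5 / n = 7 choice above.
 */
static int
microwire_read_cmd(uint8_t addr, int nbits, uint8_t *bits)
{
        int i = 0, n;

        bits[i++] = 1;          /* start bit */
        bits[i++] = 1;          /* READ opcode: 1, 0 */
        bits[i++] = 0;
        for (n = nbits - 1; n >= 0; n--)
                bits[i++] = (addr >> n) & 1;
        return i;               /* number of bits to clock out */
}

int
main(void)
{
        uint8_t bits[16];
        int i, n;

        n = microwire_read_cmd(0x2a, 6, bits);  /* 93C46-style address */
        printf("clock out %d bits:", n);
        for (i = 0; i < n; i++)
                printf(" %d", bits[i]);
        printf("\n");
        return 0;
}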
static void
rt2661_tx_intr(struct rt2661_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
struct rt2661_tx_ring *txq;
struct rt2661_tx_data *data;
uint32_t val;
- int qid, retrycnt;
+ int error, qid, retrycnt;
struct ieee80211vap *vap;
for (;;) {
struct ieee80211_node *ni;
struct mbuf *m;
val = RAL_READ(sc, RT2661_STA_CSR4);
if (!(val & RT2661_TX_STAT_VALID))
break;
/* retrieve the queue in which this frame was sent */
qid = RT2661_TX_QID(val);
txq = (qid <= 3) ? &sc->txq[qid] : &sc->mgtq;
/* retrieve rate control algorithm context */
data = &txq->data[txq->stat];
m = data->m;
data->m = NULL;
ni = data->ni;
data->ni = NULL;
/* if no frame has been sent, ignore */
if (ni == NULL)
continue;
else
vap = ni->ni_vap;
switch (RT2661_TX_RESULT(val)) {
case RT2661_TX_SUCCESS:
retrycnt = RT2661_TX_RETRYCNT(val);
DPRINTFN(sc, 10, "data frame sent successfully after "
"%d retries\n", retrycnt);
if (data->rix != IEEE80211_FIXED_RATE_NONE)
ieee80211_ratectl_tx_complete(vap, ni,
IEEE80211_RATECTL_TX_SUCCESS,
&retrycnt, NULL);
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
+ error = 0;
break;
case RT2661_TX_RETRY_FAIL:
retrycnt = RT2661_TX_RETRYCNT(val);
DPRINTFN(sc, 9, "%s\n",
"sending data frame failed (too much retries)");
if (data->rix != IEEE80211_FIXED_RATE_NONE)
ieee80211_ratectl_tx_complete(vap, ni,
IEEE80211_RATECTL_TX_FAILURE,
&retrycnt, NULL);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ error = 1;
break;
default:
/* other failure */
device_printf(sc->sc_dev,
"sending data frame failed 0x%08x\n", val);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ error = 1;
}
DPRINTFN(sc, 15, "tx done q=%d idx=%u\n", qid, txq->stat);
txq->queued--;
if (++txq->stat >= txq->count) /* faster than % count */
txq->stat = 0;
- if (m->m_flags & M_TXCB)
- ieee80211_process_callback(ni, m,
- RT2661_TX_RESULT(val) != RT2661_TX_SUCCESS);
- m_freem(m);
- ieee80211_free_node(ni);
+ ieee80211_tx_complete(ni, m, error);
}
sc->sc_tx_timer = 0;
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- rt2661_start_locked(ifp);
+ rt2661_start(sc);
}
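The single ieee80211_tx_complete() call above takes over the per-frame bookkeeping that the removed lines did by hand. A rough sketch of what that helper subsumes, using only calls already visible in this hunk; it is a paraphrase for orientation, not net80211's actual implementation.

/* roughly the work the old hand-rolled completion path performed */
static void
tx_complete_sketch(struct ieee80211_node *ni, struct mbuf *m, int error)
{
        /* run any deferred callback attached to the frame */
        if (m->m_flags & M_TXCB)
                ieee80211_process_callback(ni, m, error);
        /*
         * Output packet/error counters are now bumped inside net80211,
         * keyed off 'error', instead of if_inc_counter() on an ifnet
         * owned by the driver.
         */
        m_freem(m);
        ieee80211_free_node(ni);
}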
static void
rt2661_tx_dma_intr(struct rt2661_softc *sc, struct rt2661_tx_ring *txq)
{
struct rt2661_tx_desc *desc;
struct rt2661_tx_data *data;
bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_POSTREAD);
for (;;) {
desc = &txq->desc[txq->next];
data = &txq->data[txq->next];
if ((le32toh(desc->flags) & RT2661_TX_BUSY) ||
!(le32toh(desc->flags) & RT2661_TX_VALID))
break;
bus_dmamap_sync(txq->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(txq->data_dmat, data->map);
/* descriptor is no longer valid */
desc->flags &= ~htole32(RT2661_TX_VALID);
DPRINTFN(sc, 15, "tx dma done q=%p idx=%u\n", txq, txq->next);
if (++txq->next >= txq->count) /* faster than % count */
txq->next = 0;
}
bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE);
}
static void
rt2661_rx_intr(struct rt2661_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct rt2661_rx_desc *desc;
struct rt2661_rx_data *data;
bus_addr_t physaddr;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct mbuf *mnew, *m;
int error;
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_POSTREAD);
for (;;) {
int8_t rssi, nf;
desc = &sc->rxq.desc[sc->rxq.cur];
data = &sc->rxq.data[sc->rxq.cur];
if (le32toh(desc->flags) & RT2661_RX_BUSY)
break;
if ((le32toh(desc->flags) & RT2661_RX_PHY_ERROR) ||
(le32toh(desc->flags) & RT2661_RX_CRC_ERROR)) {
/*
* This should not happen since we did not request
* to receive those frames when we filled TXRX_CSR0.
*/
DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n",
le32toh(desc->flags));
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
if ((le32toh(desc->flags) & RT2661_RX_CIPHER_MASK) != 0) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
/*
* Try to allocate a new mbuf for this ring element and load it
* before processing the current mbuf. If the ring element
* cannot be loaded, drop the received packet and reuse the old
* mbuf. In the unlikely case that the old mbuf can't be
* reloaded either, explicitly panic.
*/
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->rxq.data_dmat, data->map);
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(mnew, void *), MCLBYTES, rt2661_dma_map_addr,
&physaddr, 0);
if (error != 0) {
m_freem(mnew);
/* try to reload the old mbuf */
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(data->m, void *), MCLBYTES,
rt2661_dma_map_addr, &physaddr, 0);
if (error != 0) {
/* very unlikely that it will fail... */
panic("%s: could not load old rx mbuf",
device_get_name(sc->sc_dev));
}
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
/*
* New mbuf successfully loaded, update Rx ring and continue
* processing.
*/
m = data->m;
data->m = mnew;
desc->physaddr = htole32(physaddr);
/* finalize mbuf */
- m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len =
(le32toh(desc->flags) >> 16) & 0xfff;
rssi = rt2661_get_rssi(sc, desc->rssi);
/* Error happened during RSSI conversion. */
if (rssi < 0)
rssi = -30; /* XXX ignored by net80211 */
nf = RT2661_NOISE_FLOOR;
if (ieee80211_radiotap_active(ic)) {
struct rt2661_rx_radiotap_header *tap = &sc->sc_rxtap;
uint32_t tsf_lo, tsf_hi;
/* get timestamp (low and high 32 bits) */
tsf_hi = RAL_READ(sc, RT2661_TXRX_CSR13);
tsf_lo = RAL_READ(sc, RT2661_TXRX_CSR12);
tap->wr_tsf =
htole64(((uint64_t)tsf_hi << 32) | tsf_lo);
tap->wr_flags = 0;
tap->wr_rate = ieee80211_plcp2rate(desc->rate,
(desc->flags & htole32(RT2661_RX_OFDM)) ?
IEEE80211_T_OFDM : IEEE80211_T_CCK);
tap->wr_antsignal = nf + rssi;
tap->wr_antnoise = nf;
}
sc->sc_flags |= RAL_INPUT_RUNNING;
RAL_UNLOCK(sc);
wh = mtod(m, struct ieee80211_frame *);
/* send the frame to the 802.11 layer */
ni = ieee80211_find_rxnode(ic,
(struct ieee80211_frame_min *)wh);
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi, nf);
RAL_LOCK(sc);
sc->sc_flags &= ~RAL_INPUT_RUNNING;
skip: desc->flags |= htole32(RT2661_RX_BUSY);
DPRINTFN(sc, 15, "rx intr idx=%u\n", sc->rxq.cur);
sc->rxq.cur = (sc->rxq.cur + 1) % RT2661_RX_RING_COUNT;
}
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_PREWRITE);
}
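The comment above m_getcl() describes the usual allocate-before-free Rx strategy: a replacement cluster is obtained and DMA-loaded first, and only then is the filled mbuf handed up, so an allocation failure costs one dropped frame instead of a hole in the ring. A stripped-down fragment of that pattern follows, with the variables and the "skip" label as in the loop above; give_to_stack() is a hypothetical stand-in for the ieee80211_input path.

/* try to replace the ring slot's buffer before consuming it */
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
        counter_u64_add(ic->ic_ierrors, 1);
        goto skip;              /* drop; the old buffer stays loaded */
}
bus_dmamap_unload(sc->rxq.data_dmat, data->map);
if (bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *),
    MCLBYTES, rt2661_dma_map_addr, &physaddr, 0) != 0) {
        m_freem(mnew);
        /* reload the old buffer so the slot is never left empty */
        (void)bus_dmamap_load(sc->rxq.data_dmat, data->map,
            mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr,
            &physaddr, 0);
        counter_u64_add(ic->ic_ierrors, 1);
        goto skip;
}
m = data->m;                    /* the filled buffer, safe to pass up */
data->m = mnew;                 /* slot now owns the fresh cluster */
desc->physaddr = htole32(physaddr);
give_to_stack(m);               /* hypothetical stand-in, see above */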
/* ARGSUSED */
static void
rt2661_mcu_beacon_expire(struct rt2661_softc *sc)
{
/* do nothing */
}
static void
rt2661_mcu_wakeup(struct rt2661_softc *sc)
{
RAL_WRITE(sc, RT2661_MAC_CSR11, 5 << 16);
RAL_WRITE(sc, RT2661_SOFT_RESET_CSR, 0x7);
RAL_WRITE(sc, RT2661_IO_CNTL_CSR, 0x18);
RAL_WRITE(sc, RT2661_PCI_USEC_CSR, 0x20);
/* send wakeup command to MCU */
rt2661_tx_cmd(sc, RT2661_MCU_CMD_WAKEUP, 0);
}
static void
rt2661_mcu_cmd_intr(struct rt2661_softc *sc)
{
RAL_READ(sc, RT2661_M2H_CMD_DONE_CSR);
RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff);
}
void
rt2661_intr(void *arg)
{
struct rt2661_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
uint32_t r1, r2;
RAL_LOCK(sc);
/* disable MAC and MCU interrupts */
RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffff7f);
RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff);
/* don't re-enable interrupts if we're shutting down */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!(sc->sc_flags & RAL_RUNNING)) {
RAL_UNLOCK(sc);
return;
}
r1 = RAL_READ(sc, RT2661_INT_SOURCE_CSR);
RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, r1);
r2 = RAL_READ(sc, RT2661_MCU_INT_SOURCE_CSR);
RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, r2);
if (r1 & RT2661_MGT_DONE)
rt2661_tx_dma_intr(sc, &sc->mgtq);
if (r1 & RT2661_RX_DONE)
rt2661_rx_intr(sc);
if (r1 & RT2661_TX0_DMA_DONE)
rt2661_tx_dma_intr(sc, &sc->txq[0]);
if (r1 & RT2661_TX1_DMA_DONE)
rt2661_tx_dma_intr(sc, &sc->txq[1]);
if (r1 & RT2661_TX2_DMA_DONE)
rt2661_tx_dma_intr(sc, &sc->txq[2]);
if (r1 & RT2661_TX3_DMA_DONE)
rt2661_tx_dma_intr(sc, &sc->txq[3]);
if (r1 & RT2661_TX_DONE)
rt2661_tx_intr(sc);
if (r2 & RT2661_MCU_CMD_DONE)
rt2661_mcu_cmd_intr(sc);
if (r2 & RT2661_MCU_BEACON_EXPIRE)
rt2661_mcu_beacon_expire(sc);
if (r2 & RT2661_MCU_WAKEUP)
rt2661_mcu_wakeup(sc);
/* re-enable MAC and MCU interrupts */
RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10);
RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0);
RAL_UNLOCK(sc);
}
static uint8_t
rt2661_plcp_signal(int rate)
{
switch (rate) {
/* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
case 12: return 0xb;
case 18: return 0xf;
case 24: return 0xa;
case 36: return 0xe;
case 48: return 0x9;
case 72: return 0xd;
case 96: return 0x8;
case 108: return 0xc;
/* CCK rates (NB: not IEEE std, device-specific) */
case 2: return 0x0;
case 4: return 0x1;
case 11: return 0x2;
case 22: return 0x3;
}
return 0xff; /* XXX unsupported/unknown rate */
}
static void
rt2661_setup_tx_desc(struct rt2661_softc *sc, struct rt2661_tx_desc *desc,
uint32_t flags, uint16_t xflags, int len, int rate,
const bus_dma_segment_t *segs, int nsegs, int ac)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t plcp_length;
int i, remainder;
desc->flags = htole32(flags);
desc->flags |= htole32(len << 16);
desc->flags |= htole32(RT2661_TX_BUSY | RT2661_TX_VALID);
desc->xflags = htole16(xflags);
desc->xflags |= htole16(nsegs << 13);
desc->wme = htole16(
RT2661_QID(ac) |
RT2661_AIFSN(2) |
RT2661_LOGCWMIN(4) |
RT2661_LOGCWMAX(10));
/*
* Remember in which queue this frame was sent. This field is driver
* private data only. It will be made available by the NIC in STA_CSR4
* on Tx interrupts.
*/
desc->qid = ac;
/* setup PLCP fields */
desc->plcp_signal = rt2661_plcp_signal(rate);
desc->plcp_service = 4;
len += IEEE80211_CRC_LEN;
if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) {
desc->flags |= htole32(RT2661_TX_OFDM);
plcp_length = len & 0xfff;
desc->plcp_length_hi = plcp_length >> 6;
desc->plcp_length_lo = plcp_length & 0x3f;
} else {
plcp_length = (16 * len + rate - 1) / rate;
if (rate == 22) {
remainder = (16 * len) % 22;
if (remainder != 0 && remainder < 7)
desc->plcp_service |= RT2661_PLCP_LENGEXT;
}
desc->plcp_length_hi = plcp_length >> 8;
desc->plcp_length_lo = plcp_length & 0xff;
if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
desc->plcp_signal |= 0x08;
}
/* RT2x61 supports scatter with up to 5 segments */
for (i = 0; i < nsegs; i++) {
desc->addr[i] = htole32(segs[i].ds_addr);
desc->len [i] = htole16(segs[i].ds_len);
}
}
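In the CCK branch above, the PLCP LENGTH field is the frame's air time in microseconds, ceil(16 * len / rate) with rate in 500 kbit/s units and len including the FCS, and the length-extension bit resolves the rounding ambiguity at 11 Mb/s. A standalone sketch that mirrors only that arithmetic so it can be checked outside the driver; the function name and sample lengths are local stand-ins.

#include <stdio.h>
#include <stdint.h>

/* mirrors the CCK branch of rt2661_setup_tx_desc(); len includes the FCS */
static void
cck_plcp(int len, int rate /* 500 kbit/s units: 2, 4, 11, 22 */)
{
        unsigned plcp_length;
        int remainder, lengext = 0;

        plcp_length = (16 * len + rate - 1) / rate;     /* ceil, in usec */
        if (rate == 22) {
                remainder = (16 * len) % 22;
                if (remainder != 0 && remainder < 7)
                        lengext = 1;
        }
        printf("len=%d rate=%d.%d Mb/s -> LENGTH=%u us hi=0x%02x lo=0x%02x"
            " lengext=%d\n", len, rate / 2, (rate & 1) * 5, plcp_length,
            plcp_length >> 8, plcp_length & 0xff, lengext);
}

int
main(void)
{
        cck_plcp(1024 + 4, 22);         /* example frame at 11 Mb/s */
        cck_plcp(42 + 4, 2);            /* short frame at 1 Mb/s */
        return 0;
}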
static int
rt2661_tx_mgt(struct rt2661_softc *sc, struct mbuf *m0,
struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct rt2661_tx_desc *desc;
struct rt2661_tx_data *data;
struct ieee80211_frame *wh;
struct ieee80211_key *k;
bus_dma_segment_t segs[RT2661_MAX_SCATTER];
uint16_t dur;
uint32_t flags = 0; /* XXX HWSEQ */
int nsegs, rate, error;
desc = &sc->mgtq.desc[sc->mgtq.cur];
data = &sc->mgtq.data[sc->mgtq.cur];
rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate;
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
}
error = bus_dmamap_load_mbuf_sg(sc->mgtq.data_dmat, data->map, m0,
segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2661_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
ieee80211_radiotap_tx(vap, m0);
}
data->m = m0;
data->ni = ni;
/* management frames are not taken into account for amrr */
data->rix = IEEE80211_FIXED_RATE_NONE;
wh = mtod(m0, struct ieee80211_frame *);
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= RT2661_TX_NEED_ACK;
dur = ieee80211_ack_duration(ic->ic_rt,
rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE);
*(uint16_t *)wh->i_dur = htole16(dur);
/* tell hardware to add timestamp in probe responses */
if ((wh->i_fc[0] &
(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
(IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
flags |= RT2661_TX_TIMESTAMP;
}
rt2661_setup_tx_desc(sc, desc, flags, 0 /* XXX HWSEQ */,
m0->m_pkthdr.len, rate, segs, nsegs, RT2661_QID_MGT);
bus_dmamap_sync(sc->mgtq.data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->mgtq.desc_dmat, sc->mgtq.desc_map,
BUS_DMASYNC_PREWRITE);
DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n",
m0->m_pkthdr.len, sc->mgtq.cur, rate);
/* kick mgt */
sc->mgtq.queued++;
sc->mgtq.cur = (sc->mgtq.cur + 1) % RT2661_MGT_RING_COUNT;
RAL_WRITE(sc, RT2661_TX_CNTL_CSR, RT2661_KICK_MGT);
return 0;
}
static int
rt2661_sendprot(struct rt2661_softc *sc, int ac,
const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate)
{
struct ieee80211com *ic = ni->ni_ic;
struct rt2661_tx_ring *txq = &sc->txq[ac];
const struct ieee80211_frame *wh;
struct rt2661_tx_desc *desc;
struct rt2661_tx_data *data;
struct mbuf *mprot;
int protrate, ackrate, pktlen, flags, isshort, error;
uint16_t dur;
bus_dma_segment_t segs[RT2661_MAX_SCATTER];
int nsegs;
KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY,
("protection %d", prot));
wh = mtod(m, const struct ieee80211_frame *);
pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN;
protrate = ieee80211_ctl_rate(ic->ic_rt, rate);
ackrate = ieee80211_ack_rate(ic->ic_rt, rate);
isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0;
dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort)
+ ieee80211_ack_duration(ic->ic_rt, rate, isshort);
flags = RT2661_TX_MORE_FRAG;
if (prot == IEEE80211_PROT_RTSCTS) {
/* NB: CTS is the same size as an ACK */
dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort);
flags |= RT2661_TX_NEED_ACK;
mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur);
} else {
mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur);
}
if (mprot == NULL) {
/* XXX stat + msg */
return ENOBUFS;
}
data = &txq->data[txq->cur];
desc = &txq->desc[txq->cur];
error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, mprot, segs,
&nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not map mbuf (error %d)\n", error);
m_freem(mprot);
return error;
}
data->m = mprot;
data->ni = ieee80211_ref_node(ni);
/* ctl frames are not taken into account for amrr */
data->rix = IEEE80211_FIXED_RATE_NONE;
rt2661_setup_tx_desc(sc, desc, flags, 0, mprot->m_pkthdr.len,
protrate, segs, 1, ac);
bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE);
txq->queued++;
txq->cur = (txq->cur + 1) % RT2661_TX_RING_COUNT;
return 0;
}
static int
rt2661_tx_data(struct rt2661_softc *sc, struct mbuf *m0,
struct ieee80211_node *ni, int ac)
{
struct ieee80211vap *vap = ni->ni_vap;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct rt2661_tx_ring *txq = &sc->txq[ac];
struct rt2661_tx_desc *desc;
struct rt2661_tx_data *data;
struct ieee80211_frame *wh;
const struct ieee80211_txparam *tp;
struct ieee80211_key *k;
const struct chanAccParams *cap;
struct mbuf *mnew;
bus_dma_segment_t segs[RT2661_MAX_SCATTER];
uint16_t dur;
uint32_t flags;
int error, nsegs, rate, noack = 0;
wh = mtod(m0, struct ieee80211_frame *);
tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
rate = tp->mcastrate;
} else if (m0->m_flags & M_EAPOL) {
rate = tp->mgmtrate;
} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
rate = tp->ucastrate;
} else {
(void) ieee80211_ratectl_rate(ni, NULL, 0);
rate = ni->ni_txrate;
}
rate &= IEEE80211_RATE_VAL;
if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
cap = &ic->ic_wme.wme_chanParams;
noack = cap->cap_wmeParams[ac].wmep_noackPolicy;
}
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
flags = 0;
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
int prot = IEEE80211_PROT_NONE;
if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
prot = IEEE80211_PROT_RTSCTS;
else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM)
prot = ic->ic_protmode;
if (prot != IEEE80211_PROT_NONE) {
error = rt2661_sendprot(sc, ac, m0, ni, prot, rate);
if (error) {
m_freem(m0);
return error;
}
flags |= RT2661_TX_LONG_RETRY | RT2661_TX_IFS;
}
}
data = &txq->data[txq->cur];
desc = &txq->desc[txq->cur];
error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs,
&nsegs, 0);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev, "could not map mbuf (error %d)\n",
error);
m_freem(m0);
return error;
}
if (error != 0) {
mnew = m_defrag(m0, M_NOWAIT);
if (mnew == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");
m_freem(m0);
return ENOBUFS;
}
m0 = mnew;
error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0,
segs, &nsegs, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not map mbuf (error %d)\n", error);
m_freem(m0);
return error;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2661_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
ieee80211_radiotap_tx(vap, m0);
}
data->m = m0;
data->ni = ni;
/* remember link conditions for rate adaptation algorithm */
if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) {
data->rix = ni->ni_txrate;
/* XXX probably need last rssi value and not avg */
data->rssi = ic->ic_node_getrssi(ni);
} else
data->rix = IEEE80211_FIXED_RATE_NONE;
if (!noack && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= RT2661_TX_NEED_ACK;
dur = ieee80211_ack_duration(ic->ic_rt,
rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE);
*(uint16_t *)wh->i_dur = htole16(dur);
}
rt2661_setup_tx_desc(sc, desc, flags, 0, m0->m_pkthdr.len, rate, segs,
nsegs, ac);
bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE);
DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n",
m0->m_pkthdr.len, txq->cur, rate);
/* kick Tx */
txq->queued++;
txq->cur = (txq->cur + 1) % RT2661_TX_RING_COUNT;
RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 1 << ac);
return 0;
}
+static int
+rt2661_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct rt2661_softc *sc = ic->ic_softc;
+ int error;
+
+ RAL_LOCK(sc);
+ if ((sc->sc_flags & RAL_RUNNING) == 0) {
+ RAL_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ RAL_UNLOCK(sc);
+ return (error);
+ }
+ rt2661_start(sc);
+ RAL_UNLOCK(sc);
+
+ return (0);
+}
+
static void
-rt2661_start_locked(struct ifnet *ifp)
+rt2661_start(struct rt2661_softc *sc)
{
- struct rt2661_softc *sc = ifp->if_softc;
struct mbuf *m;
struct ieee80211_node *ni;
int ac;
RAL_LOCK_ASSERT(sc);
/* prevent management frames from being sent if we're not ready */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || sc->sc_invalid)
+ if (!(sc->sc_flags & RAL_RUNNING) || sc->sc_invalid)
return;
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
-
+ while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ac = M_WME_GETAC(m);
if (sc->txq[ac].queued >= RT2661_TX_RING_COUNT - 1) {
/* there is no place left in this ring */
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ mbufq_prepend(&sc->sc_snd, m);
break;
}
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
if (rt2661_tx_data(sc, m, ni, ac) != 0) {
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
break;
}
-
sc->sc_tx_timer = 5;
}
}
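rt2661_transmit() and rt2661_start() replace the old ifnet if_snd queue with a driver-owned mbufq. For this to work the queue also has to be initialized and drained elsewhere in the conversion; the lines below are the expected counterparts and are assumptions about the rest of the commit, not code shown in this hunk.

/* in rt2661_attach(), before the device can transmit (assumed) */
mbufq_init(&sc->sc_snd, ifqmaxlen);

/* in rt2661_detach(), once the device is stopped (assumed) */
mbufq_drain(&sc->sc_snd);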
-static void
-rt2661_start(struct ifnet *ifp)
-{
- struct rt2661_softc *sc = ifp->if_softc;
-
- RAL_LOCK(sc);
- rt2661_start_locked(ifp);
- RAL_UNLOCK(sc);
-}
-
static int
rt2661_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
- struct rt2661_softc *sc = ifp->if_softc;
+ struct rt2661_softc *sc = ic->ic_softc;
RAL_LOCK(sc);
/* prevent management frames from being sent if we're not ready */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!(sc->sc_flags & RAL_RUNNING)) {
RAL_UNLOCK(sc);
m_freem(m);
ieee80211_free_node(ni);
return ENETDOWN;
}
if (sc->mgtq.queued >= RT2661_MGT_RING_COUNT) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
RAL_UNLOCK(sc);
m_freem(m);
ieee80211_free_node(ni);
return ENOBUFS; /* XXX */
}
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
-
/*
* Legacy path; interpret frame contents to decide
* precisely how to send the frame.
* XXX raw path
*/
if (rt2661_tx_mgt(sc, m, ni) != 0)
goto bad;
sc->sc_tx_timer = 5;
RAL_UNLOCK(sc);
return 0;
bad:
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
RAL_UNLOCK(sc);
return EIO; /* XXX */
}
static void
rt2661_watchdog(void *arg)
{
struct rt2661_softc *sc = (struct rt2661_softc *)arg;
- struct ifnet *ifp = sc->sc_ifp;
RAL_LOCK_ASSERT(sc);
- KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
+ KASSERT(sc->sc_flags & RAL_RUNNING, ("not running"));
if (sc->sc_invalid) /* card ejected */
return;
if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) {
- if_printf(ifp, "device timeout\n");
+ device_printf(sc->sc_dev, "device timeout\n");
rt2661_init_locked(sc);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
/* NB: callout is reset in rt2661_init() */
return;
}
callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc);
}
-static int
-rt2661_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+rt2661_parent(struct ieee80211com *ic)
{
- struct rt2661_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *) data;
- int error = 0, startall = 0;
+ struct rt2661_softc *sc = ic->ic_softc;
+ int startall = 0;
- switch (cmd) {
- case SIOCSIFFLAGS:
- RAL_LOCK(sc);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
- rt2661_init_locked(sc);
- startall = 1;
- } else
- rt2661_update_promisc(ic);
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- rt2661_stop_locked(sc);
- }
- RAL_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
- }
- return error;
+ RAL_LOCK(sc);
+ if (ic->ic_nrunning > 0) {
+ if ((sc->sc_flags & RAL_RUNNING) == 0) {
+ rt2661_init_locked(sc);
+ startall = 1;
+ } else
+ rt2661_update_promisc(ic);
+ } else if (sc->sc_flags & RAL_RUNNING)
+ rt2661_stop_locked(sc);
+ RAL_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
}
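With the ioctl handler gone, net80211 now reaches the driver through method pointers on the shared ieee80211com rather than through an ifnet. The assignments below are the expected wiring in rt2661_attach(); they are assumed from the conversion pattern and are not part of this hunk.

/* in rt2661_attach() (assumed) */
struct ieee80211com *ic = &sc->sc_ic;

ic->ic_softc = sc;
ic->ic_parent = rt2661_parent;          /* up/down, replaces SIOCSIFFLAGS */
ic->ic_transmit = rt2661_transmit;      /* replaces if_start on the ifnet */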
static void
rt2661_bbp_write(struct rt2661_softc *sc, uint8_t reg, uint8_t val)
{
uint32_t tmp;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not write to BBP\n");
return;
}
tmp = RT2661_BBP_BUSY | (reg & 0x7f) << 8 | val;
RAL_WRITE(sc, RT2661_PHY_CSR3, tmp);
DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val);
}
static uint8_t
rt2661_bbp_read(struct rt2661_softc *sc, uint8_t reg)
{
uint32_t val;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not read from BBP\n");
return 0;
}
val = RT2661_BBP_BUSY | RT2661_BBP_READ | reg << 8;
RAL_WRITE(sc, RT2661_PHY_CSR3, val);
for (ntries = 0; ntries < 100; ntries++) {
val = RAL_READ(sc, RT2661_PHY_CSR3);
if (!(val & RT2661_BBP_BUSY))
return val & 0xff;
DELAY(1);
}
device_printf(sc->sc_dev, "could not read from BBP\n");
return 0;
}
static void
rt2661_rf_write(struct rt2661_softc *sc, uint8_t reg, uint32_t val)
{
uint32_t tmp;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2661_PHY_CSR4) & RT2661_RF_BUSY))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not write to RF\n");
return;
}
tmp = RT2661_RF_BUSY | RT2661_RF_21BIT | (val & 0x1fffff) << 2 |
(reg & 3);
RAL_WRITE(sc, RT2661_PHY_CSR4, tmp);
/* remember last written value in sc */
sc->rf_regs[reg] = val;
DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 3, val & 0x1fffff);
}
static int
rt2661_tx_cmd(struct rt2661_softc *sc, uint8_t cmd, uint16_t arg)
{
if (RAL_READ(sc, RT2661_H2M_MAILBOX_CSR) & RT2661_H2M_BUSY)
return EIO; /* there is already a command pending */
RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR,
RT2661_H2M_BUSY | RT2661_TOKEN_NO_INTR << 16 | arg);
RAL_WRITE(sc, RT2661_HOST_CMD_CSR, RT2661_KICK_CMD | cmd);
return 0;
}
static void
rt2661_select_antenna(struct rt2661_softc *sc)
{
uint8_t bbp4, bbp77;
uint32_t tmp;
bbp4 = rt2661_bbp_read(sc, 4);
bbp77 = rt2661_bbp_read(sc, 77);
/* TBD */
/* make sure Rx is disabled before switching antenna */
tmp = RAL_READ(sc, RT2661_TXRX_CSR0);
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX);
rt2661_bbp_write(sc, 4, bbp4);
rt2661_bbp_write(sc, 77, bbp77);
/* restore Rx filter */
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp);
}
/*
* Enable multi-rate retries for frames sent at OFDM rates.
* In 802.11b/g mode, allow fallback to CCK rates.
*/
static void
rt2661_enable_mrr(struct rt2661_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
tmp = RAL_READ(sc, RT2661_TXRX_CSR4);
tmp &= ~RT2661_MRR_CCK_FALLBACK;
if (!IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
tmp |= RT2661_MRR_CCK_FALLBACK;
tmp |= RT2661_MRR_ENABLED;
RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp);
}
static void
rt2661_set_txpreamble(struct rt2661_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
tmp = RAL_READ(sc, RT2661_TXRX_CSR4);
tmp &= ~RT2661_SHORT_PREAMBLE;
if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
tmp |= RT2661_SHORT_PREAMBLE;
RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp);
}
static void
rt2661_set_basicrates(struct rt2661_softc *sc,
const struct ieee80211_rateset *rs)
{
#define RV(r) ((r) & IEEE80211_RATE_VAL)
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t mask = 0;
uint8_t rate;
int i;
for (i = 0; i < rs->rs_nrates; i++) {
rate = rs->rs_rates[i];
if (!(rate & IEEE80211_RATE_BASIC))
continue;
mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt, RV(rate));
}
RAL_WRITE(sc, RT2661_TXRX_CSR5, mask);
DPRINTF(sc, "Setting basic rate mask to 0x%x\n", mask);
#undef RV
}
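Only rates flagged as basic in the negotiated rate set contribute to the TXRX_CSR5 mask. A userland sketch of the same filtering; the 0x80/0x7f flag values match net80211's IEEE80211_RATE_BASIC/IEEE80211_RATE_VAL, while rate_index() is a trivial stand-in for ieee80211_legacy_rate_lookup() and its ordering is illustrative only.

#include <stdio.h>
#include <stdint.h>

#define RATE_BASIC      0x80    /* IEEE80211_RATE_BASIC */
#define RATE_VAL        0x7f    /* IEEE80211_RATE_VAL */

/* stand-in for ieee80211_legacy_rate_lookup(): index into an 11g table */
static int
rate_index(int rate /* 500 kbit/s units */)
{
        static const int tbl[] = { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72,
            96, 108 };
        unsigned i;

        for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                if (tbl[i] == rate)
                        return i;
        return 0;
}

int
main(void)
{
        /* typical 11g set: 1/2/5.5/11 basic (0x80 set), OFDM rates not */
        const uint8_t rs[] = { 0x82, 0x84, 0x8b, 0x96, 0x0c, 0x12, 0x18,
            0x24, 0x30, 0x48, 0x60, 0x6c };
        uint32_t mask = 0;
        unsigned i;

        for (i = 0; i < sizeof(rs); i++) {
                if (!(rs[i] & RATE_BASIC))
                        continue;
                mask |= 1 << rate_index(rs[i] & RATE_VAL);
        }
        printf("basic rate mask = 0x%x\n", mask);       /* 0xf here */
        return 0;
}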
/*
* Reprogram MAC/BBP to switch to a new band. Values taken from the reference
* driver.
*/
static void
rt2661_select_band(struct rt2661_softc *sc, struct ieee80211_channel *c)
{
uint8_t bbp17, bbp35, bbp96, bbp97, bbp98, bbp104;
uint32_t tmp;
/* update all BBP registers that depend on the band */
bbp17 = 0x20; bbp96 = 0x48; bbp104 = 0x2c;
bbp35 = 0x50; bbp97 = 0x48; bbp98 = 0x48;
if (IEEE80211_IS_CHAN_5GHZ(c)) {
bbp17 += 0x08; bbp96 += 0x10; bbp104 += 0x0c;
bbp35 += 0x10; bbp97 += 0x10; bbp98 += 0x10;
}
if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) ||
(IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) {
bbp17 += 0x10; bbp96 += 0x10; bbp104 += 0x10;
}
rt2661_bbp_write(sc, 17, bbp17);
rt2661_bbp_write(sc, 96, bbp96);
rt2661_bbp_write(sc, 104, bbp104);
if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) ||
(IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) {
rt2661_bbp_write(sc, 75, 0x80);
rt2661_bbp_write(sc, 86, 0x80);
rt2661_bbp_write(sc, 88, 0x80);
}
rt2661_bbp_write(sc, 35, bbp35);
rt2661_bbp_write(sc, 97, bbp97);
rt2661_bbp_write(sc, 98, bbp98);
tmp = RAL_READ(sc, RT2661_PHY_CSR0);
tmp &= ~(RT2661_PA_PE_2GHZ | RT2661_PA_PE_5GHZ);
if (IEEE80211_IS_CHAN_2GHZ(c))
tmp |= RT2661_PA_PE_2GHZ;
else
tmp |= RT2661_PA_PE_5GHZ;
RAL_WRITE(sc, RT2661_PHY_CSR0, tmp);
}
static void
rt2661_set_chan(struct rt2661_softc *sc, struct ieee80211_channel *c)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
const struct rfprog *rfprog;
uint8_t bbp3, bbp94 = RT2661_BBPR94_DEFAULT;
int8_t power;
u_int i, chan;
chan = ieee80211_chan2ieee(ic, c);
KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan));
/* select the appropriate RF settings based on what EEPROM says */
rfprog = (sc->rfprog == 0) ? rt2661_rf5225_1 : rt2661_rf5225_2;
/* find the settings for this channel (we know it exists) */
for (i = 0; rfprog[i].chan != chan; i++);
power = sc->txpow[i];
if (power < 0) {
bbp94 += power;
power = 0;
} else if (power > 31) {
bbp94 += power - 31;
power = 31;
}
/*
* If we are switching from the 2GHz band to the 5GHz band or
* vice-versa, BBP registers need to be reprogrammed.
*/
if (c->ic_flags != sc->sc_curchan->ic_flags) {
rt2661_select_band(sc, c);
rt2661_select_antenna(sc);
}
sc->sc_curchan = c;
rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1);
rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2);
rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7);
rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10);
DELAY(200);
rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1);
rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2);
rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7 | 1);
rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10);
DELAY(200);
rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1);
rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2);
rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7);
rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10);
/* enable smart mode for MIMO-capable RFs */
bbp3 = rt2661_bbp_read(sc, 3);
bbp3 &= ~RT2661_SMART_MODE;
if (sc->rf_rev == RT2661_RF_5325 || sc->rf_rev == RT2661_RF_2529)
bbp3 |= RT2661_SMART_MODE;
rt2661_bbp_write(sc, 3, bbp3);
if (bbp94 != RT2661_BBPR94_DEFAULT)
rt2661_bbp_write(sc, 94, bbp94);
/* 5GHz radio needs a 1ms delay here */
if (IEEE80211_IS_CHAN_5GHZ(c))
DELAY(1000);
}
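The per-channel Tx power from the EEPROM has to land in the 0..31 range the RF word accepts; anything outside is folded into BBP register 94 as an offset around its default, which is what the clamp above does before programming the RF. A small standalone mirror of just that clamp; the R94 default used here is a placeholder, not the value from the driver header.

#include <stdio.h>
#include <stdint.h>

#define BBPR94_DEFAULT  6       /* placeholder for RT2661_BBPR94_DEFAULT */

static void
clamp_txpower(int8_t power)
{
        uint8_t bbp94 = BBPR94_DEFAULT;

        if (power < 0) {
                bbp94 += power;         /* fold the excess below 0 into R94 */
                power = 0;
        } else if (power > 31) {
                bbp94 += power - 31;    /* fold the excess above 31 into R94 */
                power = 31;
        }
        printf("RF power=%d BBP R94=%d%s\n", power, bbp94,
            bbp94 != BBPR94_DEFAULT ? " (write R94)" : "");
}

int
main(void)
{
        clamp_txpower(-3);
        clamp_txpower(20);
        clamp_txpower(36);
        return 0;
}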
static void
rt2661_set_bssid(struct rt2661_softc *sc, const uint8_t *bssid)
{
uint32_t tmp;
tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24;
RAL_WRITE(sc, RT2661_MAC_CSR4, tmp);
tmp = bssid[4] | bssid[5] << 8 | RT2661_ONE_BSSID << 16;
RAL_WRITE(sc, RT2661_MAC_CSR5, tmp);
}
static void
rt2661_set_macaddr(struct rt2661_softc *sc, const uint8_t *addr)
{
uint32_t tmp;
tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24;
RAL_WRITE(sc, RT2661_MAC_CSR2, tmp);
tmp = addr[4] | addr[5] << 8;
RAL_WRITE(sc, RT2661_MAC_CSR3, tmp);
}
static void
rt2661_update_promisc(struct ieee80211com *ic)
{
struct rt2661_softc *sc = ic->ic_softc;
uint32_t tmp;
tmp = RAL_READ(sc, RT2661_TXRX_CSR0);
tmp &= ~RT2661_DROP_NOT_TO_ME;
- if (!(ic->ic_ifp->if_flags & IFF_PROMISC))
+ if (ic->ic_promisc == 0)
tmp |= RT2661_DROP_NOT_TO_ME;
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp);
DPRINTF(sc, "%s promiscuous mode\n",
- (ic->ic_ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving");
+ (ic->ic_promisc > 0) ? "entering" : "leaving");
}
/*
* Update QoS (802.11e) settings for each h/w Tx ring.
*/
static int
rt2661_wme_update(struct ieee80211com *ic)
{
- struct rt2661_softc *sc = ic->ic_ifp->if_softc;
+ struct rt2661_softc *sc = ic->ic_softc;
const struct wmeParams *wmep;
wmep = ic->ic_wme.wme_chanParams.cap_wmeParams;
/* XXX: not sure about shifts. */
/* XXX: the reference driver plays with AC_VI settings too. */
/* update TxOp */
RAL_WRITE(sc, RT2661_AC_TXOP_CSR0,
wmep[WME_AC_BE].wmep_txopLimit << 16 |
wmep[WME_AC_BK].wmep_txopLimit);
RAL_WRITE(sc, RT2661_AC_TXOP_CSR1,
wmep[WME_AC_VI].wmep_txopLimit << 16 |
wmep[WME_AC_VO].wmep_txopLimit);
/* update CWmin */
RAL_WRITE(sc, RT2661_CWMIN_CSR,
wmep[WME_AC_BE].wmep_logcwmin << 12 |
wmep[WME_AC_BK].wmep_logcwmin << 8 |
wmep[WME_AC_VI].wmep_logcwmin << 4 |
wmep[WME_AC_VO].wmep_logcwmin);
/* update CWmax */
RAL_WRITE(sc, RT2661_CWMAX_CSR,
wmep[WME_AC_BE].wmep_logcwmax << 12 |
wmep[WME_AC_BK].wmep_logcwmax << 8 |
wmep[WME_AC_VI].wmep_logcwmax << 4 |
wmep[WME_AC_VO].wmep_logcwmax);
/* update Aifsn */
RAL_WRITE(sc, RT2661_AIFSN_CSR,
wmep[WME_AC_BE].wmep_aifsn << 12 |
wmep[WME_AC_BK].wmep_aifsn << 8 |
wmep[WME_AC_VI].wmep_aifsn << 4 |
wmep[WME_AC_VO].wmep_aifsn);
return 0;
}
static void
rt2661_update_slot(struct ieee80211com *ic)
{
struct rt2661_softc *sc = ic->ic_softc;
uint8_t slottime;
uint32_t tmp;
slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20;
tmp = RAL_READ(sc, RT2661_MAC_CSR9);
tmp = (tmp & ~0xff) | slottime;
RAL_WRITE(sc, RT2661_MAC_CSR9, tmp);
}
static const char *
rt2661_get_rf(int rev)
{
switch (rev) {
case RT2661_RF_5225: return "RT5225";
case RT2661_RF_5325: return "RT5325 (MIMO XR)";
case RT2661_RF_2527: return "RT2527";
case RT2661_RF_2529: return "RT2529 (MIMO XR)";
default: return "unknown";
}
}
static void
rt2661_read_eeprom(struct rt2661_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
uint16_t val;
int i;
/* read MAC address */
val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC01);
macaddr[0] = val & 0xff;
macaddr[1] = val >> 8;
val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC23);
macaddr[2] = val & 0xff;
macaddr[3] = val >> 8;
val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC45);
macaddr[4] = val & 0xff;
macaddr[5] = val >> 8;
val = rt2661_eeprom_read(sc, RT2661_EEPROM_ANTENNA);
/* XXX: test if different from 0xffff? */
sc->rf_rev = (val >> 11) & 0x1f;
sc->hw_radio = (val >> 10) & 0x1;
sc->rx_ant = (val >> 4) & 0x3;
sc->tx_ant = (val >> 2) & 0x3;
sc->nb_ant = val & 0x3;
DPRINTF(sc, "RF revision=%d\n", sc->rf_rev);
val = rt2661_eeprom_read(sc, RT2661_EEPROM_CONFIG2);
sc->ext_5ghz_lna = (val >> 6) & 0x1;
sc->ext_2ghz_lna = (val >> 4) & 0x1;
DPRINTF(sc, "External 2GHz LNA=%d\nExternal 5GHz LNA=%d\n",
sc->ext_2ghz_lna, sc->ext_5ghz_lna);
val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_2GHZ_OFFSET);
if ((val & 0xff) != 0xff)
sc->rssi_2ghz_corr = (int8_t)(val & 0xff); /* signed */
/* Only [-10, 10] is valid */
if (sc->rssi_2ghz_corr < -10 || sc->rssi_2ghz_corr > 10)
sc->rssi_2ghz_corr = 0;
val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_5GHZ_OFFSET);
if ((val & 0xff) != 0xff)
sc->rssi_5ghz_corr = (int8_t)(val & 0xff); /* signed */
/* Only [-10, 10] is valid */
if (sc->rssi_5ghz_corr < -10 || sc->rssi_5ghz_corr > 10)
sc->rssi_5ghz_corr = 0;
/* adjust RSSI correction for external low-noise amplifier */
if (sc->ext_2ghz_lna)
sc->rssi_2ghz_corr -= 14;
if (sc->ext_5ghz_lna)
sc->rssi_5ghz_corr -= 14;
DPRINTF(sc, "RSSI 2GHz corr=%d\nRSSI 5GHz corr=%d\n",
sc->rssi_2ghz_corr, sc->rssi_5ghz_corr);
val = rt2661_eeprom_read(sc, RT2661_EEPROM_FREQ_OFFSET);
if ((val >> 8) != 0xff)
sc->rfprog = (val >> 8) & 0x3;
if ((val & 0xff) != 0xff)
sc->rffreq = val & 0xff;
DPRINTF(sc, "RF prog=%d\nRF freq=%d\n", sc->rfprog, sc->rffreq);
/* read Tx power for all a/b/g channels */
for (i = 0; i < 19; i++) {
val = rt2661_eeprom_read(sc, RT2661_EEPROM_TXPOWER + i);
sc->txpow[i * 2] = (int8_t)(val >> 8); /* signed */
DPRINTF(sc, "Channel=%d Tx power=%d\n",
rt2661_rf5225_1[i * 2].chan, sc->txpow[i * 2]);
sc->txpow[i * 2 + 1] = (int8_t)(val & 0xff); /* signed */
DPRINTF(sc, "Channel=%d Tx power=%d\n",
rt2661_rf5225_1[i * 2 + 1].chan, sc->txpow[i * 2 + 1]);
}
/* read vendor-specific BBP values */
for (i = 0; i < 16; i++) {
val = rt2661_eeprom_read(sc, RT2661_EEPROM_BBP_BASE + i);
if (val == 0 || val == 0xffff)
continue; /* skip invalid entries */
sc->bbp_prom[i].reg = val >> 8;
sc->bbp_prom[i].val = val & 0xff;
DPRINTF(sc, "BBP R%d=%02x\n", sc->bbp_prom[i].reg,
sc->bbp_prom[i].val);
}
}
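The RSSI offset words are decoded defensively: a low byte of 0xff means unprogrammed, the value is read as a signed byte, anything outside [-10, 10] is discarded, and an external LNA pulls the correction down by 14. A standalone sketch of that decode, mirroring the logic above.

#include <stdio.h>
#include <stdint.h>

/* mirrors the RSSI-offset handling in rt2661_read_eeprom() */
static int
rssi_corr(uint16_t eeprom_word, int ext_lna)
{
        int corr = 0;

        if ((eeprom_word & 0xff) != 0xff)
                corr = (int8_t)(eeprom_word & 0xff);    /* signed byte */
        if (corr < -10 || corr > 10)                    /* sanity window */
                corr = 0;
        if (ext_lna)
                corr -= 14;     /* account for the external amplifier */
        return corr;
}

int
main(void)
{
        printf("%d\n", rssi_corr(0x00ff, 0));   /* unprogrammed -> 0 */
        printf("%d\n", rssi_corr(0x00fb, 0));   /* -5 */
        printf("%d\n", rssi_corr(0x0004, 1));   /* 4 - 14 = -10 */
        return 0;
}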
}
static int
rt2661_bbp_init(struct rt2661_softc *sc)
{
#define N(a) (sizeof (a) / sizeof ((a)[0]))
int i, ntries;
uint8_t val;
/* wait for BBP to be ready */
for (ntries = 0; ntries < 100; ntries++) {
val = rt2661_bbp_read(sc, 0);
if (val != 0 && val != 0xff)
break;
DELAY(100);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "timeout waiting for BBP\n");
return EIO;
}
/* initialize BBP registers to default values */
for (i = 0; i < N(rt2661_def_bbp); i++) {
rt2661_bbp_write(sc, rt2661_def_bbp[i].reg,
rt2661_def_bbp[i].val);
}
/* write vendor-specific BBP values (from EEPROM) */
for (i = 0; i < 16; i++) {
if (sc->bbp_prom[i].reg == 0)
continue;
rt2661_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val);
}
return 0;
#undef N
}
static void
rt2661_init_locked(struct rt2661_softc *sc)
{
#define N(a) (sizeof (a) / sizeof ((a)[0]))
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t tmp, sta[3];
int i, error, ntries;
RAL_LOCK_ASSERT(sc);
if ((sc->sc_flags & RAL_FW_LOADED) == 0) {
error = rt2661_load_microcode(sc);
if (error != 0) {
- if_printf(ifp,
+ device_printf(sc->sc_dev,
"%s: could not load 8051 microcode, error %d\n",
__func__, error);
return;
}
sc->sc_flags |= RAL_FW_LOADED;
}
rt2661_stop_locked(sc);
/* initialize Tx rings */
RAL_WRITE(sc, RT2661_AC1_BASE_CSR, sc->txq[1].physaddr);
RAL_WRITE(sc, RT2661_AC0_BASE_CSR, sc->txq[0].physaddr);
RAL_WRITE(sc, RT2661_AC2_BASE_CSR, sc->txq[2].physaddr);
RAL_WRITE(sc, RT2661_AC3_BASE_CSR, sc->txq[3].physaddr);
/* initialize Mgt ring */
RAL_WRITE(sc, RT2661_MGT_BASE_CSR, sc->mgtq.physaddr);
/* initialize Rx ring */
RAL_WRITE(sc, RT2661_RX_BASE_CSR, sc->rxq.physaddr);
/* initialize Tx rings sizes */
RAL_WRITE(sc, RT2661_TX_RING_CSR0,
RT2661_TX_RING_COUNT << 24 |
RT2661_TX_RING_COUNT << 16 |
RT2661_TX_RING_COUNT << 8 |
RT2661_TX_RING_COUNT);
RAL_WRITE(sc, RT2661_TX_RING_CSR1,
RT2661_TX_DESC_WSIZE << 16 |
RT2661_TX_RING_COUNT << 8 | /* XXX: HCCA ring unused */
RT2661_MGT_RING_COUNT);
/* initialize Rx rings */
RAL_WRITE(sc, RT2661_RX_RING_CSR,
RT2661_RX_DESC_BACK << 16 |
RT2661_RX_DESC_WSIZE << 8 |
RT2661_RX_RING_COUNT);
/* XXX: some magic here */
RAL_WRITE(sc, RT2661_TX_DMA_DST_CSR, 0xaa);
/* load base addresses of all 5 Tx rings (4 data + 1 mgt) */
RAL_WRITE(sc, RT2661_LOAD_TX_RING_CSR, 0x1f);
/* load base address of Rx ring */
RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 2);
/* initialize MAC registers to default values */
for (i = 0; i < N(rt2661_def_mac); i++)
RAL_WRITE(sc, rt2661_def_mac[i].reg, rt2661_def_mac[i].val);
- rt2661_set_macaddr(sc, IF_LLADDR(ifp));
+ rt2661_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);
/* set host ready */
RAL_WRITE(sc, RT2661_MAC_CSR1, 3);
RAL_WRITE(sc, RT2661_MAC_CSR1, 0);
/* wait for BBP/RF to wakeup */
for (ntries = 0; ntries < 1000; ntries++) {
if (RAL_READ(sc, RT2661_MAC_CSR12) & 8)
break;
DELAY(1000);
}
if (ntries == 1000) {
printf("timeout waiting for BBP/RF to wakeup\n");
rt2661_stop_locked(sc);
return;
}
if (rt2661_bbp_init(sc) != 0) {
rt2661_stop_locked(sc);
return;
}
/* select default channel */
sc->sc_curchan = ic->ic_curchan;
rt2661_select_band(sc, sc->sc_curchan);
rt2661_select_antenna(sc);
rt2661_set_chan(sc, sc->sc_curchan);
/* update Rx filter */
tmp = RAL_READ(sc, RT2661_TXRX_CSR0) & 0xffff;
tmp |= RT2661_DROP_PHY_ERROR | RT2661_DROP_CRC_ERROR;
if (ic->ic_opmode != IEEE80211_M_MONITOR) {
tmp |= RT2661_DROP_CTL | RT2661_DROP_VER_ERROR |
RT2661_DROP_ACKCTS;
if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
ic->ic_opmode != IEEE80211_M_MBSS)
tmp |= RT2661_DROP_TODS;
- if (!(ifp->if_flags & IFF_PROMISC))
+ if (ic->ic_promisc == 0)
tmp |= RT2661_DROP_NOT_TO_ME;
}
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp);
/* clear STA registers */
RAL_READ_REGION_4(sc, RT2661_STA_CSR0, sta, N(sta));
/* initialize ASIC */
RAL_WRITE(sc, RT2661_MAC_CSR1, 4);
/* clear any pending interrupt */
RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff);
/* enable interrupts */
RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10);
RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0);
/* kick Rx */
RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 1);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_flags |= RAL_RUNNING;
callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc);
#undef N
}
static void
rt2661_init(void *priv)
{
struct rt2661_softc *sc = priv;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
RAL_LOCK(sc);
rt2661_init_locked(sc);
RAL_UNLOCK(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->sc_flags & RAL_RUNNING)
ieee80211_start_all(ic); /* start all vap's */
}
void
rt2661_stop_locked(struct rt2661_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- uint32_t tmp;
volatile int *flags = &sc->sc_flags;
+ uint32_t tmp;
while (*flags & RAL_INPUT_RUNNING)
msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10);
callout_stop(&sc->watchdog_ch);
sc->sc_tx_timer = 0;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ if (sc->sc_flags & RAL_RUNNING) {
+ sc->sc_flags &= ~RAL_RUNNING;
/* abort Tx (for all 5 Tx rings) */
RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 0x1f << 16);
/* disable Rx (value remains after reset!) */
tmp = RAL_READ(sc, RT2661_TXRX_CSR0);
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX);
/* reset ASIC */
RAL_WRITE(sc, RT2661_MAC_CSR1, 3);
RAL_WRITE(sc, RT2661_MAC_CSR1, 0);
/* disable interrupts */
RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffffff);
RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff);
/* clear any pending interrupt */
RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff);
RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, 0xffffffff);
/* reset Tx and Rx rings */
rt2661_reset_tx_ring(sc, &sc->txq[0]);
rt2661_reset_tx_ring(sc, &sc->txq[1]);
rt2661_reset_tx_ring(sc, &sc->txq[2]);
rt2661_reset_tx_ring(sc, &sc->txq[3]);
rt2661_reset_tx_ring(sc, &sc->mgtq);
rt2661_reset_rx_ring(sc, &sc->rxq);
}
}
void
rt2661_stop(void *priv)
{
struct rt2661_softc *sc = priv;
RAL_LOCK(sc);
rt2661_stop_locked(sc);
RAL_UNLOCK(sc);
}
static int
rt2661_load_microcode(struct rt2661_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
const struct firmware *fp;
const char *imagename;
int ntries, error;
RAL_LOCK_ASSERT(sc);
switch (sc->sc_id) {
case 0x0301: imagename = "rt2561sfw"; break;
case 0x0302: imagename = "rt2561fw"; break;
case 0x0401: imagename = "rt2661fw"; break;
default:
- if_printf(ifp, "%s: unexpected pci device id 0x%x, "
+ device_printf(sc->sc_dev, "%s: unexpected pci device id 0x%x, "
"don't know how to retrieve firmware\n",
__func__, sc->sc_id);
return EINVAL;
}
RAL_UNLOCK(sc);
fp = firmware_get(imagename);
RAL_LOCK(sc);
if (fp == NULL) {
- if_printf(ifp, "%s: unable to retrieve firmware image %s\n",
+ device_printf(sc->sc_dev,
+ "%s: unable to retrieve firmware image %s\n",
__func__, imagename);
return EINVAL;
}
/*
* Load 8051 microcode into NIC.
*/
/* reset 8051 */
RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET);
/* cancel any pending Host to MCU command */
RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR, 0);
RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff);
RAL_WRITE(sc, RT2661_HOST_CMD_CSR, 0);
/* write 8051's microcode */
RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET | RT2661_MCU_SEL);
RAL_WRITE_REGION_1(sc, RT2661_MCU_CODE_BASE, fp->data, fp->datasize);
RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET);
/* kick 8051's ass */
RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, 0);
/* wait for 8051 to initialize */
for (ntries = 0; ntries < 500; ntries++) {
if (RAL_READ(sc, RT2661_MCU_CNTL_CSR) & RT2661_MCU_READY)
break;
DELAY(100);
}
if (ntries == 500) {
- if_printf(ifp, "%s: timeout waiting for MCU to initialize\n",
- __func__);
+ device_printf(sc->sc_dev,
+ "%s: timeout waiting for MCU to initialize\n", __func__);
error = EIO;
} else
error = 0;
firmware_put(fp, FIRMWARE_UNLOAD);
return error;
}
#ifdef notyet
/*
* Dynamically tune Rx sensitivity (BBP register 17) based on average RSSI and
* false CCA count. This function is called periodically (every second) when
* in the RUN state. Values taken from the reference driver.
*/
static void
rt2661_rx_tune(struct rt2661_softc *sc)
{
uint8_t bbp17;
uint16_t cca;
int lo, hi, dbm;
/*
* Tuning range depends on operating band and on the presence of an
* external low-noise amplifier.
*/
lo = 0x20;
if (IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan))
lo += 0x08;
if ((IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan) && sc->ext_2ghz_lna) ||
(IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan) && sc->ext_5ghz_lna))
lo += 0x10;
hi = lo + 0x20;
/* retrieve false CCA count since last call (clear on read) */
cca = RAL_READ(sc, RT2661_STA_CSR1) & 0xffff;
/* XXX: dbm is meant to hold the average RSSI in dBm; it is not
 * computed anywhere in this (disabled) block. */
if (dbm >= -35) {
bbp17 = 0x60;
} else if (dbm >= -58) {
bbp17 = hi;
} else if (dbm >= -66) {
bbp17 = lo + 0x10;
} else if (dbm >= -74) {
bbp17 = lo + 0x08;
} else {
/* RSSI < -74dBm, tune using false CCA count */
bbp17 = sc->bbp17; /* current value */
hi -= 2 * (-74 - dbm);
if (hi < lo)
hi = lo;
if (bbp17 > hi) {
bbp17 = hi;
} else if (cca > 512) {
if (++bbp17 > hi)
bbp17 = hi;
} else if (cca < 100) {
if (--bbp17 < lo)
bbp17 = lo;
}
}
if (bbp17 != sc->bbp17) {
rt2661_bbp_write(sc, 17, bbp17);
sc->bbp17 = bbp17;
}
}
/*
* Enter/Leave radar detection mode.
* This is for 802.11h additional regulatory domains.
*/
static void
rt2661_radar_start(struct rt2661_softc *sc)
{
uint32_t tmp;
/* disable Rx */
tmp = RAL_READ(sc, RT2661_TXRX_CSR0);
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX);
rt2661_bbp_write(sc, 82, 0x20);
rt2661_bbp_write(sc, 83, 0x00);
rt2661_bbp_write(sc, 84, 0x40);
/* save current BBP registers values */
sc->bbp18 = rt2661_bbp_read(sc, 18);
sc->bbp21 = rt2661_bbp_read(sc, 21);
sc->bbp22 = rt2661_bbp_read(sc, 22);
sc->bbp16 = rt2661_bbp_read(sc, 16);
sc->bbp17 = rt2661_bbp_read(sc, 17);
sc->bbp64 = rt2661_bbp_read(sc, 64);
rt2661_bbp_write(sc, 18, 0xff);
rt2661_bbp_write(sc, 21, 0x3f);
rt2661_bbp_write(sc, 22, 0x3f);
rt2661_bbp_write(sc, 16, 0xbd);
rt2661_bbp_write(sc, 17, sc->ext_5ghz_lna ? 0x44 : 0x34);
rt2661_bbp_write(sc, 64, 0x21);
/* restore Rx filter */
RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp);
}
static int
rt2661_radar_stop(struct rt2661_softc *sc)
{
uint8_t bbp66;
/* read radar detection result */
bbp66 = rt2661_bbp_read(sc, 66);
/* restore BBP registers values */
rt2661_bbp_write(sc, 16, sc->bbp16);
rt2661_bbp_write(sc, 17, sc->bbp17);
rt2661_bbp_write(sc, 18, sc->bbp18);
rt2661_bbp_write(sc, 21, sc->bbp21);
rt2661_bbp_write(sc, 22, sc->bbp22);
rt2661_bbp_write(sc, 64, sc->bbp64);
return bbp66 == 1;
}
#endif
static int
rt2661_prepare_beacon(struct rt2661_softc *sc, struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_beacon_offsets bo;
struct rt2661_tx_desc desc;
struct mbuf *m0;
int rate;
m0 = ieee80211_beacon_alloc(vap->iv_bss, &bo);
if (m0 == NULL) {
device_printf(sc->sc_dev, "could not allocate beacon frame\n");
return ENOBUFS;
}
/* send beacons at the lowest available rate */
rate = IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan) ? 12 : 2;
rt2661_setup_tx_desc(sc, &desc, RT2661_TX_TIMESTAMP, RT2661_TX_HWSEQ,
m0->m_pkthdr.len, rate, NULL, 0, RT2661_QID_MGT);
/* copy the first 24 bytes of Tx descriptor into NIC memory */
RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0, (uint8_t *)&desc, 24);
/* copy beacon header and payload into NIC memory */
RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0 + 24,
mtod(m0, uint8_t *), m0->m_pkthdr.len);
m_freem(m0);
return 0;
}
/*
* Enable TSF synchronization and tell h/w to start sending beacons for IBSS
* and HostAP operating modes.
*/
static void
rt2661_enable_tsf_sync(struct rt2661_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t tmp;
if (vap->iv_opmode != IEEE80211_M_STA) {
/*
* Change default 16ms TBTT adjustment to 8ms.
* Must be done before enabling beacon generation.
*/
RAL_WRITE(sc, RT2661_TXRX_CSR10, 1 << 12 | 8);
}
tmp = RAL_READ(sc, RT2661_TXRX_CSR9) & 0xff000000;
/* set beacon interval (in 1/16ms unit) */
tmp |= vap->iv_bss->ni_intval * 16;
tmp |= RT2661_TSF_TICKING | RT2661_ENABLE_TBTT;
if (vap->iv_opmode == IEEE80211_M_STA)
tmp |= RT2661_TSF_MODE(1);
else
tmp |= RT2661_TSF_MODE(2) | RT2661_GENERATE_BEACON;
RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp);
}
static void
rt2661_enable_tsf(struct rt2661_softc *sc)
{
RAL_WRITE(sc, RT2661_TXRX_CSR9,
(RAL_READ(sc, RT2661_TXRX_CSR9) & 0xff000000)
| RT2661_TSF_TICKING | RT2661_TSF_MODE(2));
}
/*
* Retrieve the "Received Signal Strength Indicator" from the raw values
* contained in Rx descriptors. The computation depends on which band the
* frame was received on. Correction values taken from the reference driver.
*/
static int
rt2661_get_rssi(struct rt2661_softc *sc, uint8_t raw)
{
int lna, agc, rssi;
lna = (raw >> 5) & 0x3;
agc = raw & 0x1f;
if (lna == 0) {
/*
* No mapping available.
*
* NB: Since RSSI is relative to noise floor, -1 is
* adequate for caller to know error happened.
*/
return -1;
}
rssi = (2 * agc) - RT2661_NOISE_FLOOR;
if (IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan)) {
rssi += sc->rssi_2ghz_corr;
if (lna == 1)
rssi -= 64;
else if (lna == 2)
rssi -= 74;
else if (lna == 3)
rssi -= 90;
} else {
rssi += sc->rssi_5ghz_corr;
if (lna == 1)
rssi -= 64;
else if (lna == 2)
rssi -= 86;
else if (lna == 3)
rssi -= 100;
}
return rssi;
}
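The raw RSSI byte packs an LNA selector in bits 7:5 and a 5-bit AGC value; the estimate is 2 * AGC minus the noise floor, with the EEPROM correction and an LNA-dependent offset applied afterwards. A standalone version of the same computation for experimenting with raw values; the noise floor and correction are parameters here rather than the driver's constants.

#include <stdio.h>
#include <stdint.h>

/* mirrors rt2661_get_rssi() for the 2 GHz band */
static int
raw_to_rssi(uint8_t raw, int noise_floor, int corr_2ghz)
{
        int lna = (raw >> 5) & 0x3;
        int agc = raw & 0x1f;
        int rssi;

        if (lna == 0)
                return -1;              /* no mapping for this LNA value */
        rssi = (2 * agc) - noise_floor + corr_2ghz;
        switch (lna) {
        case 1: rssi -= 64; break;
        case 2: rssi -= 74; break;
        case 3: rssi -= 90; break;
        }
        return rssi;
}

int
main(void)
{
        uint8_t raw = (2 << 5) | 20;    /* LNA=2, AGC=20 */

        /* noise floor taken as -95 here; EEPROM correction 0; prints 61 */
        printf("rssi = %d (relative to noise floor)\n",
            raw_to_rssi(raw, -95, 0));
        return 0;
}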
static void
rt2661_scan_start(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct rt2661_softc *sc = ifp->if_softc;
+ struct rt2661_softc *sc = ic->ic_softc;
uint32_t tmp;
/* abort TSF synchronization */
tmp = RAL_READ(sc, RT2661_TXRX_CSR9);
RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0xffffff);
- rt2661_set_bssid(sc, ifp->if_broadcastaddr);
+ rt2661_set_bssid(sc, ieee80211broadcastaddr);
}
static void
rt2661_scan_end(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct rt2661_softc *sc = ifp->if_softc;
+ struct rt2661_softc *sc = ic->ic_softc;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
rt2661_enable_tsf_sync(sc);
/* XXX keep local copy */
rt2661_set_bssid(sc, vap->iv_bss->ni_bssid);
}
static void
rt2661_set_channel(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct rt2661_softc *sc = ifp->if_softc;
+ struct rt2661_softc *sc = ic->ic_softc;
RAL_LOCK(sc);
rt2661_set_chan(sc, ic->ic_curchan);
RAL_UNLOCK(sc);
}
Index: head/sys/dev/ral/rt2661var.h
===================================================================
--- head/sys/dev/ral/rt2661var.h (revision 287196)
+++ head/sys/dev/ral/rt2661var.h (revision 287197)
@@ -1,173 +1,174 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2005
* Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
struct rt2661_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint64_t wr_tsf;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_antsignal;
int8_t wr_antnoise;
} __packed;
#define RT2661_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_TSFT) | \
(1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE))
struct rt2661_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint8_t wt_rate;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
} __packed;
#define RT2661_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL))
struct rt2661_tx_data {
bus_dmamap_t map;
struct mbuf *m;
struct ieee80211_node *ni;
uint8_t rix;
int8_t rssi;
};
struct rt2661_tx_ring {
bus_dma_tag_t desc_dmat;
bus_dma_tag_t data_dmat;
bus_dmamap_t desc_map;
bus_addr_t physaddr;
struct rt2661_tx_desc *desc;
struct rt2661_tx_data *data;
int count;
int queued;
int cur;
int next;
int stat;
};
struct rt2661_rx_data {
bus_dmamap_t map;
struct mbuf *m;
};
struct rt2661_rx_ring {
bus_dma_tag_t desc_dmat;
bus_dma_tag_t data_dmat;
bus_dmamap_t desc_map;
bus_addr_t physaddr;
struct rt2661_rx_desc *desc;
struct rt2661_rx_data *data;
int count;
int cur;
int next;
};
struct rt2661_vap {
struct ieee80211vap ral_vap;
int (*ral_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define RT2661_VAP(vap) ((struct rt2661_vap *)(vap))
struct rt2661_softc {
- struct ifnet *sc_ifp;
+ struct ieee80211com sc_ic;
+ struct mtx sc_mtx;
+ struct mbufq sc_snd;
device_t sc_dev;
bus_space_tag_t sc_st;
bus_space_handle_t sc_sh;
- struct mtx sc_mtx;
-
struct callout watchdog_ch;
int sc_tx_timer;
int sc_invalid;
int sc_debug;
/*
* The same in both up to here
* ------------------------------------------------
*/
int sc_flags;
#define RAL_FW_LOADED 0x1
#define RAL_INPUT_RUNNING 0x2
+#define RAL_RUNNING 0x4
int sc_id;
struct ieee80211_channel *sc_curchan;
uint8_t rf_rev;
uint8_t rfprog;
uint8_t rffreq;
struct rt2661_tx_ring txq[4];
struct rt2661_tx_ring mgtq;
struct rt2661_rx_ring rxq;
uint32_t rf_regs[4];
int8_t txpow[38];
struct {
uint8_t reg;
uint8_t val;
} bbp_prom[16];
int hw_radio;
int rx_ant;
int tx_ant;
int nb_ant;
int ext_2ghz_lna;
int ext_5ghz_lna;
int rssi_2ghz_corr;
int rssi_5ghz_corr;
uint8_t bbp18;
uint8_t bbp21;
uint8_t bbp22;
uint8_t bbp16;
uint8_t bbp17;
uint8_t bbp64;
int dwelltime;
struct rt2661_rx_radiotap_header sc_rxtap;
int sc_rxtap_len;
struct rt2661_tx_radiotap_header sc_txtap;
int sc_txtap_len;
};
int rt2661_attach(device_t, int);
int rt2661_detach(void *);
void rt2661_shutdown(void *);
void rt2661_suspend(void *);
void rt2661_resume(void *);
void rt2661_intr(void *);
#define RAL_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define RAL_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
#define RAL_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
Index: head/sys/dev/ral/rt2860.c
===================================================================
--- head/sys/dev/ral/rt2860.c (revision 287196)
+++ head/sys/dev/ral/rt2860.c (revision 287197)
@@ -1,4399 +1,4337 @@
/*-
* Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
* Copyright (c) 2012 Bernhard Schmidt <bschmidt@FreeBSD.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* $OpenBSD: rt2860.c,v 1.65 2010/10/23 14:24:54 damien Exp $
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
* Ralink Technology RT2860/RT3090/RT3390/RT3562/RT5390/RT5392 chipset driver
* http://www.ralinktech.com/
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <dev/ral/rt2860reg.h>
#include <dev/ral/rt2860var.h>
#define RAL_DEBUG
#ifdef RAL_DEBUG
#define DPRINTF(x) do { if (sc->sc_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x) do { if (sc->sc_debug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif
static struct ieee80211vap *rt2860_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode,
int, const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void rt2860_vap_delete(struct ieee80211vap *);
static void rt2860_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int rt2860_alloc_tx_ring(struct rt2860_softc *,
struct rt2860_tx_ring *);
static void rt2860_reset_tx_ring(struct rt2860_softc *,
struct rt2860_tx_ring *);
static void rt2860_free_tx_ring(struct rt2860_softc *,
struct rt2860_tx_ring *);
static int rt2860_alloc_tx_pool(struct rt2860_softc *);
static void rt2860_free_tx_pool(struct rt2860_softc *);
static int rt2860_alloc_rx_ring(struct rt2860_softc *,
struct rt2860_rx_ring *);
static void rt2860_reset_rx_ring(struct rt2860_softc *,
struct rt2860_rx_ring *);
static void rt2860_free_rx_ring(struct rt2860_softc *,
struct rt2860_rx_ring *);
static void rt2860_updatestats(struct rt2860_softc *);
static void rt2860_newassoc(struct ieee80211_node *, int);
static void rt2860_node_free(struct ieee80211_node *);
#ifdef IEEE80211_HT
static int rt2860_ampdu_rx_start(struct ieee80211com *,
struct ieee80211_node *, uint8_t);
static void rt2860_ampdu_rx_stop(struct ieee80211com *,
struct ieee80211_node *, uint8_t);
#endif
static int rt2860_newstate(struct ieee80211vap *, enum ieee80211_state,
int);
static uint16_t rt3090_efuse_read_2(struct rt2860_softc *, uint16_t);
static uint16_t rt2860_eeprom_read_2(struct rt2860_softc *, uint16_t);
static void rt2860_intr_coherent(struct rt2860_softc *);
static void rt2860_drain_stats_fifo(struct rt2860_softc *);
static void rt2860_tx_intr(struct rt2860_softc *, int);
static void rt2860_rx_intr(struct rt2860_softc *);
static void rt2860_tbtt_intr(struct rt2860_softc *);
static void rt2860_gp_intr(struct rt2860_softc *);
static int rt2860_tx(struct rt2860_softc *, struct mbuf *,
struct ieee80211_node *);
static int rt2860_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static int rt2860_tx_raw(struct rt2860_softc *, struct mbuf *,
struct ieee80211_node *,
const struct ieee80211_bpf_params *params);
-static void rt2860_start(struct ifnet *);
-static void rt2860_start_locked(struct ifnet *);
+static int rt2860_transmit(struct ieee80211com *, struct mbuf *);
+static void rt2860_start(struct rt2860_softc *);
static void rt2860_watchdog(void *);
-static int rt2860_ioctl(struct ifnet *, u_long, caddr_t);
+static void rt2860_parent(struct ieee80211com *);
static void rt2860_mcu_bbp_write(struct rt2860_softc *, uint8_t, uint8_t);
static uint8_t rt2860_mcu_bbp_read(struct rt2860_softc *, uint8_t);
static void rt2860_rf_write(struct rt2860_softc *, uint8_t, uint32_t);
static uint8_t rt3090_rf_read(struct rt2860_softc *, uint8_t);
static void rt3090_rf_write(struct rt2860_softc *, uint8_t, uint8_t);
static int rt2860_mcu_cmd(struct rt2860_softc *, uint8_t, uint16_t, int);
static void rt2860_enable_mrr(struct rt2860_softc *);
static void rt2860_set_txpreamble(struct rt2860_softc *);
static void rt2860_set_basicrates(struct rt2860_softc *,
const struct ieee80211_rateset *);
static void rt2860_scan_start(struct ieee80211com *);
static void rt2860_scan_end(struct ieee80211com *);
static void rt2860_set_channel(struct ieee80211com *);
static void rt2860_select_chan_group(struct rt2860_softc *, int);
static void rt2860_set_chan(struct rt2860_softc *, u_int);
static void rt3090_set_chan(struct rt2860_softc *, u_int);
static void rt5390_set_chan(struct rt2860_softc *, u_int);
static int rt3090_rf_init(struct rt2860_softc *);
static void rt5390_rf_init(struct rt2860_softc *);
static void rt3090_rf_wakeup(struct rt2860_softc *);
static void rt5390_rf_wakeup(struct rt2860_softc *);
static int rt3090_filter_calib(struct rt2860_softc *, uint8_t, uint8_t,
uint8_t *);
static void rt3090_rf_setup(struct rt2860_softc *);
static void rt2860_set_leds(struct rt2860_softc *, uint16_t);
static void rt2860_set_gp_timer(struct rt2860_softc *, int);
static void rt2860_set_bssid(struct rt2860_softc *, const uint8_t *);
static void rt2860_set_macaddr(struct rt2860_softc *, const uint8_t *);
static void rt2860_update_promisc(struct ieee80211com *);
static void rt2860_updateslot(struct ieee80211com *);
-static void rt2860_updateprot(struct ifnet *);
+static void rt2860_updateprot(struct rt2860_softc *);
static int rt2860_updateedca(struct ieee80211com *);
#ifdef HW_CRYPTO
static int rt2860_set_key(struct ieee80211com *, struct ieee80211_node *,
struct ieee80211_key *);
static void rt2860_delete_key(struct ieee80211com *,
struct ieee80211_node *, struct ieee80211_key *);
#endif
static int8_t rt2860_rssi2dbm(struct rt2860_softc *, uint8_t, uint8_t);
static const char *rt2860_get_rf(uint8_t);
static int rt2860_read_eeprom(struct rt2860_softc *,
uint8_t macaddr[IEEE80211_ADDR_LEN]);
static int rt2860_bbp_init(struct rt2860_softc *);
static void rt5390_bbp_init(struct rt2860_softc *);
static int rt2860_txrx_enable(struct rt2860_softc *);
static void rt2860_init(void *);
static void rt2860_init_locked(struct rt2860_softc *);
static void rt2860_stop(void *);
static void rt2860_stop_locked(struct rt2860_softc *);
static int rt2860_load_microcode(struct rt2860_softc *);
#ifdef NOT_YET
static void rt2860_calib(struct rt2860_softc *);
#endif
static void rt3090_set_rx_antenna(struct rt2860_softc *, int);
static void rt2860_switch_chan(struct rt2860_softc *,
struct ieee80211_channel *);
static int rt2860_setup_beacon(struct rt2860_softc *,
struct ieee80211vap *);
static void rt2860_enable_tsf_sync(struct rt2860_softc *);
static const struct {
uint32_t reg;
uint32_t val;
} rt2860_def_mac[] = {
RT2860_DEF_MAC
};
static const struct {
uint8_t reg;
uint8_t val;
} rt2860_def_bbp[] = {
RT2860_DEF_BBP
}, rt5390_def_bbp[] = {
RT5390_DEF_BBP
};
static const struct rfprog {
uint8_t chan;
uint32_t r1, r2, r3, r4;
} rt2860_rf2850[] = {
RT2860_RF2850
};
struct {
uint8_t n, r, k;
} rt3090_freqs[] = {
RT3070_RF3052
};
static const struct {
uint8_t reg;
uint8_t val;
} rt3090_def_rf[] = {
RT3070_DEF_RF
}, rt5390_def_rf[] = {
RT5390_DEF_RF
}, rt5392_def_rf[] = {
RT5392_DEF_RF
};
int
rt2860_attach(device_t dev, int id)
{
struct rt2860_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic;
- struct ifnet *ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
int error, ntries, qid;
uint8_t bands;
- uint8_t macaddr[IEEE80211_ADDR_LEN];
sc->sc_dev = dev;
sc->sc_debug = 0;
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "can not if_alloc()\n");
- return ENOMEM;
- }
- ic = ifp->if_l2com;
-
mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF | MTX_RECURSE);
callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
/* wait for NIC to initialize */
for (ntries = 0; ntries < 100; ntries++) {
tmp = RAL_READ(sc, RT2860_ASIC_VER_ID);
if (tmp != 0 && tmp != 0xffffffff)
break;
DELAY(10);
}
if (ntries == 100) {
device_printf(sc->sc_dev,
"timeout waiting for NIC to initialize\n");
error = EIO;
goto fail1;
}
sc->mac_ver = tmp >> 16;
sc->mac_rev = tmp & 0xffff;
if (sc->mac_ver != 0x2860 &&
(id == 0x0681 || id == 0x0781 || id == 0x1059))
sc->sc_flags |= RT2860_ADVANCED_PS;
/* retrieve RF rev. no and various other things from EEPROM */
- rt2860_read_eeprom(sc, macaddr);
+ rt2860_read_eeprom(sc, ic->ic_macaddr);
device_printf(sc->sc_dev, "MAC/BBP RT%X (rev 0x%04X), "
"RF %s (MIMO %dT%dR), address %6D\n",
sc->mac_ver, sc->mac_rev, rt2860_get_rf(sc->rf_rev),
- sc->ntxchains, sc->nrxchains, macaddr, ":");
+ sc->ntxchains, sc->nrxchains, ic->ic_macaddr, ":");
/*
* Allocate Tx (4 EDCAs + HCCA + Mgt) and Rx rings.
*/
for (qid = 0; qid < 6; qid++) {
if ((error = rt2860_alloc_tx_ring(sc, &sc->txq[qid])) != 0) {
device_printf(sc->sc_dev,
"could not allocate Tx ring %d\n", qid);
goto fail2;
}
}
if ((error = rt2860_alloc_rx_ring(sc, &sc->rxq)) != 0) {
device_printf(sc->sc_dev, "could not allocate Rx ring\n");
goto fail2;
}
if ((error = rt2860_alloc_tx_pool(sc)) != 0) {
device_printf(sc->sc_dev, "could not allocate Tx pool\n");
goto fail3;
}
/* mgmt ring is broken on RT2860C, use EDCA AC VO ring instead */
sc->mgtqid = (sc->mac_ver == 0x2860 && sc->mac_rev == 0x0100) ?
WME_AC_VO : 5;
- ifp->if_softc = sc;
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = rt2860_init;
- ifp->if_ioctl = rt2860_ioctl;
- ifp->if_start = rt2860_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA /* station mode */
| IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
| IEEE80211_C_HOSTAP /* hostap mode */
| IEEE80211_C_MONITOR /* monitor mode */
| IEEE80211_C_AHDEMO /* adhoc demo mode */
| IEEE80211_C_WDS /* 4-address traffic works */
| IEEE80211_C_MBSS /* mesh point link mode */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_WPA /* capable of WPA1+WPA2 */
#if 0
| IEEE80211_C_BGSCAN /* capable of bg scanning */
#endif
| IEEE80211_C_WME /* 802.11e */
;
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
if (sc->rf_rev == RT2860_RF_2750 || sc->rf_rev == RT2860_RF_2850)
setbit(&bands, IEEE80211_MODE_11A);
ieee80211_init_channels(ic, NULL, &bands);
- ieee80211_ifattach(ic, macaddr);
+ ieee80211_ifattach(ic);
ic->ic_wme.wme_update = rt2860_updateedca;
ic->ic_scan_start = rt2860_scan_start;
ic->ic_scan_end = rt2860_scan_end;
ic->ic_set_channel = rt2860_set_channel;
ic->ic_updateslot = rt2860_updateslot;
ic->ic_update_promisc = rt2860_update_promisc;
ic->ic_raw_xmit = rt2860_raw_xmit;
sc->sc_node_free = ic->ic_node_free;
ic->ic_node_free = rt2860_node_free;
ic->ic_newassoc = rt2860_newassoc;
-
+ ic->ic_transmit = rt2860_transmit;
+ ic->ic_parent = rt2860_parent;
ic->ic_vap_create = rt2860_vap_create;
ic->ic_vap_delete = rt2860_vap_delete;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
RT2860_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
RT2860_RX_RADIOTAP_PRESENT);
#ifdef RAL_DEBUG
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs");
#endif
if (bootverbose)
ieee80211_announce(ic);
return 0;
fail3: rt2860_free_rx_ring(sc, &sc->rxq);
fail2: while (--qid >= 0)
rt2860_free_tx_ring(sc, &sc->txq[qid]);
fail1: mtx_destroy(&sc->sc_mtx);
- if_free(ifp);
return error;
}
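/*
 * Minimal sketch of the attach pattern introduced by this conversion
 * (illustrative only; the "foo" names are hypothetical and not part of
 * the driver): the ieee80211com is embedded in the softc instead of
 * hanging off an if_alloc()ed ifnet, the MAC address is read into
 * ic_macaddr before ieee80211_ifattach(), and ic_transmit/ic_parent
 * replace the old if_start/if_ioctl methods.
 */
#if 0
struct foo_softc {
	struct ieee80211com	sc_ic;	/* embedded, was "struct ifnet *" */
	struct mbufq		sc_snd;	/* replaces ifp->if_snd */
	struct mtx		sc_mtx;
};

static int
foo_attach(struct foo_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	mbufq_init(&sc->sc_snd, ifqmaxlen);
	ic->ic_softc = sc;			/* back-pointer for ic methods */
	foo_read_macaddr(sc, ic->ic_macaddr);	/* hypothetical helper */
	ieee80211_ifattach(ic);			/* no macaddr argument now */
	ic->ic_transmit = foo_transmit;		/* was ifp->if_start */
	ic->ic_parent = foo_parent;		/* was ifp->if_ioctl */
	return (0);
}
#endif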
int
rt2860_detach(void *xsc)
{
struct rt2860_softc *sc = xsc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
int qid;
RAL_LOCK(sc);
rt2860_stop_locked(sc);
RAL_UNLOCK(sc);
ieee80211_ifdetach(ic);
-
+ mbufq_drain(&sc->sc_snd);
for (qid = 0; qid < 6; qid++)
rt2860_free_tx_ring(sc, &sc->txq[qid]);
rt2860_free_rx_ring(sc, &sc->rxq);
rt2860_free_tx_pool(sc);
- if_free(ifp);
-
mtx_destroy(&sc->sc_mtx);
return 0;
}
void
rt2860_shutdown(void *xsc)
{
struct rt2860_softc *sc = xsc;
rt2860_stop(sc);
}
void
rt2860_suspend(void *xsc)
{
struct rt2860_softc *sc = xsc;
rt2860_stop(sc);
}
void
rt2860_resume(void *xsc)
{
struct rt2860_softc *sc = xsc;
- struct ifnet *ifp = sc->sc_ifp;
- if (ifp->if_flags & IFF_UP)
+ if (sc->sc_ic.ic_nrunning > 0)
rt2860_init(sc);
}
static struct ieee80211vap *
rt2860_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
- struct ifnet *ifp = ic->ic_ifp;
+ struct rt2860_softc *sc = ic->ic_softc;
struct rt2860_vap *rvp;
struct ieee80211vap *vap;
switch (opmode) {
case IEEE80211_M_STA:
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
case IEEE80211_M_MONITOR:
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
/* XXXRP: TBD */
if (!TAILQ_EMPTY(&ic->ic_vaps)) {
- if_printf(ifp, "only 1 vap supported\n");
+ device_printf(sc->sc_dev, "only 1 vap supported\n");
return NULL;
}
if (opmode == IEEE80211_M_STA)
flags |= IEEE80211_CLONE_NOBEACONS;
break;
case IEEE80211_M_WDS:
if (TAILQ_EMPTY(&ic->ic_vaps) ||
ic->ic_opmode != IEEE80211_M_HOSTAP) {
- if_printf(ifp, "wds only supported in ap mode\n");
+ device_printf(sc->sc_dev,
+ "wds only supported in ap mode\n");
return NULL;
}
/*
 * Silently remove any request for a unique
 * bssid; WDS vaps always share the local
 * MAC address.
*/
flags &= ~IEEE80211_CLONE_BSSID;
break;
default:
- if_printf(ifp, "unknown opmode %d\n", opmode);
+ device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
return NULL;
}
- rvp = malloc(sizeof(struct rt2860_vap), M_80211_VAP, M_NOWAIT | M_ZERO);
- if (rvp == NULL)
- return NULL;
+ rvp = malloc(sizeof(struct rt2860_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &rvp->ral_vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
/* override state transition machine */
rvp->ral_newstate = vap->iv_newstate;
vap->iv_newstate = rt2860_newstate;
#if 0
vap->iv_update_beacon = rt2860_beacon_update;
#endif
/* HW supports up to 255 STAs (0-254) in HostAP and IBSS modes */
vap->iv_max_aid = min(IEEE80211_AID_MAX, RT2860_WCID_MAX);
ieee80211_ratectl_init(vap);
/* complete setup */
- ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status);
+ ieee80211_vap_attach(vap, ieee80211_media_change,
+ ieee80211_media_status, mac);
if (TAILQ_FIRST(&ic->ic_vaps) == vap)
ic->ic_opmode = opmode;
return vap;
}
static void
rt2860_vap_delete(struct ieee80211vap *vap)
{
struct rt2860_vap *rvp = RT2860_VAP(vap);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(rvp, M_80211_VAP);
}
static void
rt2860_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
if (error != 0)
return;
KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
*(bus_addr_t *)arg = segs[0].ds_addr;
}
static int
rt2860_alloc_tx_ring(struct rt2860_softc *sc, struct rt2860_tx_ring *ring)
{
int size, error;
size = RT2860_TX_RING_COUNT * sizeof (struct rt2860_txd);
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 16, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
size, 1, size, 0, NULL, NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA map\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->txd,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->txd,
size, rt2860_dma_map_addr, &ring->paddr, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
return 0;
fail: rt2860_free_tx_ring(sc, ring);
return error;
}
void
rt2860_reset_tx_ring(struct rt2860_softc *sc, struct rt2860_tx_ring *ring)
{
struct rt2860_tx_data *data;
int i;
for (i = 0; i < RT2860_TX_RING_COUNT; i++) {
if ((data = ring->data[i]) == NULL)
continue; /* nothing mapped in this slot */
if (data->m != NULL) {
bus_dmamap_sync(sc->txwi_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->txwi_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
if (data->ni != NULL) {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
SLIST_INSERT_HEAD(&sc->data_pool, data, next);
ring->data[i] = NULL;
}
ring->queued = 0;
ring->cur = ring->next = 0;
}
void
rt2860_free_tx_ring(struct rt2860_softc *sc, struct rt2860_tx_ring *ring)
{
struct rt2860_tx_data *data;
int i;
if (ring->txd != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->txd, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
for (i = 0; i < RT2860_TX_RING_COUNT; i++) {
if ((data = ring->data[i]) == NULL)
continue; /* nothing mapped in this slot */
if (data->m != NULL) {
bus_dmamap_sync(sc->txwi_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->txwi_dmat, data->map);
m_freem(data->m);
}
if (data->ni != NULL)
ieee80211_free_node(data->ni);
SLIST_INSERT_HEAD(&sc->data_pool, data, next);
}
}
/*
* Allocate a pool of TX Wireless Information blocks.
*/
int
rt2860_alloc_tx_pool(struct rt2860_softc *sc)
{
caddr_t vaddr;
bus_addr_t paddr;
int i, size, error;
size = RT2860_TX_POOL_COUNT * RT2860_TXWI_DMASZ;
/* init data_pool early in case of failure.. */
SLIST_INIT(&sc->data_pool);
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
size, 1, size, 0, NULL, NULL, &sc->txwi_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create txwi DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(sc->txwi_dmat, (void **)&sc->txwi_vaddr,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->txwi_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(sc->txwi_dmat, sc->txwi_map,
sc->txwi_vaddr, size, rt2860_dma_map_addr, &paddr, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load txwi DMA map\n");
goto fail;
}
bus_dmamap_sync(sc->txwi_dmat, sc->txwi_map, BUS_DMASYNC_PREWRITE);
vaddr = sc->txwi_vaddr;
for (i = 0; i < RT2860_TX_POOL_COUNT; i++) {
struct rt2860_tx_data *data = &sc->data[i];
error = bus_dmamap_create(sc->txwi_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
data->txwi = (struct rt2860_txwi *)vaddr;
data->paddr = paddr;
vaddr += RT2860_TXWI_DMASZ;
paddr += RT2860_TXWI_DMASZ;
SLIST_INSERT_HEAD(&sc->data_pool, data, next);
}
return 0;
fail: rt2860_free_tx_pool(sc);
return error;
}
void
rt2860_free_tx_pool(struct rt2860_softc *sc)
{
if (sc->txwi_vaddr != NULL) {
bus_dmamap_sync(sc->txwi_dmat, sc->txwi_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->txwi_dmat, sc->txwi_map);
bus_dmamem_free(sc->txwi_dmat, sc->txwi_vaddr, sc->txwi_map);
}
if (sc->txwi_dmat != NULL)
bus_dma_tag_destroy(sc->txwi_dmat);
while (!SLIST_EMPTY(&sc->data_pool)) {
struct rt2860_tx_data *data;
data = SLIST_FIRST(&sc->data_pool);
bus_dmamap_destroy(sc->txwi_dmat, data->map);
SLIST_REMOVE_HEAD(&sc->data_pool, next);
}
}
int
rt2860_alloc_rx_ring(struct rt2860_softc *sc, struct rt2860_rx_ring *ring)
{
bus_addr_t physaddr;
int i, size, error;
size = RT2860_RX_RING_COUNT * sizeof (struct rt2860_rxd);
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 16, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
size, 1, size, 0, NULL, NULL, &ring->desc_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create desc DMA tag\n");
goto fail;
}
error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->rxd,
BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate DMA memory\n");
goto fail;
}
error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->rxd,
size, rt2860_dma_map_addr, &ring->paddr, 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not load desc DMA map\n");
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev, "could not create data DMA tag\n");
goto fail;
}
for (i = 0; i < RT2860_RX_RING_COUNT; i++) {
struct rt2860_rx_data *data = &ring->data[i];
struct rt2860_rxd *rxd = &ring->rxd[i];
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev, "could not create DMA map\n");
goto fail;
}
data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (data->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
error = ENOMEM;
goto fail;
}
error = bus_dmamap_load(ring->data_dmat, data->map,
mtod(data->m, void *), MCLBYTES, rt2860_dma_map_addr,
&physaddr, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not load rx buf DMA map");
goto fail;
}
rxd->sdp0 = htole32(physaddr);
rxd->sdl0 = htole16(MCLBYTES);
}
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
return 0;
fail: rt2860_free_rx_ring(sc, ring);
return error;
}
void
rt2860_reset_rx_ring(struct rt2860_softc *sc, struct rt2860_rx_ring *ring)
{
int i;
for (i = 0; i < RT2860_RX_RING_COUNT; i++)
ring->rxd[i].sdl0 &= ~htole16(RT2860_RX_DDONE);
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
ring->cur = 0;
}
void
rt2860_free_rx_ring(struct rt2860_softc *sc, struct rt2860_rx_ring *ring)
{
int i;
if (ring->rxd != NULL) {
bus_dmamap_sync(ring->desc_dmat, ring->desc_map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->desc_dmat, ring->desc_map);
bus_dmamem_free(ring->desc_dmat, ring->rxd, ring->desc_map);
}
if (ring->desc_dmat != NULL)
bus_dma_tag_destroy(ring->desc_dmat);
for (i = 0; i < RT2860_RX_RING_COUNT; i++) {
struct rt2860_rx_data *data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
if (ring->data_dmat != NULL)
bus_dma_tag_destroy(ring->data_dmat);
}
static void
rt2860_updatestats(struct rt2860_softc *sc)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
/*
* In IBSS or HostAP modes (when the hardware sends beacons), the
* MAC can run into a livelock and start sending CTS-to-self frames
* like crazy if protection is enabled. Fortunately, we can detect
* when such a situation occurs and reset the MAC.
*/
if (ic->ic_curmode != IEEE80211_M_STA) {
/* check if we're in a livelock situation.. */
uint32_t tmp = RAL_READ(sc, RT2860_DEBUG);
if ((tmp & (1 << 29)) && (tmp & (1 << 7 | 1 << 5))) {
/* ..and reset MAC/BBP for a while.. */
DPRINTF(("CTS-to-self livelock detected\n"));
RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_SRST);
RAL_BARRIER_WRITE(sc);
DELAY(1);
RAL_WRITE(sc, RT2860_MAC_SYS_CTRL,
RT2860_MAC_RX_EN | RT2860_MAC_TX_EN);
}
}
}
static void
rt2860_newassoc(struct ieee80211_node *ni, int isnew)
{
struct ieee80211com *ic = ni->ni_ic;
- struct rt2860_softc *sc = ic->ic_ifp->if_softc;
+ struct rt2860_softc *sc = ic->ic_softc;
uint8_t wcid;
wcid = IEEE80211_AID(ni->ni_associd);
if (isnew && ni->ni_associd != 0) {
sc->wcid2ni[wcid] = ni;
/* init WCID table entry */
RAL_WRITE_REGION_1(sc, RT2860_WCID_ENTRY(wcid),
ni->ni_macaddr, IEEE80211_ADDR_LEN);
}
DPRINTF(("new assoc isnew=%d addr=%s WCID=%d\n",
isnew, ether_sprintf(ni->ni_macaddr), wcid));
}
static void
rt2860_node_free(struct ieee80211_node *ni)
{
struct ieee80211com *ic = ni->ni_ic;
- struct rt2860_softc *sc = ic->ic_ifp->if_softc;
+ struct rt2860_softc *sc = ic->ic_softc;
uint8_t wcid;
if (ni->ni_associd != 0) {
wcid = IEEE80211_AID(ni->ni_associd);
/* clear Rx WCID search table entry */
RAL_SET_REGION_4(sc, RT2860_WCID_ENTRY(wcid), 0, 2);
}
sc->sc_node_free(ni);
}
#ifdef IEEE80211_HT
static int
rt2860_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
uint8_t tid)
{
struct rt2860_softc *sc = ic->ic_softc;
uint8_t wcid = ((struct rt2860_node *)ni)->wcid;
uint32_t tmp;
/* update BA session mask */
tmp = RAL_READ(sc, RT2860_WCID_ENTRY(wcid) + 4);
tmp |= (1 << tid) << 16;
RAL_WRITE(sc, RT2860_WCID_ENTRY(wcid) + 4, tmp);
return 0;
}
static void
rt2860_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
uint8_t tid)
{
struct rt2860_softc *sc = ic->ic_softc;
uint8_t wcid = ((struct rt2860_node *)ni)->wcid;
uint32_t tmp;
/* update BA session mask */
tmp = RAL_READ(sc, RT2860_WCID_ENTRY(wcid) + 4);
tmp &= ~((1 << tid) << 16);
RAL_WRITE(sc, RT2860_WCID_ENTRY(wcid) + 4, tmp);
}
#endif
int
rt2860_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct rt2860_vap *rvp = RT2860_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
- struct rt2860_softc *sc = ic->ic_ifp->if_softc;
+ struct rt2860_softc *sc = ic->ic_softc;
uint32_t tmp;
int error;
if (vap->iv_state == IEEE80211_S_RUN) {
/* turn link LED off */
rt2860_set_leds(sc, RT2860_LED_RADIO);
}
if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) {
/* abort TSF synchronization */
tmp = RAL_READ(sc, RT2860_BCN_TIME_CFG);
RAL_WRITE(sc, RT2860_BCN_TIME_CFG,
tmp & ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN |
RT2860_TBTT_TIMER_EN));
}
rt2860_set_gp_timer(sc, 0);
error = rvp->ral_newstate(vap, nstate, arg);
if (error != 0)
return (error);
if (nstate == IEEE80211_S_RUN) {
struct ieee80211_node *ni = vap->iv_bss;
if (ic->ic_opmode != IEEE80211_M_MONITOR) {
rt2860_enable_mrr(sc);
rt2860_set_txpreamble(sc);
rt2860_set_basicrates(sc, &ni->ni_rates);
rt2860_set_bssid(sc, ni->ni_bssid);
}
if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_IBSS ||
vap->iv_opmode == IEEE80211_M_MBSS) {
error = rt2860_setup_beacon(sc, vap);
if (error != 0)
return error;
}
if (ic->ic_opmode != IEEE80211_M_MONITOR) {
rt2860_enable_tsf_sync(sc);
rt2860_set_gp_timer(sc, 500);
}
/* turn link LED on */
rt2860_set_leds(sc, RT2860_LED_RADIO |
(IEEE80211_IS_CHAN_2GHZ(ni->ni_chan) ?
RT2860_LED_LINK_2GHZ : RT2860_LED_LINK_5GHZ));
}
return error;
}
/* Read 16 bits from eFUSE ROM (>= RT3071 only). */
static uint16_t
rt3090_efuse_read_2(struct rt2860_softc *sc, uint16_t addr)
{
uint32_t tmp;
uint16_t reg;
int ntries;
addr *= 2;
/*-
* Read one 16-byte block into registers EFUSE_DATA[0-3]:
* DATA0: F E D C
* DATA1: B A 9 8
* DATA2: 7 6 5 4
* DATA3: 3 2 1 0
*/
tmp = RAL_READ(sc, RT3070_EFUSE_CTRL);
tmp &= ~(RT3070_EFSROM_MODE_MASK | RT3070_EFSROM_AIN_MASK);
tmp |= (addr & ~0xf) << RT3070_EFSROM_AIN_SHIFT | RT3070_EFSROM_KICK;
RAL_WRITE(sc, RT3070_EFUSE_CTRL, tmp);
for (ntries = 0; ntries < 500; ntries++) {
tmp = RAL_READ(sc, RT3070_EFUSE_CTRL);
if (!(tmp & RT3070_EFSROM_KICK))
break;
DELAY(2);
}
if (ntries == 500)
return 0xffff;
if ((tmp & RT3070_EFUSE_AOUT_MASK) == RT3070_EFUSE_AOUT_MASK)
return 0xffff; /* address not found */
/* determine to which 32-bit register our 16-bit word belongs */
reg = RT3070_EFUSE_DATA3 - (addr & 0xc);
tmp = RAL_READ(sc, reg);
return (addr & 2) ? tmp >> 16 : tmp & 0xffff;
}
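/*
 * Worked example (illustrative): for word address 5 the byte address
 * becomes 10 (0x0a). The 16-byte block loaded into EFUSE_DATA[0-3]
 * starts at 10 & ~0xf = 0, the word is read from
 * RT3070_EFUSE_DATA3 - (10 & 0xc) = RT3070_EFUSE_DATA3 - 8 (the
 * register holding bytes "B A 9 8" in the layout above), and since
 * 10 & 2 is non-zero the upper 16 bits of that register are returned.
 */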
/*
* Read 16 bits at address 'addr' from the serial EEPROM (either 93C46,
* 93C66 or 93C86).
*/
static uint16_t
rt2860_eeprom_read_2(struct rt2860_softc *sc, uint16_t addr)
{
uint32_t tmp;
uint16_t val;
int n;
/* clock C once before the first command */
RT2860_EEPROM_CTL(sc, 0);
RT2860_EEPROM_CTL(sc, RT2860_S);
RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_C);
RT2860_EEPROM_CTL(sc, RT2860_S);
/* write start bit (1) */
RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_D);
RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_D | RT2860_C);
/* write READ opcode (10) */
RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_D);
RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_D | RT2860_C);
RT2860_EEPROM_CTL(sc, RT2860_S);
RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_C);
/* write address (A5-A0 or A7-A0) */
n = ((RAL_READ(sc, RT2860_PCI_EECTRL) & 0x30) == 0) ? 5 : 7;
for (; n >= 0; n--) {
RT2860_EEPROM_CTL(sc, RT2860_S |
(((addr >> n) & 1) << RT2860_SHIFT_D));
RT2860_EEPROM_CTL(sc, RT2860_S |
(((addr >> n) & 1) << RT2860_SHIFT_D) | RT2860_C);
}
RT2860_EEPROM_CTL(sc, RT2860_S);
/* read data Q15-Q0 */
val = 0;
for (n = 15; n >= 0; n--) {
RT2860_EEPROM_CTL(sc, RT2860_S | RT2860_C);
tmp = RAL_READ(sc, RT2860_PCI_EECTRL);
val |= ((tmp & RT2860_Q) >> RT2860_SHIFT_Q) << n;
RT2860_EEPROM_CTL(sc, RT2860_S);
}
RT2860_EEPROM_CTL(sc, 0);
/* clear Chip Select and clock C */
RT2860_EEPROM_CTL(sc, RT2860_S);
RT2860_EEPROM_CTL(sc, 0);
RT2860_EEPROM_CTL(sc, RT2860_C);
return val;
}
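/*
 * Illustrative sketch (not part of this change) of the 93Cxx READ
 * sequence that the function above bit-bangs through RT2860_PCI_EECTRL:
 * start bit, READ opcode (10), address MSB-first, then 16 data bits
 * clocked back in. clock_out_bit()/clock_in_bit() are hypothetical
 * helpers standing in for the RT2860_EEPROM_CTL() pairs.
 */
#if 0
static uint16_t
eeprom_93cxx_read(void *dev, uint16_t addr, int addrbits)
{
	uint16_t val = 0;
	int n;

	clock_out_bit(dev, 1);			/* start bit */
	clock_out_bit(dev, 1);			/* READ opcode, first bit */
	clock_out_bit(dev, 0);			/* READ opcode, second bit */
	for (n = addrbits - 1; n >= 0; n--)	/* address, MSB first */
		clock_out_bit(dev, (addr >> n) & 1);
	for (n = 15; n >= 0; n--)		/* data bits Q15..Q0 */
		val |= (uint16_t)clock_in_bit(dev) << n;
	return (val);
}
#endif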
static __inline uint16_t
rt2860_srom_read(struct rt2860_softc *sc, uint8_t addr)
{
/* either eFUSE ROM or EEPROM */
return sc->sc_srom_read(sc, addr);
}
static void
rt2860_intr_coherent(struct rt2860_softc *sc)
{
uint32_t tmp;
/* the DMA engine detected a data coherency problem while checking the DDONE bit */
DPRINTF(("Tx/Rx Coherent interrupt\n"));
/* restart DMA engine */
tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG);
tmp &= ~(RT2860_TX_WB_DDONE | RT2860_RX_DMA_EN | RT2860_TX_DMA_EN);
RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp);
(void)rt2860_txrx_enable(sc);
}
static void
rt2860_drain_stats_fifo(struct rt2860_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
struct ieee80211_node *ni;
uint32_t stat;
int retrycnt;
uint8_t wcid, mcs, pid;
/* drain Tx status FIFO (maxsize = 16) */
while ((stat = RAL_READ(sc, RT2860_TX_STAT_FIFO)) & RT2860_TXQ_VLD) {
DPRINTFN(4, ("tx stat 0x%08x\n", stat));
wcid = (stat >> RT2860_TXQ_WCID_SHIFT) & 0xff;
ni = sc->wcid2ni[wcid];
/* if no ACK was requested, no feedback is available */
if (!(stat & RT2860_TXQ_ACKREQ) || wcid == 0xff || ni == NULL)
continue;
/* update per-STA AMRR stats */
if (stat & RT2860_TXQ_OK) {
/*
 * Check if there were retries, i.e. if the Tx success
* rate is different from the requested rate. Note
* that it works only because we do not allow rate
* fallback from OFDM to CCK.
*/
mcs = (stat >> RT2860_TXQ_MCS_SHIFT) & 0x7f;
pid = (stat >> RT2860_TXQ_PID_SHIFT) & 0xf;
if (mcs + 1 != pid)
retrycnt = 1;
else
retrycnt = 0;
ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
IEEE80211_RATECTL_TX_SUCCESS, &retrycnt, NULL);
} else {
ieee80211_ratectl_tx_complete(ni->ni_vap, ni,
IEEE80211_RATECTL_TX_FAILURE, &retrycnt, NULL);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
}
}
}
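/*
 * Worked example (illustrative): rt2860_tx() stores pid = mcs + 1 in the
 * TXWI, so a frame queued at MCS 7 carries pid = 8. If the status word
 * above reports mcs = 7 the frame went out at the requested rate and
 * retrycnt is 0; any other reported mcs means the hardware fell back to
 * a lower rate, which is accounted to rate control as a single retry.
 */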
static void
rt2860_tx_intr(struct rt2860_softc *sc, int qid)
{
- struct ifnet *ifp = sc->sc_ifp;
struct rt2860_tx_ring *ring = &sc->txq[qid];
uint32_t hw;
rt2860_drain_stats_fifo(sc);
hw = RAL_READ(sc, RT2860_TX_DTX_IDX(qid));
while (ring->next != hw) {
struct rt2860_tx_data *data = ring->data[ring->next];
if (data != NULL) {
bus_dmamap_sync(sc->txwi_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->txwi_dmat, data->map);
if (data->m->m_flags & M_TXCB) {
ieee80211_process_callback(data->ni, data->m,
0);
}
- m_freem(data->m);
- ieee80211_free_node(data->ni);
- data->m = NULL;
+ ieee80211_tx_complete(data->ni, data->m, 0);
data->ni = NULL;
-
+ data->m = NULL;
SLIST_INSERT_HEAD(&sc->data_pool, data, next);
ring->data[ring->next] = NULL;
-
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
}
ring->queued--;
ring->next = (ring->next + 1) % RT2860_TX_RING_COUNT;
}
sc->sc_tx_timer = 0;
if (ring->queued < RT2860_TX_RING_COUNT)
sc->qfullmsk &= ~(1 << qid);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- rt2860_start_locked(ifp);
+ rt2860_start(sc);
}
/*
* Return the Rx chain with the highest RSSI for a given frame.
*/
static __inline uint8_t
rt2860_maxrssi_chain(struct rt2860_softc *sc, const struct rt2860_rxwi *rxwi)
{
uint8_t rxchain = 0;
if (sc->nrxchains > 1) {
if (rxwi->rssi[1] > rxwi->rssi[rxchain])
rxchain = 1;
if (sc->nrxchains > 2)
if (rxwi->rssi[2] > rxwi->rssi[rxchain])
rxchain = 2;
}
return rxchain;
}
static void
rt2860_rx_intr(struct rt2860_softc *sc)
{
struct rt2860_rx_radiotap_header *tap;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct mbuf *m, *m1;
bus_addr_t physaddr;
uint32_t hw;
uint16_t phy;
uint8_t ant;
int8_t rssi, nf;
int error;
hw = RAL_READ(sc, RT2860_FS_DRX_IDX) & 0xfff;
while (sc->rxq.cur != hw) {
struct rt2860_rx_data *data = &sc->rxq.data[sc->rxq.cur];
struct rt2860_rxd *rxd = &sc->rxq.rxd[sc->rxq.cur];
struct rt2860_rxwi *rxwi;
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_POSTREAD);
if (__predict_false(!(rxd->sdl0 & htole16(RT2860_RX_DDONE)))) {
DPRINTF(("RXD DDONE bit not set!\n"));
break; /* should not happen */
}
if (__predict_false(rxd->flags &
htole32(RT2860_RX_CRCERR | RT2860_RX_ICVERR))) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
#ifdef HW_CRYPTO
if (__predict_false(rxd->flags & htole32(RT2860_RX_MICERR))) {
/* report MIC failures to net80211 for TKIP */
ic->ic_stats.is_rx_locmicfail++;
ieee80211_michael_mic_failure(ic, 0/* XXX */);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
#endif
m1 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (__predict_false(m1 == NULL)) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->rxq.data_dmat, data->map);
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(m1, void *), MCLBYTES, rt2860_dma_map_addr,
&physaddr, 0);
if (__predict_false(error != 0)) {
m_freem(m1);
/* try to reload the old mbuf */
error = bus_dmamap_load(sc->rxq.data_dmat, data->map,
mtod(data->m, void *), MCLBYTES,
rt2860_dma_map_addr, &physaddr, 0);
if (__predict_false(error != 0)) {
panic("%s: could not load old rx mbuf",
device_get_name(sc->sc_dev));
}
/* physical address may have changed */
rxd->sdp0 = htole32(physaddr);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto skip;
}
/*
* New mbuf successfully loaded, update Rx ring and continue
* processing.
*/
m = data->m;
data->m = m1;
rxd->sdp0 = htole32(physaddr);
rxwi = mtod(m, struct rt2860_rxwi *);
/* finalize mbuf */
- m->m_pkthdr.rcvif = ifp;
m->m_data = (caddr_t)(rxwi + 1);
m->m_pkthdr.len = m->m_len = le16toh(rxwi->len) & 0xfff;
wh = mtod(m, struct ieee80211_frame *);
#ifdef HW_CRYPTO
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
/* frame is decrypted by hardware */
wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
}
#endif
/* HW may insert 2 padding bytes after 802.11 header */
if (rxd->flags & htole32(RT2860_RX_L2PAD)) {
u_int hdrlen = ieee80211_hdrsize(wh);
ovbcopy(wh, (caddr_t)wh + 2, hdrlen);
m->m_data += 2;
wh = mtod(m, struct ieee80211_frame *);
}
ant = rt2860_maxrssi_chain(sc, rxwi);
rssi = rt2860_rssi2dbm(sc, rxwi->rssi[ant], ant);
nf = RT2860_NOISE_FLOOR;
if (ieee80211_radiotap_active(ic)) {
tap = &sc->sc_rxtap;
tap->wr_flags = 0;
tap->wr_antenna = ant;
tap->wr_antsignal = nf + rssi;
tap->wr_antnoise = nf;
/* in case it can't be found below */
tap->wr_rate = 2;
phy = le16toh(rxwi->phy);
switch (phy & RT2860_PHY_MODE) {
case RT2860_PHY_CCK:
switch ((phy & RT2860_PHY_MCS) & ~RT2860_PHY_SHPRE) {
case 0: tap->wr_rate = 2; break;
case 1: tap->wr_rate = 4; break;
case 2: tap->wr_rate = 11; break;
case 3: tap->wr_rate = 22; break;
}
if (phy & RT2860_PHY_SHPRE)
tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
break;
case RT2860_PHY_OFDM:
switch (phy & RT2860_PHY_MCS) {
case 0: tap->wr_rate = 12; break;
case 1: tap->wr_rate = 18; break;
case 2: tap->wr_rate = 24; break;
case 3: tap->wr_rate = 36; break;
case 4: tap->wr_rate = 48; break;
case 5: tap->wr_rate = 72; break;
case 6: tap->wr_rate = 96; break;
case 7: tap->wr_rate = 108; break;
}
break;
}
}
RAL_UNLOCK(sc);
wh = mtod(m, struct ieee80211_frame *);
/* send the frame to the 802.11 layer */
ni = ieee80211_find_rxnode(ic,
(struct ieee80211_frame_min *)wh);
if (ni != NULL) {
(void)ieee80211_input(ni, m, rssi - nf, nf);
ieee80211_free_node(ni);
} else
(void)ieee80211_input_all(ic, m, rssi - nf, nf);
RAL_LOCK(sc);
skip: rxd->sdl0 &= ~htole16(RT2860_RX_DDONE);
bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map,
BUS_DMASYNC_PREWRITE);
sc->rxq.cur = (sc->rxq.cur + 1) % RT2860_RX_RING_COUNT;
}
/* tell HW what we have processed */
RAL_WRITE(sc, RT2860_RX_CALC_IDX,
(sc->rxq.cur - 1) % RT2860_RX_RING_COUNT);
}
static void
rt2860_tbtt_intr(struct rt2860_softc *sc)
{
#if 0
struct ieee80211com *ic = &sc->sc_ic;
#ifndef IEEE80211_STA_ONLY
if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
/* one less beacon until next DTIM */
if (ic->ic_dtim_count == 0)
ic->ic_dtim_count = ic->ic_dtim_period - 1;
else
ic->ic_dtim_count--;
/* update dynamic parts of beacon */
rt2860_setup_beacon(sc);
/* flush buffered multicast frames */
if (ic->ic_dtim_count == 0)
ieee80211_notify_dtim(ic);
}
#endif
/* check if protection mode has changed */
if ((sc->sc_ic_flags ^ ic->ic_flags) & IEEE80211_F_USEPROT) {
- rt2860_updateprot(ic);
+ rt2860_updateprot(sc);
sc->sc_ic_flags = ic->ic_flags;
}
#endif
}
static void
rt2860_gp_intr(struct rt2860_softc *sc)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
DPRINTFN(2, ("GP timeout state=%d\n", vap->iv_state));
if (vap->iv_state == IEEE80211_S_RUN)
rt2860_updatestats(sc);
}
void
rt2860_intr(void *arg)
{
struct rt2860_softc *sc = arg;
uint32_t r;
RAL_LOCK(sc);
r = RAL_READ(sc, RT2860_INT_STATUS);
if (__predict_false(r == 0xffffffff)) {
RAL_UNLOCK(sc);
return; /* device likely went away */
}
if (r == 0) {
RAL_UNLOCK(sc);
return; /* not for us */
}
/* acknowledge interrupts */
RAL_WRITE(sc, RT2860_INT_STATUS, r);
if (r & RT2860_TX_RX_COHERENT)
rt2860_intr_coherent(sc);
if (r & RT2860_MAC_INT_2) /* TX status */
rt2860_drain_stats_fifo(sc);
if (r & RT2860_TX_DONE_INT5)
rt2860_tx_intr(sc, 5);
if (r & RT2860_RX_DONE_INT)
rt2860_rx_intr(sc);
if (r & RT2860_TX_DONE_INT4)
rt2860_tx_intr(sc, 4);
if (r & RT2860_TX_DONE_INT3)
rt2860_tx_intr(sc, 3);
if (r & RT2860_TX_DONE_INT2)
rt2860_tx_intr(sc, 2);
if (r & RT2860_TX_DONE_INT1)
rt2860_tx_intr(sc, 1);
if (r & RT2860_TX_DONE_INT0)
rt2860_tx_intr(sc, 0);
if (r & RT2860_MAC_INT_0) /* TBTT */
rt2860_tbtt_intr(sc);
if (r & RT2860_MAC_INT_3) /* Auto wakeup */
/* TBD wakeup */;
if (r & RT2860_MAC_INT_4) /* GP timer */
rt2860_gp_intr(sc);
RAL_UNLOCK(sc);
}
static int
rt2860_tx(struct rt2860_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = ni->ni_vap;
struct rt2860_tx_ring *ring;
struct rt2860_tx_data *data;
struct rt2860_txd *txd;
struct rt2860_txwi *txwi;
struct ieee80211_frame *wh;
const struct ieee80211_txparam *tp;
struct ieee80211_key *k;
struct mbuf *m1;
bus_dma_segment_t segs[RT2860_MAX_SCATTER];
bus_dma_segment_t *seg;
u_int hdrlen;
uint16_t qos, dur;
uint8_t type, qsel, mcs, pid, tid, qid;
int i, nsegs, ntxds, pad, rate, ridx, error;
/* the data pool contains at least one element, pick the first */
data = SLIST_FIRST(&sc->data_pool);
wh = mtod(m, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
m_freem(m);
return ENOBUFS;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m, struct ieee80211_frame *);
}
hdrlen = ieee80211_anyhdrsize(wh);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
rate = tp->mcastrate;
} else if (m->m_flags & M_EAPOL) {
rate = tp->mgmtrate;
} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
rate = tp->ucastrate;
} else {
(void) ieee80211_ratectl_rate(ni, NULL, 0);
rate = ni->ni_txrate;
}
rate &= IEEE80211_RATE_VAL;
qid = M_WME_GETAC(m);
if (IEEE80211_QOS_HAS_SEQ(wh)) {
qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
tid = qos & IEEE80211_QOS_TID;
} else {
qos = 0;
tid = 0;
}
ring = &sc->txq[qid];
ridx = ieee80211_legacy_rate_lookup(ic->ic_rt, rate);
/* get MCS code from rate index */
mcs = rt2860_rates[ridx].mcs;
/* setup TX Wireless Information */
txwi = data->txwi;
txwi->flags = 0;
/* let HW generate seq numbers for non-QoS frames */
txwi->xflags = qos ? 0 : RT2860_TX_NSEQ;
if (type == IEEE80211_FC0_TYPE_DATA)
txwi->wcid = IEEE80211_AID(ni->ni_associd);
else
txwi->wcid = 0xff;
txwi->len = htole16(m->m_pkthdr.len);
if (rt2860_rates[ridx].phy == IEEE80211_T_DS) {
txwi->phy = htole16(RT2860_PHY_CCK);
if (ridx != RT2860_RIDX_CCK1 &&
(ic->ic_flags & IEEE80211_F_SHPREAMBLE))
mcs |= RT2860_PHY_SHPRE;
} else
txwi->phy = htole16(RT2860_PHY_OFDM);
txwi->phy |= htole16(mcs);
/*
* We store the MCS code into the driver-private PacketID field.
* The PacketID is latched into TX_STAT_FIFO when Tx completes so
* that we know at which initial rate the frame was transmitted.
* We add 1 to the MCS code because setting the PacketID field to
* 0 means that we don't want feedback in TX_STAT_FIFO.
*/
pid = (mcs + 1) & 0xf;
txwi->len |= htole16(pid << RT2860_TX_PID_SHIFT);
/* check if RTS/CTS or CTS-to-self protection is required */
if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
(m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold ||
((ic->ic_flags & IEEE80211_F_USEPROT) &&
rt2860_rates[ridx].phy == IEEE80211_T_OFDM)))
txwi->txop = RT2860_TX_TXOP_HT;
else
txwi->txop = RT2860_TX_TXOP_BACKOFF;
if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
(!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
IEEE80211_QOS_ACKPOLICY_NOACK)) {
txwi->xflags |= RT2860_TX_ACK;
if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
dur = rt2860_rates[ridx].sp_ack_dur;
else
dur = rt2860_rates[ridx].lp_ack_dur;
*(uint16_t *)wh->i_dur = htole16(dur);
}
/* ask MAC to insert timestamp into probe responses */
if ((wh->i_fc[0] &
(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
(IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
/* NOTE: beacons do not pass through tx_data() */
txwi->flags |= RT2860_TX_TS;
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2860_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
if (mcs & RT2860_PHY_SHPRE)
tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
ieee80211_radiotap_tx(vap, m);
}
pad = (hdrlen + 3) & ~3;
/* copy and trim 802.11 header */
memcpy(txwi + 1, wh, hdrlen);
m_adj(m, hdrlen);
error = bus_dmamap_load_mbuf_sg(sc->txwi_dmat, data->map, m, segs,
&nsegs, 0);
if (__predict_false(error != 0 && error != EFBIG)) {
device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
error);
m_freem(m);
return error;
}
if (__predict_true(error == 0)) {
/* determine how many TXDs are required */
ntxds = 1 + (nsegs / 2);
if (ring->queued + ntxds >= RT2860_TX_RING_COUNT) {
/* not enough free TXDs, force mbuf defrag */
bus_dmamap_unload(sc->txwi_dmat, data->map);
error = EFBIG;
}
}
if (__predict_false(error != 0)) {
m1 = m_defrag(m, M_NOWAIT);
if (m1 == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");
m_freem(m);
return ENOBUFS;
}
m = m1;
error = bus_dmamap_load_mbuf_sg(sc->txwi_dmat, data->map, m,
segs, &nsegs, 0);
if (__predict_false(error != 0)) {
device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
error);
m_freem(m);
return error;
}
/* determine how many TXDs are now required */
ntxds = 1 + (nsegs / 2);
if (ring->queued + ntxds >= RT2860_TX_RING_COUNT) {
/* this is a hopeless case, drop the mbuf! */
bus_dmamap_unload(sc->txwi_dmat, data->map);
m_freem(m);
return ENOBUFS;
}
}
qsel = (qid < WME_NUM_AC) ? RT2860_TX_QSEL_EDCA : RT2860_TX_QSEL_MGMT;
/* first segment is TXWI + 802.11 header */
txd = &ring->txd[ring->cur];
txd->sdp0 = htole32(data->paddr);
txd->sdl0 = htole16(sizeof (struct rt2860_txwi) + pad);
txd->flags = qsel;
/* setup payload segments */
seg = &segs[0];
for (i = nsegs; i >= 2; i -= 2) {
txd->sdp1 = htole32(seg->ds_addr);
txd->sdl1 = htole16(seg->ds_len);
seg++;
ring->cur = (ring->cur + 1) % RT2860_TX_RING_COUNT;
/* grab a new Tx descriptor */
txd = &ring->txd[ring->cur];
txd->sdp0 = htole32(seg->ds_addr);
txd->sdl0 = htole16(seg->ds_len);
txd->flags = qsel;
seg++;
}
/* finalize last segment */
if (i > 0) {
txd->sdp1 = htole32(seg->ds_addr);
txd->sdl1 = htole16(seg->ds_len | RT2860_TX_LS1);
} else {
txd->sdl0 |= htole16(RT2860_TX_LS0);
txd->sdl1 = 0;
}
/* remove from the free pool and link it into the SW Tx slot */
SLIST_REMOVE_HEAD(&sc->data_pool, next);
data->m = m;
data->ni = ni;
ring->data[ring->cur] = data;
bus_dmamap_sync(sc->txwi_dmat, sc->txwi_map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->txwi_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
DPRINTFN(4, ("sending frame qid=%d wcid=%d nsegs=%d ridx=%d\n",
qid, txwi->wcid, nsegs, ridx));
ring->cur = (ring->cur + 1) % RT2860_TX_RING_COUNT;
ring->queued += ntxds;
if (ring->queued >= RT2860_TX_RING_COUNT)
sc->qfullmsk |= 1 << qid;
/* kick Tx */
RAL_WRITE(sc, RT2860_TX_CTX_IDX(qid), ring->cur);
return 0;
}
static int
rt2860_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
- struct rt2860_softc *sc = ifp->if_softc;
+ struct rt2860_softc *sc = ic->ic_softc;
int error;
RAL_LOCK(sc);
/* prevent management frames from being sent if we're not ready */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!(sc->sc_flags & RT2860_RUNNNING)) {
RAL_UNLOCK(sc);
m_freem(m);
ieee80211_free_node(ni);
return ENETDOWN;
}
if (params == NULL) {
/*
* Legacy path; interpret frame contents to decide
* precisely how to send the frame.
*/
error = rt2860_tx(sc, m, ni);
} else {
/*
* Caller supplied explicit parameters to use in
* sending the frame.
*/
error = rt2860_tx_raw(sc, m, ni, params);
}
if (error != 0) {
/* NB: m is reclaimed on tx failure */
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
sc->sc_tx_timer = 5;
RAL_UNLOCK(sc);
return error;
}
static int
rt2860_tx_raw(struct rt2860_softc *sc, struct mbuf *m,
struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = ni->ni_vap;
struct rt2860_tx_ring *ring;
struct rt2860_tx_data *data;
struct rt2860_txd *txd;
struct rt2860_txwi *txwi;
struct ieee80211_frame *wh;
struct mbuf *m1;
bus_dma_segment_t segs[RT2860_MAX_SCATTER];
bus_dma_segment_t *seg;
u_int hdrlen;
uint16_t dur;
uint8_t type, qsel, mcs, pid, tid, qid;
int i, nsegs, ntxds, pad, rate, ridx, error;
/* the data pool contains at least one element, pick the first */
data = SLIST_FIRST(&sc->data_pool);
wh = mtod(m, struct ieee80211_frame *);
hdrlen = ieee80211_hdrsize(wh);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
/* Choose a TX rate index. */
rate = params->ibp_rate0;
ridx = ieee80211_legacy_rate_lookup(ic->ic_rt,
rate & IEEE80211_RATE_VAL);
if (ridx == (uint8_t)-1) {
/* XXX fall back to mcast/mgmt rate? */
m_freem(m);
return EINVAL;
}
qid = params->ibp_pri & 3;
tid = 0;
ring = &sc->txq[qid];
/* get MCS code from rate index */
mcs = rt2860_rates[ridx].mcs;
/* setup TX Wireless Information */
txwi = data->txwi;
txwi->flags = 0;
/* let HW generate seq numbers for non-QoS frames */
txwi->xflags = params->ibp_pri & 3 ? 0 : RT2860_TX_NSEQ;
txwi->wcid = 0xff;
txwi->len = htole16(m->m_pkthdr.len);
if (rt2860_rates[ridx].phy == IEEE80211_T_DS) {
txwi->phy = htole16(RT2860_PHY_CCK);
if (ridx != RT2860_RIDX_CCK1 &&
(ic->ic_flags & IEEE80211_F_SHPREAMBLE))
mcs |= RT2860_PHY_SHPRE;
} else
txwi->phy = htole16(RT2860_PHY_OFDM);
txwi->phy |= htole16(mcs);
/*
* We store the MCS code into the driver-private PacketID field.
* The PacketID is latched into TX_STAT_FIFO when Tx completes so
* that we know at which initial rate the frame was transmitted.
* We add 1 to the MCS code because setting the PacketID field to
* 0 means that we don't want feedback in TX_STAT_FIFO.
*/
pid = (mcs + 1) & 0xf;
txwi->len |= htole16(pid << RT2860_TX_PID_SHIFT);
/* check if RTS/CTS or CTS-to-self protection is required */
if (params->ibp_flags & IEEE80211_BPF_RTS ||
params->ibp_flags & IEEE80211_BPF_CTS)
txwi->txop = RT2860_TX_TXOP_HT;
else
txwi->txop = RT2860_TX_TXOP_BACKOFF;
if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) {
txwi->xflags |= RT2860_TX_ACK;
if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
dur = rt2860_rates[ridx].sp_ack_dur;
else
dur = rt2860_rates[ridx].lp_ack_dur;
*(uint16_t *)wh->i_dur = htole16(dur);
}
/* ask MAC to insert timestamp into probe responses */
if ((wh->i_fc[0] &
(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
(IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
/* NOTE: beacons do not pass through tx_data() */
txwi->flags |= RT2860_TX_TS;
if (ieee80211_radiotap_active_vap(vap)) {
struct rt2860_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
if (mcs & RT2860_PHY_SHPRE)
tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
ieee80211_radiotap_tx(vap, m);
}
pad = (hdrlen + 3) & ~3;
/* copy and trim 802.11 header */
memcpy(txwi + 1, wh, hdrlen);
m_adj(m, hdrlen);
error = bus_dmamap_load_mbuf_sg(sc->txwi_dmat, data->map, m, segs,
&nsegs, 0);
if (__predict_false(error != 0 && error != EFBIG)) {
device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
error);
m_freem(m);
return error;
}
if (__predict_true(error == 0)) {
/* determine how many TXDs are required */
ntxds = 1 + (nsegs / 2);
if (ring->queued + ntxds >= RT2860_TX_RING_COUNT) {
/* not enough free TXDs, force mbuf defrag */
bus_dmamap_unload(sc->txwi_dmat, data->map);
error = EFBIG;
}
}
if (__predict_false(error != 0)) {
m1 = m_defrag(m, M_NOWAIT);
if (m1 == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");
m_freem(m);
return ENOBUFS;
}
m = m1;
error = bus_dmamap_load_mbuf_sg(sc->txwi_dmat, data->map, m,
segs, &nsegs, 0);
if (__predict_false(error != 0)) {
device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
error);
m_freem(m);
return error;
}
/* determine how many TXDs are now required */
ntxds = 1 + (nsegs / 2);
if (ring->queued + ntxds >= RT2860_TX_RING_COUNT) {
/* this is a hopeless case, drop the mbuf! */
bus_dmamap_unload(sc->txwi_dmat, data->map);
m_freem(m);
return ENOBUFS;
}
}
qsel = (qid < WME_NUM_AC) ? RT2860_TX_QSEL_EDCA : RT2860_TX_QSEL_MGMT;
/* first segment is TXWI + 802.11 header */
txd = &ring->txd[ring->cur];
txd->sdp0 = htole32(data->paddr);
txd->sdl0 = htole16(sizeof (struct rt2860_txwi) + pad);
txd->flags = qsel;
/* setup payload segments */
seg = &segs[0];
for (i = nsegs; i >= 2; i -= 2) {
txd->sdp1 = htole32(seg->ds_addr);
txd->sdl1 = htole16(seg->ds_len);
seg++;
ring->cur = (ring->cur + 1) % RT2860_TX_RING_COUNT;
/* grab a new Tx descriptor */
txd = &ring->txd[ring->cur];
txd->sdp0 = htole32(seg->ds_addr);
txd->sdl0 = htole16(seg->ds_len);
txd->flags = qsel;
seg++;
}
/* finalize last segment */
if (i > 0) {
txd->sdp1 = htole32(seg->ds_addr);
txd->sdl1 = htole16(seg->ds_len | RT2860_TX_LS1);
} else {
txd->sdl0 |= htole16(RT2860_TX_LS0);
txd->sdl1 = 0;
}
/* remove from the free pool and link it into the SW Tx slot */
SLIST_REMOVE_HEAD(&sc->data_pool, next);
data->m = m;
data->ni = ni;
ring->data[ring->cur] = data;
bus_dmamap_sync(sc->txwi_dmat, sc->txwi_map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->txwi_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE);
DPRINTFN(4, ("sending frame qid=%d wcid=%d nsegs=%d ridx=%d\n",
qid, txwi->wcid, nsegs, ridx));
ring->cur = (ring->cur + 1) % RT2860_TX_RING_COUNT;
ring->queued += ntxds;
if (ring->queued >= RT2860_TX_RING_COUNT)
sc->qfullmsk |= 1 << qid;
/* kick Tx */
RAL_WRITE(sc, RT2860_TX_CTX_IDX(qid), ring->cur);
return 0;
}
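/*
 * Illustrative note (not from the original source): each Tx descriptor
 * carries two buffer pointers (sdp0/sdl0 and sdp1/sdl1).  The first
 * descriptor's sdp0 always points at the TXWI + 802.11 header, so a
 * frame whose payload maps to nsegs DMA segments needs
 * ntxds = 1 + nsegs / 2 descriptors, e.g.:
 *   nsegs = 1 -> 1 TXD (payload in sdp1, flagged RT2860_TX_LS1)
 *   nsegs = 2 -> 2 TXDs (last segment in sdp0, flagged RT2860_TX_LS0)
 *   nsegs = 3 -> 2 TXDs (last segment in sdp1, flagged RT2860_TX_LS1)
 * The PacketID written above (pid = (mcs + 1) & 0xf) is echoed back in
 * TX_STAT_FIFO, which is how the Tx completion path recovers the MCS a
 * frame was sent at; a PacketID of 0 would disable that feedback.
 */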
-static void
-rt2860_start(struct ifnet *ifp)
+static int
+rt2860_transmit(struct ieee80211com *ic, struct mbuf *m)
{
- struct rt2860_softc *sc = ifp->if_softc;
+ struct rt2860_softc *sc = ic->ic_softc;
+ int error;
RAL_LOCK(sc);
- rt2860_start_locked(ifp);
+ if ((sc->sc_flags & RT2860_RUNNNING) == 0) {
+ RAL_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ RAL_UNLOCK(sc);
+ return (error);
+ }
+ rt2860_start(sc);
RAL_UNLOCK(sc);
+
+ return (0);
}
static void
-rt2860_start_locked(struct ifnet *ifp)
+rt2860_start(struct rt2860_softc *sc)
{
- struct rt2860_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct mbuf *m;
RAL_LOCK_ASSERT(sc);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
- (ifp->if_drv_flags & IFF_DRV_OACTIVE))
+ if ((sc->sc_flags & RT2860_RUNNNING) == 0)
return;
- for (;;) {
- if (SLIST_EMPTY(&sc->data_pool) || sc->qfullmsk != 0) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- break;
- }
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
+ while (!SLIST_EMPTY(&sc->data_pool) && sc->qfullmsk == 0 &&
+ (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
if (rt2860_tx(sc, m, ni) != 0) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
continue;
}
sc->sc_tx_timer = 5;
}
}
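/*
 * Illustrative note (not from the original source): with this change
 * net80211 hands frames to rt2860_transmit() (ic->ic_transmit), which
 * queues them on the driver-private sc_snd mbufq and calls
 * rt2860_start().  rt2860_start() drains the queue only while Tx
 * resources are available (the TXWI data_pool is non-empty and no ring
 * is full, i.e. qfullmsk == 0), replacing the old ifnet if_snd queue
 * and the IFF_DRV_OACTIVE flow-control flag.
 */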
static void
rt2860_watchdog(void *arg)
{
struct rt2860_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
RAL_LOCK_ASSERT(sc);
- KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running"));
+ KASSERT(sc->sc_flags & RT2860_RUNNNING, ("not running"));
if (sc->sc_invalid) /* card ejected */
return;
if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) {
- if_printf(ifp, "device timeout\n");
+ device_printf(sc->sc_dev, "device timeout\n");
rt2860_stop_locked(sc);
rt2860_init_locked(sc);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
return;
}
callout_reset(&sc->watchdog_ch, hz, rt2860_watchdog, sc);
}
-static int
-rt2860_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+rt2860_parent(struct ieee80211com *ic)
{
- struct rt2860_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *)data;
- int error = 0, startall = 0;
+ struct rt2860_softc *sc = ic->ic_softc;
+ int startall = 0;
- switch (cmd) {
- case SIOCSIFFLAGS:
- RAL_LOCK(sc);
- if (ifp->if_flags & IFF_UP) {
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- rt2860_init_locked(sc);
- startall = 1;
- } else
- rt2860_update_promisc(ic);
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- rt2860_stop_locked(sc);
- }
- RAL_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCSIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
- }
- return error;
+ RAL_LOCK(sc);
+ if (ic->ic_nrunning > 0) {
+ if (!(sc->sc_flags & RT2860_RUNNNING)) {
+ rt2860_init_locked(sc);
+ startall = 1;
+ } else
+ rt2860_update_promisc(ic);
+ } else if (sc->sc_flags & RT2860_RUNNNING)
+ rt2860_stop_locked(sc);
+ RAL_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
}
/*
* Reading and writing from/to the BBP is different from RT2560 and RT2661.
* We access the BBP through the 8051 microcontroller unit which means that
* the microcode must be loaded first.
*/
void
rt2860_mcu_bbp_write(struct rt2860_softc *sc, uint8_t reg, uint8_t val)
{
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2860_H2M_BBPAGENT) & RT2860_BBP_CSR_KICK))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev,
"could not write to BBP through MCU\n");
return;
}
RAL_WRITE(sc, RT2860_H2M_BBPAGENT, RT2860_BBP_RW_PARALLEL |
RT2860_BBP_CSR_KICK | reg << 8 | val);
RAL_BARRIER_WRITE(sc);
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_BBP, 0, 0);
DELAY(1000);
}
uint8_t
rt2860_mcu_bbp_read(struct rt2860_softc *sc, uint8_t reg)
{
uint32_t val;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2860_H2M_BBPAGENT) & RT2860_BBP_CSR_KICK))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev,
"could not read from BBP through MCU\n");
return 0;
}
RAL_WRITE(sc, RT2860_H2M_BBPAGENT, RT2860_BBP_RW_PARALLEL |
RT2860_BBP_CSR_KICK | RT2860_BBP_CSR_READ | reg << 8);
RAL_BARRIER_WRITE(sc);
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_BBP, 0, 0);
DELAY(1000);
for (ntries = 0; ntries < 100; ntries++) {
val = RAL_READ(sc, RT2860_H2M_BBPAGENT);
if (!(val & RT2860_BBP_CSR_KICK))
return val & 0xff;
DELAY(1);
}
device_printf(sc->sc_dev, "could not read from BBP through MCU\n");
return 0;
}
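/*
 * Illustrative note (not from the original source): as encoded above,
 * the H2M_BBPAGENT word carries the BBP register number in bits 15:8
 * and the value in bits 7:0, with RT2860_BBP_CSR_KICK acting as the
 * busy/start bit and RT2860_BBP_CSR_READ selecting a read cycle.
 * Callers typically do a read-modify-write, e.g.:
 *
 *	bbp = rt2860_mcu_bbp_read(sc, 4);
 *	rt2860_mcu_bbp_write(sc, 4, bbp | RT5390_MAC_IF_CTRL);
 */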
/*
* Write to one of the 4 programmable 24-bit RF registers.
*/
static void
rt2860_rf_write(struct rt2860_softc *sc, uint8_t reg, uint32_t val)
{
uint32_t tmp;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2860_RF_CSR_CFG0) & RT2860_RF_REG_CTRL))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not write to RF\n");
return;
}
/* RF registers are 24-bit on the RT2860 */
tmp = RT2860_RF_REG_CTRL | 24 << RT2860_RF_REG_WIDTH_SHIFT |
(val & 0x3fffff) << 2 | (reg & 3);
RAL_WRITE(sc, RT2860_RF_CSR_CFG0, tmp);
}
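/*
 * Illustrative note (not from the original source): the word written
 * above packs the 2-bit register index into bits 1:0 and the 22-bit
 * register value into bits 23:2 (24 bits in total, matching the "24"
 * programmed into the width field), with RT2860_RF_REG_CTRL doubling
 * as the busy/kick bit polled at the top of the function.
 */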
static uint8_t
rt3090_rf_read(struct rt2860_softc *sc, uint8_t reg)
{
uint32_t tmp;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT3070_RF_CSR_CFG) & RT3070_RF_KICK))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not read RF register\n");
return 0xff;
}
tmp = RT3070_RF_KICK | reg << 8;
RAL_WRITE(sc, RT3070_RF_CSR_CFG, tmp);
for (ntries = 0; ntries < 100; ntries++) {
tmp = RAL_READ(sc, RT3070_RF_CSR_CFG);
if (!(tmp & RT3070_RF_KICK))
break;
DELAY(1);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not read RF register\n");
return 0xff;
}
return tmp & 0xff;
}
void
rt3090_rf_write(struct rt2860_softc *sc, uint8_t reg, uint8_t val)
{
uint32_t tmp;
int ntries;
for (ntries = 0; ntries < 10; ntries++) {
if (!(RAL_READ(sc, RT3070_RF_CSR_CFG) & RT3070_RF_KICK))
break;
DELAY(10);
}
if (ntries == 10) {
device_printf(sc->sc_dev, "could not write to RF\n");
return;
}
tmp = RT3070_RF_WRITE | RT3070_RF_KICK | reg << 8 | val;
RAL_WRITE(sc, RT3070_RF_CSR_CFG, tmp);
}
/*
* Send a command to the 8051 microcontroller unit.
*/
int
rt2860_mcu_cmd(struct rt2860_softc *sc, uint8_t cmd, uint16_t arg, int wait)
{
int slot, ntries;
uint32_t tmp;
uint8_t cid;
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2860_H2M_MAILBOX) & RT2860_H2M_BUSY))
break;
DELAY(2);
}
if (ntries == 100)
return EIO;
cid = wait ? cmd : RT2860_TOKEN_NO_INTR;
RAL_WRITE(sc, RT2860_H2M_MAILBOX, RT2860_H2M_BUSY | cid << 16 | arg);
RAL_BARRIER_WRITE(sc);
RAL_WRITE(sc, RT2860_HOST_CMD, cmd);
if (!wait)
return 0;
/* wait for the command to complete */
for (ntries = 0; ntries < 200; ntries++) {
tmp = RAL_READ(sc, RT2860_H2M_MAILBOX_CID);
/* find the command slot */
for (slot = 0; slot < 4; slot++, tmp >>= 8)
if ((tmp & 0xff) == cid)
break;
if (slot < 4)
break;
DELAY(100);
}
if (ntries == 200) {
/* clear command and status */
RAL_WRITE(sc, RT2860_H2M_MAILBOX_STATUS, 0xffffffff);
RAL_WRITE(sc, RT2860_H2M_MAILBOX_CID, 0xffffffff);
return ETIMEDOUT;
}
/* get command status (1 means success) */
tmp = RAL_READ(sc, RT2860_H2M_MAILBOX_STATUS);
tmp = (tmp >> (slot * 8)) & 0xff;
DPRINTF(("MCU command=0x%02x slot=%d status=0x%02x\n",
cmd, slot, tmp));
/* clear command and status */
RAL_WRITE(sc, RT2860_H2M_MAILBOX_STATUS, 0xffffffff);
RAL_WRITE(sc, RT2860_H2M_MAILBOX_CID, 0xffffffff);
return (tmp == 1) ? 0 : EIO;
}
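/*
 * Illustrative note (not from the original source): for a waited
 * command the token (cid == cmd) is written into bits 23:16 of
 * H2M_MAILBOX along with the 16-bit argument.  When the MCU is done it
 * stores the token in one of the four byte-wide slots of
 * H2M_MAILBOX_CID; the loop above locates that slot and then reads the
 * matching byte of H2M_MAILBOX_STATUS, where 1 means success (e.g. a
 * token found in byte 1 means the status is (STATUS >> 8) & 0xff).
 */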
static void
rt2860_enable_mrr(struct rt2860_softc *sc)
{
#define CCK(mcs) (mcs)
#define OFDM(mcs) (1 << 3 | (mcs))
RAL_WRITE(sc, RT2860_LG_FBK_CFG0,
OFDM(6) << 28 | /* 54->48 */
OFDM(5) << 24 | /* 48->36 */
OFDM(4) << 20 | /* 36->24 */
OFDM(3) << 16 | /* 24->18 */
OFDM(2) << 12 | /* 18->12 */
OFDM(1) << 8 | /* 12-> 9 */
OFDM(0) << 4 | /* 9-> 6 */
OFDM(0)); /* 6-> 6 */
RAL_WRITE(sc, RT2860_LG_FBK_CFG1,
CCK(2) << 12 | /* 11->5.5 */
CCK(1) << 8 | /* 5.5-> 2 */
CCK(0) << 4 | /* 2-> 1 */
CCK(0)); /* 1-> 1 */
#undef OFDM
#undef CCK
}
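/*
 * Illustrative note (not from the original source): each 4-bit field
 * of the fallback registers selects the rate to retry at when the
 * corresponding initial rate fails, with bit 3 distinguishing OFDM
 * from CCK.  For instance, OFDM(6) << 28 in LG_FBK_CFG0 makes a failed
 * 54 Mbps frame fall back to 48 Mbps, per the per-field comments above.
 */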
static void
rt2860_set_txpreamble(struct rt2860_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
tmp = RAL_READ(sc, RT2860_AUTO_RSP_CFG);
tmp &= ~RT2860_CCK_SHORT_EN;
if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
tmp |= RT2860_CCK_SHORT_EN;
RAL_WRITE(sc, RT2860_AUTO_RSP_CFG, tmp);
}
void
rt2860_set_basicrates(struct rt2860_softc *sc,
const struct ieee80211_rateset *rs)
{
#define RV(r) ((r) & IEEE80211_RATE_VAL)
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t mask = 0;
uint8_t rate;
int i;
for (i = 0; i < rs->rs_nrates; i++) {
rate = rs->rs_rates[i];
if (!(rate & IEEE80211_RATE_BASIC))
continue;
mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt, RV(rate));
}
RAL_WRITE(sc, RT2860_LEGACY_BASIC_RATE, mask);
#undef RV
}
static void
rt2860_scan_start(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct rt2860_softc *sc = ifp->if_softc;
+ struct rt2860_softc *sc = ic->ic_softc;
uint32_t tmp;
tmp = RAL_READ(sc, RT2860_BCN_TIME_CFG);
RAL_WRITE(sc, RT2860_BCN_TIME_CFG,
tmp & ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN |
RT2860_TBTT_TIMER_EN));
rt2860_set_gp_timer(sc, 0);
}
static void
rt2860_scan_end(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct rt2860_softc *sc = ifp->if_softc;
+ struct rt2860_softc *sc = ic->ic_softc;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
if (vap->iv_state == IEEE80211_S_RUN) {
rt2860_enable_tsf_sync(sc);
rt2860_set_gp_timer(sc, 500);
}
}
static void
rt2860_set_channel(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct rt2860_softc *sc = ifp->if_softc;
+ struct rt2860_softc *sc = ic->ic_softc;
RAL_LOCK(sc);
rt2860_switch_chan(sc, ic->ic_curchan);
RAL_UNLOCK(sc);
}
static void
rt2860_select_chan_group(struct rt2860_softc *sc, int group)
{
uint32_t tmp;
uint8_t agc;
rt2860_mcu_bbp_write(sc, 62, 0x37 - sc->lna[group]);
rt2860_mcu_bbp_write(sc, 63, 0x37 - sc->lna[group]);
rt2860_mcu_bbp_write(sc, 64, 0x37 - sc->lna[group]);
rt2860_mcu_bbp_write(sc, 86, 0x00);
if (group == 0) {
if (sc->ext_2ghz_lna) {
rt2860_mcu_bbp_write(sc, 82, 0x62);
rt2860_mcu_bbp_write(sc, 75, 0x46);
} else {
rt2860_mcu_bbp_write(sc, 82, 0x84);
rt2860_mcu_bbp_write(sc, 75, 0x50);
}
} else {
if (sc->ext_5ghz_lna) {
rt2860_mcu_bbp_write(sc, 82, 0xf2);
rt2860_mcu_bbp_write(sc, 75, 0x46);
} else {
rt2860_mcu_bbp_write(sc, 82, 0xf2);
rt2860_mcu_bbp_write(sc, 75, 0x50);
}
}
tmp = RAL_READ(sc, RT2860_TX_BAND_CFG);
tmp &= ~(RT2860_5G_BAND_SEL_N | RT2860_5G_BAND_SEL_P);
tmp |= (group == 0) ? RT2860_5G_BAND_SEL_N : RT2860_5G_BAND_SEL_P;
RAL_WRITE(sc, RT2860_TX_BAND_CFG, tmp);
/* enable appropriate Power Amplifiers and Low Noise Amplifiers */
tmp = RT2860_RFTR_EN | RT2860_TRSW_EN | RT2860_LNA_PE0_EN;
if (sc->nrxchains > 1)
tmp |= RT2860_LNA_PE1_EN;
if (sc->mac_ver == 0x3593 && sc->nrxchains > 2)
tmp |= RT3593_LNA_PE2_EN;
if (group == 0) { /* 2GHz */
tmp |= RT2860_PA_PE_G0_EN;
if (sc->ntxchains > 1)
tmp |= RT2860_PA_PE_G1_EN;
if (sc->mac_ver == 0x3593 && sc->ntxchains > 2)
tmp |= RT3593_PA_PE_G2_EN;
} else { /* 5GHz */
tmp |= RT2860_PA_PE_A0_EN;
if (sc->ntxchains > 1)
tmp |= RT2860_PA_PE_A1_EN;
if (sc->mac_ver == 0x3593 && sc->ntxchains > 2)
tmp |= RT3593_PA_PE_A2_EN;
}
RAL_WRITE(sc, RT2860_TX_PIN_CFG, tmp);
if (sc->mac_ver == 0x3593) {
tmp = RAL_READ(sc, RT2860_GPIO_CTRL);
if (sc->sc_flags & RT2860_PCIE) {
tmp &= ~0x01010000;
if (group == 0)
tmp |= 0x00010000;
} else {
tmp &= ~0x00008080;
if (group == 0)
tmp |= 0x00000080;
}
tmp = (tmp & ~0x00001000) | 0x00000010;
RAL_WRITE(sc, RT2860_GPIO_CTRL, tmp);
}
/* set initial AGC value */
if (group == 0) { /* 2GHz band */
if (sc->mac_ver >= 0x3071)
agc = 0x1c + sc->lna[0] * 2;
else
agc = 0x2e + sc->lna[0];
} else { /* 5GHz band */
agc = 0x32 + (sc->lna[group] * 5) / 3;
}
rt2860_mcu_bbp_write(sc, 66, agc);
DELAY(1000);
}
static void
rt2860_set_chan(struct rt2860_softc *sc, u_int chan)
{
const struct rfprog *rfprog = rt2860_rf2850;
uint32_t r2, r3, r4;
int8_t txpow1, txpow2;
u_int i;
/* find the settings for this channel (we know it exists) */
for (i = 0; rfprog[i].chan != chan; i++);
r2 = rfprog[i].r2;
if (sc->ntxchains == 1)
r2 |= 1 << 12; /* 1T: disable Tx chain 2 */
if (sc->nrxchains == 1)
r2 |= 1 << 15 | 1 << 4; /* 1R: disable Rx chains 2 & 3 */
else if (sc->nrxchains == 2)
r2 |= 1 << 4; /* 2R: disable Rx chain 3 */
/* use Tx power values from EEPROM */
txpow1 = sc->txpow1[i];
txpow2 = sc->txpow2[i];
if (chan > 14) {
if (txpow1 >= 0)
txpow1 = txpow1 << 1 | 1;
else
txpow1 = (7 + txpow1) << 1;
if (txpow2 >= 0)
txpow2 = txpow2 << 1 | 1;
else
txpow2 = (7 + txpow2) << 1;
}
r3 = rfprog[i].r3 | txpow1 << 7;
r4 = rfprog[i].r4 | sc->freq << 13 | txpow2 << 4;
rt2860_rf_write(sc, RT2860_RF1, rfprog[i].r1);
rt2860_rf_write(sc, RT2860_RF2, r2);
rt2860_rf_write(sc, RT2860_RF3, r3);
rt2860_rf_write(sc, RT2860_RF4, r4);
DELAY(200);
rt2860_rf_write(sc, RT2860_RF1, rfprog[i].r1);
rt2860_rf_write(sc, RT2860_RF2, r2);
rt2860_rf_write(sc, RT2860_RF3, r3 | 1);
rt2860_rf_write(sc, RT2860_RF4, r4);
DELAY(200);
rt2860_rf_write(sc, RT2860_RF1, rfprog[i].r1);
rt2860_rf_write(sc, RT2860_RF2, r2);
rt2860_rf_write(sc, RT2860_RF3, r3);
rt2860_rf_write(sc, RT2860_RF4, r4);
}
static void
rt3090_set_chan(struct rt2860_softc *sc, u_int chan)
{
int8_t txpow1, txpow2;
uint8_t rf;
int i;
/* RT3090 is 2GHz only */
KASSERT(chan >= 1 && chan <= 14, ("chan %d not support", chan));
/* find the settings for this channel (we know it exists) */
for (i = 0; rt2860_rf2850[i].chan != chan; i++);
/* use Tx power values from EEPROM */
txpow1 = sc->txpow1[i];
txpow2 = sc->txpow2[i];
rt3090_rf_write(sc, 2, rt3090_freqs[i].n);
rf = rt3090_rf_read(sc, 3);
rf = (rf & ~0x0f) | rt3090_freqs[i].k;
rt3090_rf_write(sc, 3, rf);
rf = rt3090_rf_read(sc, 6);
rf = (rf & ~0x03) | rt3090_freqs[i].r;
rt3090_rf_write(sc, 6, rf);
/* set Tx0 power */
rf = rt3090_rf_read(sc, 12);
rf = (rf & ~0x1f) | txpow1;
rt3090_rf_write(sc, 12, rf);
/* set Tx1 power */
rf = rt3090_rf_read(sc, 13);
rf = (rf & ~0x1f) | txpow2;
rt3090_rf_write(sc, 13, rf);
rf = rt3090_rf_read(sc, 1);
rf &= ~0xfc;
if (sc->ntxchains == 1)
rf |= RT3070_TX1_PD | RT3070_TX2_PD;
else if (sc->ntxchains == 2)
rf |= RT3070_TX2_PD;
if (sc->nrxchains == 1)
rf |= RT3070_RX1_PD | RT3070_RX2_PD;
else if (sc->nrxchains == 2)
rf |= RT3070_RX2_PD;
rt3090_rf_write(sc, 1, rf);
/* set RF offset */
rf = rt3090_rf_read(sc, 23);
rf = (rf & ~0x7f) | sc->freq;
rt3090_rf_write(sc, 23, rf);
/* program RF filter */
rf = rt3090_rf_read(sc, 24); /* Tx */
rf = (rf & ~0x3f) | sc->rf24_20mhz;
rt3090_rf_write(sc, 24, rf);
rf = rt3090_rf_read(sc, 31); /* Rx */
rf = (rf & ~0x3f) | sc->rf24_20mhz;
rt3090_rf_write(sc, 31, rf);
/* enable RF tuning */
rf = rt3090_rf_read(sc, 7);
rt3090_rf_write(sc, 7, rf | RT3070_TUNE);
}
static void
rt5390_set_chan(struct rt2860_softc *sc, u_int chan)
{
uint8_t h20mhz, rf, tmp;
int8_t txpow1, txpow2;
int i;
/* RT5390 is 2GHz only */
KASSERT(chan >= 1 && chan <= 14, ("chan %d not support", chan));
/* find the settings for this channel (we know it exists) */
for (i = 0; rt2860_rf2850[i].chan != chan; i++);
/* use Tx power values from EEPROM */
txpow1 = sc->txpow1[i];
txpow2 = sc->txpow2[i];
rt3090_rf_write(sc, 8, rt3090_freqs[i].n);
rt3090_rf_write(sc, 9, rt3090_freqs[i].k & 0x0f);
rf = rt3090_rf_read(sc, 11);
rf = (rf & ~0x03) | (rt3090_freqs[i].r & 0x03);
rt3090_rf_write(sc, 11, rf);
rf = rt3090_rf_read(sc, 49);
rf = (rf & ~0x3f) | (txpow1 & 0x3f);
/* the valid range of the RF R49 is 0x00~0x27 */
if ((rf & 0x3f) > 0x27)
rf = (rf & ~0x3f) | 0x27;
rt3090_rf_write(sc, 49, rf);
if (sc->mac_ver == 0x5392) {
rf = rt3090_rf_read(sc, 50);
rf = (rf & ~0x3f) | (txpow2 & 0x3f);
/* the valid range of the RF R50 is 0x00~0x27 */
if ((rf & 0x3f) > 0x27)
rf = (rf & ~0x3f) | 0x27;
rt3090_rf_write(sc, 50, rf);
}
rf = rt3090_rf_read(sc, 1);
rf |= RT3070_RF_BLOCK | RT3070_PLL_PD | RT3070_RX0_PD | RT3070_TX0_PD;
if (sc->mac_ver == 0x5392)
rf |= RT3070_RX1_PD | RT3070_TX1_PD;
rt3090_rf_write(sc, 1, rf);
rf = rt3090_rf_read(sc, 2);
rt3090_rf_write(sc, 2, rf | RT3593_RESCAL);
DELAY(1000);
rt3090_rf_write(sc, 2, rf & ~RT3593_RESCAL);
rf = rt3090_rf_read(sc, 17);
tmp = rf;
rf = (rf & ~0x7f) | (sc->freq & 0x7f);
rf = MIN(rf, 0x5f);
if (tmp != rf)
rt2860_mcu_cmd(sc, 0x74, (tmp << 8) | rf, 0);
if (sc->mac_ver == 0x5390) {
if (chan <= 4)
rf = 0x73;
else if (chan >= 5 && chan <= 6)
rf = 0x63;
else if (chan >= 7 && chan <= 10)
rf = 0x53;
else
rf = 43;
rt3090_rf_write(sc, 55, rf);
if (chan == 1)
rf = 0x0c;
else if (chan == 2)
rf = 0x0b;
else if (chan == 3)
rf = 0x0a;
else if (chan >= 4 && chan <= 6)
rf = 0x09;
else if (chan >= 7 && chan <= 12)
rf = 0x08;
else if (chan == 13)
rf = 0x07;
else
rf = 0x06;
rt3090_rf_write(sc, 59, rf);
}
/* Tx/Rx h20M */
h20mhz = (sc->rf24_20mhz & 0x20) >> 5;
rf = rt3090_rf_read(sc, 30);
rf = (rf & ~0x06) | (h20mhz << 1) | (h20mhz << 2);
rt3090_rf_write(sc, 30, rf);
/* Rx BB filter VCM */
rf = rt3090_rf_read(sc, 30);
rf = (rf & ~0x18) | 0x10;
rt3090_rf_write(sc, 30, rf);
/* Initiate VCO calibration. */
rf = rt3090_rf_read(sc, 3);
rf |= RT3593_VCOCAL;
rt3090_rf_write(sc, 3, rf);
}
static int
rt3090_rf_init(struct rt2860_softc *sc)
{
uint32_t tmp;
uint8_t rf, bbp;
int i;
rf = rt3090_rf_read(sc, 30);
/* toggle RF R30 bit 7 */
rt3090_rf_write(sc, 30, rf | 0x80);
DELAY(1000);
rt3090_rf_write(sc, 30, rf & ~0x80);
tmp = RAL_READ(sc, RT3070_LDO_CFG0);
tmp &= ~0x1f000000;
if (sc->patch_dac && sc->mac_rev < 0x0211)
tmp |= 0x0d000000; /* 1.35V */
else
tmp |= 0x01000000; /* 1.2V */
RAL_WRITE(sc, RT3070_LDO_CFG0, tmp);
/* patch LNA_PE_G1 */
tmp = RAL_READ(sc, RT3070_GPIO_SWITCH);
RAL_WRITE(sc, RT3070_GPIO_SWITCH, tmp & ~0x20);
/* initialize RF registers to default value */
for (i = 0; i < nitems(rt3090_def_rf); i++) {
rt3090_rf_write(sc, rt3090_def_rf[i].reg,
rt3090_def_rf[i].val);
}
/* select 20MHz bandwidth */
rt3090_rf_write(sc, 31, 0x14);
rf = rt3090_rf_read(sc, 6);
rt3090_rf_write(sc, 6, rf | 0x40);
if (sc->mac_ver != 0x3593) {
/* calibrate filter for 20MHz bandwidth */
sc->rf24_20mhz = 0x1f; /* default value */
rt3090_filter_calib(sc, 0x07, 0x16, &sc->rf24_20mhz);
/* select 40MHz bandwidth */
bbp = rt2860_mcu_bbp_read(sc, 4);
rt2860_mcu_bbp_write(sc, 4, (bbp & ~0x08) | 0x10);
rf = rt3090_rf_read(sc, 31);
rt3090_rf_write(sc, 31, rf | 0x20);
/* calibrate filter for 40MHz bandwidth */
sc->rf24_40mhz = 0x2f; /* default value */
rt3090_filter_calib(sc, 0x27, 0x19, &sc->rf24_40mhz);
/* go back to 20MHz bandwidth */
bbp = rt2860_mcu_bbp_read(sc, 4);
rt2860_mcu_bbp_write(sc, 4, bbp & ~0x18);
}
if (sc->mac_rev < 0x0211)
rt3090_rf_write(sc, 27, 0x03);
tmp = RAL_READ(sc, RT3070_OPT_14);
RAL_WRITE(sc, RT3070_OPT_14, tmp | 1);
if (sc->rf_rev == RT3070_RF_3020)
rt3090_set_rx_antenna(sc, 0);
bbp = rt2860_mcu_bbp_read(sc, 138);
if (sc->mac_ver == 0x3593) {
if (sc->ntxchains == 1)
bbp |= 0x60; /* turn off DAC1 and DAC2 */
else if (sc->ntxchains == 2)
bbp |= 0x40; /* turn off DAC2 */
if (sc->nrxchains == 1)
bbp &= ~0x06; /* turn off ADC1 and ADC2 */
else if (sc->nrxchains == 2)
bbp &= ~0x04; /* turn off ADC2 */
} else {
if (sc->ntxchains == 1)
bbp |= 0x20; /* turn off DAC1 */
if (sc->nrxchains == 1)
bbp &= ~0x02; /* turn off ADC1 */
}
rt2860_mcu_bbp_write(sc, 138, bbp);
rf = rt3090_rf_read(sc, 1);
rf &= ~(RT3070_RX0_PD | RT3070_TX0_PD);
rf |= RT3070_RF_BLOCK | RT3070_RX1_PD | RT3070_TX1_PD;
rt3090_rf_write(sc, 1, rf);
rf = rt3090_rf_read(sc, 15);
rt3090_rf_write(sc, 15, rf & ~RT3070_TX_LO2);
rf = rt3090_rf_read(sc, 17);
rf &= ~RT3070_TX_LO1;
if (sc->mac_rev >= 0x0211 && !sc->ext_2ghz_lna)
rf |= 0x20; /* fix for long range Rx issue */
if (sc->txmixgain_2ghz >= 2)
rf = (rf & ~0x7) | sc->txmixgain_2ghz;
rt3090_rf_write(sc, 17, rf);
rf = rt3090_rf_read(sc, 20);
rt3090_rf_write(sc, 20, rf & ~RT3070_RX_LO1);
rf = rt3090_rf_read(sc, 21);
rt3090_rf_write(sc, 21, rf & ~RT3070_RX_LO2);
return (0);
}
static void
rt5390_rf_init(struct rt2860_softc *sc)
{
uint8_t rf, bbp;
int i;
rf = rt3090_rf_read(sc, 2);
/* Toggle RF R2 bit 7. */
rt3090_rf_write(sc, 2, rf | RT3593_RESCAL);
DELAY(1000);
rt3090_rf_write(sc, 2, rf & ~RT3593_RESCAL);
/* Initialize RF registers to default value. */
if (sc->mac_ver == 0x5392) {
for (i = 0; i < nitems(rt5392_def_rf); i++) {
rt3090_rf_write(sc, rt5392_def_rf[i].reg,
rt5392_def_rf[i].val);
}
} else {
for (i = 0; i < nitems(rt5390_def_rf); i++) {
rt3090_rf_write(sc, rt5390_def_rf[i].reg,
rt5390_def_rf[i].val);
}
}
sc->rf24_20mhz = 0x1f;
sc->rf24_40mhz = 0x2f;
if (sc->mac_rev < 0x0211)
rt3090_rf_write(sc, 27, 0x03);
/* Set led open drain enable. */
RAL_WRITE(sc, RT3070_OPT_14, RAL_READ(sc, RT3070_OPT_14) | 1);
RAL_WRITE(sc, RT2860_TX_SW_CFG1, 0);
RAL_WRITE(sc, RT2860_TX_SW_CFG2, 0);
if (sc->mac_ver == 0x5390)
rt3090_set_rx_antenna(sc, 0);
/* Patch RSSI inaccurate issue. */
rt2860_mcu_bbp_write(sc, 79, 0x13);
rt2860_mcu_bbp_write(sc, 80, 0x05);
rt2860_mcu_bbp_write(sc, 81, 0x33);
/* Enable DC filter. */
if (sc->mac_rev >= 0x0211)
rt2860_mcu_bbp_write(sc, 103, 0xc0);
bbp = rt2860_mcu_bbp_read(sc, 138);
if (sc->ntxchains == 1)
bbp |= 0x20; /* Turn off DAC1. */
if (sc->nrxchains == 1)
bbp &= ~0x02; /* Turn off ADC1. */
rt2860_mcu_bbp_write(sc, 138, bbp);
/* Enable RX LO1 and LO2. */
rt3090_rf_write(sc, 38, rt3090_rf_read(sc, 38) & ~RT5390_RX_LO1);
rt3090_rf_write(sc, 39, rt3090_rf_read(sc, 39) & ~RT5390_RX_LO2);
/* Avoid data lost and CRC error. */
rt2860_mcu_bbp_write(sc, 4,
rt2860_mcu_bbp_read(sc, 4) | RT5390_MAC_IF_CTRL);
rf = rt3090_rf_read(sc, 30);
rf = (rf & ~0x18) | 0x10;
rt3090_rf_write(sc, 30, rf);
}
static void
rt3090_rf_wakeup(struct rt2860_softc *sc)
{
uint32_t tmp;
uint8_t rf;
if (sc->mac_ver == 0x3593) {
/* enable VCO */
rf = rt3090_rf_read(sc, 1);
rt3090_rf_write(sc, 1, rf | RT3593_VCO);
/* initiate VCO calibration */
rf = rt3090_rf_read(sc, 3);
rt3090_rf_write(sc, 3, rf | RT3593_VCOCAL);
/* enable VCO bias current control */
rf = rt3090_rf_read(sc, 6);
rt3090_rf_write(sc, 6, rf | RT3593_VCO_IC);
/* initiate res calibration */
rf = rt3090_rf_read(sc, 2);
rt3090_rf_write(sc, 2, rf | RT3593_RESCAL);
/* set reference current control to 0.33 mA */
rf = rt3090_rf_read(sc, 22);
rf &= ~RT3593_CP_IC_MASK;
rf |= 1 << RT3593_CP_IC_SHIFT;
rt3090_rf_write(sc, 22, rf);
/* enable RX CTB */
rf = rt3090_rf_read(sc, 46);
rt3090_rf_write(sc, 46, rf | RT3593_RX_CTB);
rf = rt3090_rf_read(sc, 20);
rf &= ~(RT3593_LDO_RF_VC_MASK | RT3593_LDO_PLL_VC_MASK);
rt3090_rf_write(sc, 20, rf);
} else {
/* enable RF block */
rf = rt3090_rf_read(sc, 1);
rt3090_rf_write(sc, 1, rf | RT3070_RF_BLOCK);
/* enable VCO bias current control */
rf = rt3090_rf_read(sc, 7);
rt3090_rf_write(sc, 7, rf | 0x30);
rf = rt3090_rf_read(sc, 9);
rt3090_rf_write(sc, 9, rf | 0x0e);
/* enable RX CTB */
rf = rt3090_rf_read(sc, 21);
rt3090_rf_write(sc, 21, rf | RT3070_RX_CTB);
/* fix Tx to Rx IQ glitch by raising RF voltage */
rf = rt3090_rf_read(sc, 27);
rf &= ~0x77;
if (sc->mac_rev < 0x0211)
rf |= 0x03;
rt3090_rf_write(sc, 27, rf);
}
if (sc->patch_dac && sc->mac_rev < 0x0211) {
tmp = RAL_READ(sc, RT3070_LDO_CFG0);
tmp = (tmp & ~0x1f000000) | 0x0d000000;
RAL_WRITE(sc, RT3070_LDO_CFG0, tmp);
}
}
static void
rt5390_rf_wakeup(struct rt2860_softc *sc)
{
uint32_t tmp;
uint8_t rf;
rf = rt3090_rf_read(sc, 1);
rf |= RT3070_RF_BLOCK | RT3070_PLL_PD | RT3070_RX0_PD |
RT3070_TX0_PD;
if (sc->mac_ver == 0x5392)
rf |= RT3070_RX1_PD | RT3070_TX1_PD;
rt3090_rf_write(sc, 1, rf);
rf = rt3090_rf_read(sc, 6);
rf |= RT3593_VCO_IC | RT3593_VCOCAL;
if (sc->mac_ver == 0x5390)
rf &= ~RT3593_VCO_IC;
rt3090_rf_write(sc, 6, rf);
rt3090_rf_write(sc, 2, rt3090_rf_read(sc, 2) | RT3593_RESCAL);
rf = rt3090_rf_read(sc, 22);
rf = (rf & ~0xe0) | 0x20;
rt3090_rf_write(sc, 22, rf);
rt3090_rf_write(sc, 42, rt3090_rf_read(sc, 42) | RT5390_RX_CTB);
rt3090_rf_write(sc, 20, rt3090_rf_read(sc, 20) & ~0x77);
rt3090_rf_write(sc, 3, rt3090_rf_read(sc, 3) | RT3593_VCOCAL);
if (sc->patch_dac && sc->mac_rev < 0x0211) {
tmp = RAL_READ(sc, RT3070_LDO_CFG0);
tmp = (tmp & ~0x1f000000) | 0x0d000000;
RAL_WRITE(sc, RT3070_LDO_CFG0, tmp);
}
}
static int
rt3090_filter_calib(struct rt2860_softc *sc, uint8_t init, uint8_t target,
uint8_t *val)
{
uint8_t rf22, rf24;
uint8_t bbp55_pb, bbp55_sb, delta;
int ntries;
/* program filter */
rf24 = rt3090_rf_read(sc, 24);
rf24 = (rf24 & 0xc0) | init; /* initial filter value */
rt3090_rf_write(sc, 24, rf24);
/* enable baseband loopback mode */
rf22 = rt3090_rf_read(sc, 22);
rt3090_rf_write(sc, 22, rf22 | RT3070_BB_LOOPBACK);
/* set power and frequency of passband test tone */
rt2860_mcu_bbp_write(sc, 24, 0x00);
for (ntries = 0; ntries < 100; ntries++) {
/* transmit test tone */
rt2860_mcu_bbp_write(sc, 25, 0x90);
DELAY(1000);
/* read received power */
bbp55_pb = rt2860_mcu_bbp_read(sc, 55);
if (bbp55_pb != 0)
break;
}
if (ntries == 100)
return (ETIMEDOUT);
/* set power and frequency of stopband test tone */
rt2860_mcu_bbp_write(sc, 24, 0x06);
for (ntries = 0; ntries < 100; ntries++) {
/* transmit test tone */
rt2860_mcu_bbp_write(sc, 25, 0x90);
DELAY(1000);
/* read received power */
bbp55_sb = rt2860_mcu_bbp_read(sc, 55);
delta = bbp55_pb - bbp55_sb;
if (delta > target)
break;
/* reprogram filter */
rf24++;
rt3090_rf_write(sc, 24, rf24);
}
if (ntries < 100) {
if (rf24 != init)
rf24--; /* backtrack */
*val = rf24;
rt3090_rf_write(sc, 24, rf24);
}
/* restore initial state */
rt2860_mcu_bbp_write(sc, 24, 0x00);
/* disable baseband loopback mode */
rf22 = rt3090_rf_read(sc, 22);
rt3090_rf_write(sc, 22, rf22 & ~RT3070_BB_LOOPBACK);
return (0);
}
static void
rt3090_rf_setup(struct rt2860_softc *sc)
{
uint8_t bbp;
int i;
if (sc->mac_rev >= 0x0211) {
/* enable DC filter */
rt2860_mcu_bbp_write(sc, 103, 0xc0);
/* improve power consumption */
bbp = rt2860_mcu_bbp_read(sc, 31);
rt2860_mcu_bbp_write(sc, 31, bbp & ~0x03);
}
RAL_WRITE(sc, RT2860_TX_SW_CFG1, 0);
if (sc->mac_rev < 0x0211) {
RAL_WRITE(sc, RT2860_TX_SW_CFG2,
sc->patch_dac ? 0x2c : 0x0f);
} else
RAL_WRITE(sc, RT2860_TX_SW_CFG2, 0);
/* initialize RF registers from ROM */
if (sc->mac_ver < 0x5390) {
for (i = 0; i < 10; i++) {
if (sc->rf[i].reg == 0 || sc->rf[i].reg == 0xff)
continue;
rt3090_rf_write(sc, sc->rf[i].reg, sc->rf[i].val);
}
}
}
static void
rt2860_set_leds(struct rt2860_softc *sc, uint16_t which)
{
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LEDS,
which | (sc->leds & 0x7f), 0);
}
/*
* Hardware has a general-purpose programmable timer interrupt that can
* periodically raise MAC_INT_4.
*/
static void
rt2860_set_gp_timer(struct rt2860_softc *sc, int ms)
{
uint32_t tmp;
/* disable GP timer before reprogramming it */
tmp = RAL_READ(sc, RT2860_INT_TIMER_EN);
RAL_WRITE(sc, RT2860_INT_TIMER_EN, tmp & ~RT2860_GP_TIMER_EN);
if (ms == 0)
return;
tmp = RAL_READ(sc, RT2860_INT_TIMER_CFG);
ms *= 16; /* Unit: 64us */
tmp = (tmp & 0xffff) | ms << RT2860_GP_TIMER_SHIFT;
RAL_WRITE(sc, RT2860_INT_TIMER_CFG, tmp);
/* enable GP timer */
tmp = RAL_READ(sc, RT2860_INT_TIMER_EN);
RAL_WRITE(sc, RT2860_INT_TIMER_EN, tmp | RT2860_GP_TIMER_EN);
}
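/*
 * Illustrative note (not from the original source): the timer period
 * is programmed in 64us units, so ms * 16 gives roughly the requested
 * number of milliseconds (16 * 64us = 1024us).  The scan-end path
 * above uses rt2860_set_gp_timer(sc, 500) for an ~500 ms periodic
 * MAC_INT_4.
 */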
static void
rt2860_set_bssid(struct rt2860_softc *sc, const uint8_t *bssid)
{
RAL_WRITE(sc, RT2860_MAC_BSSID_DW0,
bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24);
RAL_WRITE(sc, RT2860_MAC_BSSID_DW1,
bssid[4] | bssid[5] << 8);
}
static void
rt2860_set_macaddr(struct rt2860_softc *sc, const uint8_t *addr)
{
RAL_WRITE(sc, RT2860_MAC_ADDR_DW0,
addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
RAL_WRITE(sc, RT2860_MAC_ADDR_DW1,
addr[4] | addr[5] << 8 | 0xff << 16);
}
static void
rt2860_updateslot(struct ieee80211com *ic)
{
struct rt2860_softc *sc = ic->ic_softc;
uint32_t tmp;
tmp = RAL_READ(sc, RT2860_BKOFF_SLOT_CFG);
tmp &= ~0xff;
tmp |= (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20;
RAL_WRITE(sc, RT2860_BKOFF_SLOT_CFG, tmp);
}
static void
-rt2860_updateprot(struct ifnet *ifp)
+rt2860_updateprot(struct rt2860_softc *sc)
{
- struct rt2860_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
tmp = RT2860_RTSTH_EN | RT2860_PROT_NAV_SHORT | RT2860_TXOP_ALLOW_ALL;
/* setup protection frame rate (MCS code) */
tmp |= IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan) ?
rt2860_rates[RT2860_RIDX_OFDM6].mcs :
rt2860_rates[RT2860_RIDX_CCK11].mcs;
/* CCK frames don't require protection */
RAL_WRITE(sc, RT2860_CCK_PROT_CFG, tmp);
if (ic->ic_flags & IEEE80211_F_USEPROT) {
if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
tmp |= RT2860_PROT_CTRL_RTS_CTS;
else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
tmp |= RT2860_PROT_CTRL_CTS;
}
RAL_WRITE(sc, RT2860_OFDM_PROT_CFG, tmp);
}
static void
rt2860_update_promisc(struct ieee80211com *ic)
{
struct rt2860_softc *sc = ic->ic_softc;
uint32_t tmp;
tmp = RAL_READ(sc, RT2860_RX_FILTR_CFG);
tmp &= ~RT2860_DROP_NOT_MYBSS;
- if (!(ic->ic_ifp->if_flags & IFF_PROMISC))
+ if (ic->ic_promisc == 0)
tmp |= RT2860_DROP_NOT_MYBSS;
RAL_WRITE(sc, RT2860_RX_FILTR_CFG, tmp);
}
static int
rt2860_updateedca(struct ieee80211com *ic)
{
- struct rt2860_softc *sc = ic->ic_ifp->if_softc;
+ struct rt2860_softc *sc = ic->ic_softc;
const struct wmeParams *wmep;
int aci;
wmep = ic->ic_wme.wme_chanParams.cap_wmeParams;
/* update MAC TX configuration registers */
for (aci = 0; aci < WME_NUM_AC; aci++) {
RAL_WRITE(sc, RT2860_EDCA_AC_CFG(aci),
wmep[aci].wmep_logcwmax << 16 |
wmep[aci].wmep_logcwmin << 12 |
wmep[aci].wmep_aifsn << 8 |
wmep[aci].wmep_txopLimit);
}
/* update SCH/DMA registers too */
RAL_WRITE(sc, RT2860_WMM_AIFSN_CFG,
wmep[WME_AC_VO].wmep_aifsn << 12 |
wmep[WME_AC_VI].wmep_aifsn << 8 |
wmep[WME_AC_BK].wmep_aifsn << 4 |
wmep[WME_AC_BE].wmep_aifsn);
RAL_WRITE(sc, RT2860_WMM_CWMIN_CFG,
wmep[WME_AC_VO].wmep_logcwmin << 12 |
wmep[WME_AC_VI].wmep_logcwmin << 8 |
wmep[WME_AC_BK].wmep_logcwmin << 4 |
wmep[WME_AC_BE].wmep_logcwmin);
RAL_WRITE(sc, RT2860_WMM_CWMAX_CFG,
wmep[WME_AC_VO].wmep_logcwmax << 12 |
wmep[WME_AC_VI].wmep_logcwmax << 8 |
wmep[WME_AC_BK].wmep_logcwmax << 4 |
wmep[WME_AC_BE].wmep_logcwmax);
RAL_WRITE(sc, RT2860_WMM_TXOP0_CFG,
wmep[WME_AC_BK].wmep_txopLimit << 16 |
wmep[WME_AC_BE].wmep_txopLimit);
RAL_WRITE(sc, RT2860_WMM_TXOP1_CFG,
wmep[WME_AC_VO].wmep_txopLimit << 16 |
wmep[WME_AC_VI].wmep_txopLimit);
return 0;
}
#ifdef HW_CRYPTO
static int
rt2860_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
struct ieee80211_key *k)
{
struct rt2860_softc *sc = ic->ic_softc;
bus_size_t base;
uint32_t attr;
uint8_t mode, wcid, iv[8];
/* defer setting of WEP keys until interface is brought up */
if ((ic->ic_if.if_flags & (IFF_UP | IFF_RUNNING)) !=
(IFF_UP | IFF_RUNNING))
return 0;
/* map net80211 cipher to RT2860 security mode */
switch (k->k_cipher) {
case IEEE80211_CIPHER_WEP40:
mode = RT2860_MODE_WEP40;
break;
case IEEE80211_CIPHER_WEP104:
mode = RT2860_MODE_WEP104;
break;
case IEEE80211_CIPHER_TKIP:
mode = RT2860_MODE_TKIP;
break;
case IEEE80211_CIPHER_CCMP:
mode = RT2860_MODE_AES_CCMP;
break;
default:
return EINVAL;
}
if (k->k_flags & IEEE80211_KEY_GROUP) {
wcid = 0; /* NB: update WCID0 for group keys */
base = RT2860_SKEY(0, k->k_id);
} else {
wcid = ((struct rt2860_node *)ni)->wcid;
base = RT2860_PKEY(wcid);
}
if (k->k_cipher == IEEE80211_CIPHER_TKIP) {
RAL_WRITE_REGION_1(sc, base, k->k_key, 16);
#ifndef IEEE80211_STA_ONLY
if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
RAL_WRITE_REGION_1(sc, base + 16, &k->k_key[16], 8);
RAL_WRITE_REGION_1(sc, base + 24, &k->k_key[24], 8);
} else
#endif
{
RAL_WRITE_REGION_1(sc, base + 16, &k->k_key[24], 8);
RAL_WRITE_REGION_1(sc, base + 24, &k->k_key[16], 8);
}
} else
RAL_WRITE_REGION_1(sc, base, k->k_key, k->k_len);
if (!(k->k_flags & IEEE80211_KEY_GROUP) ||
(k->k_flags & IEEE80211_KEY_TX)) {
/* set initial packet number in IV+EIV */
if (k->k_cipher == IEEE80211_CIPHER_WEP40 ||
k->k_cipher == IEEE80211_CIPHER_WEP104) {
uint32_t val = arc4random();
/* skip weak IVs from Fluhrer/Mantin/Shamir */
if (val >= 0x03ff00 && (val & 0xf8ff00) == 0x00ff00)
val += 0x000100;
iv[0] = val;
iv[1] = val >> 8;
iv[2] = val >> 16;
iv[3] = k->k_id << 6;
iv[4] = iv[5] = iv[6] = iv[7] = 0;
} else {
if (k->k_cipher == IEEE80211_CIPHER_TKIP) {
iv[0] = k->k_tsc >> 8;
iv[1] = (iv[0] | 0x20) & 0x7f;
iv[2] = k->k_tsc;
} else /* CCMP */ {
iv[0] = k->k_tsc;
iv[1] = k->k_tsc >> 8;
iv[2] = 0;
}
iv[3] = k->k_id << 6 | IEEE80211_WEP_EXTIV;
iv[4] = k->k_tsc >> 16;
iv[5] = k->k_tsc >> 24;
iv[6] = k->k_tsc >> 32;
iv[7] = k->k_tsc >> 40;
}
RAL_WRITE_REGION_1(sc, RT2860_IVEIV(wcid), iv, 8);
}
if (k->k_flags & IEEE80211_KEY_GROUP) {
/* install group key */
attr = RAL_READ(sc, RT2860_SKEY_MODE_0_7);
attr &= ~(0xf << (k->k_id * 4));
attr |= mode << (k->k_id * 4);
RAL_WRITE(sc, RT2860_SKEY_MODE_0_7, attr);
} else {
/* install pairwise key */
attr = RAL_READ(sc, RT2860_WCID_ATTR(wcid));
attr = (attr & ~0xf) | (mode << 1) | RT2860_RX_PKEY_EN;
RAL_WRITE(sc, RT2860_WCID_ATTR(wcid), attr);
}
return 0;
}
static void
rt2860_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
struct ieee80211_key *k)
{
struct rt2860_softc *sc = ic->ic_softc;
uint32_t attr;
uint8_t wcid;
if (k->k_flags & IEEE80211_KEY_GROUP) {
/* remove group key */
attr = RAL_READ(sc, RT2860_SKEY_MODE_0_7);
attr &= ~(0xf << (k->k_id * 4));
RAL_WRITE(sc, RT2860_SKEY_MODE_0_7, attr);
} else {
/* remove pairwise key */
wcid = ((struct rt2860_node *)ni)->wcid;
attr = RAL_READ(sc, RT2860_WCID_ATTR(wcid));
attr &= ~0xf;
RAL_WRITE(sc, RT2860_WCID_ATTR(wcid), attr);
}
}
#endif
static int8_t
rt2860_rssi2dbm(struct rt2860_softc *sc, uint8_t rssi, uint8_t rxchain)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_channel *c = ic->ic_curchan;
int delta;
if (IEEE80211_IS_CHAN_5GHZ(c)) {
u_int chan = ieee80211_chan2ieee(ic, c);
delta = sc->rssi_5ghz[rxchain];
/* determine channel group */
if (chan <= 64)
delta -= sc->lna[1];
else if (chan <= 128)
delta -= sc->lna[2];
else
delta -= sc->lna[3];
} else
delta = sc->rssi_2ghz[rxchain] - sc->lna[0];
return -12 - delta - rssi;
}
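/*
 * Illustrative note (not from the original source): the conversion is
 * dBm = -12 - delta - rssi, where delta folds in the per-antenna RSSI
 * offset and the LNA gain of the channel group.  E.g. with a 2GHz
 * delta of 10 and a raw RSSI of 40 the reported level is -62 dBm.
 */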
/*
* Add `delta' (signed) to each 4-bit sub-word of a 32-bit word.
* Used to adjust per-rate Tx power registers.
*/
static __inline uint32_t
b4inc(uint32_t b32, int8_t delta)
{
int8_t i, b4;
for (i = 0; i < 8; i++) {
b4 = b32 & 0xf;
b4 += delta;
if (b4 < 0)
b4 = 0;
else if (b4 > 0xf)
b4 = 0xf;
b32 = b32 >> 4 | b4 << 28;
}
return b32;
}
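/*
 * Illustrative note (not from the original source): after eight
 * shift/insert steps every nibble ends up back in its original
 * position, so b4inc() simply offsets each 4-bit field with
 * saturation.  E.g. b4inc(0x44444444, 2) == 0x66666666 and
 * b4inc(0x2f2f2f2f, 3) == 0x5f5f5f5f (the 0xf nibbles clamp at 0xf).
 */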
static const char *
rt2860_get_rf(uint8_t rev)
{
switch (rev) {
case RT2860_RF_2820: return "RT2820";
case RT2860_RF_2850: return "RT2850";
case RT2860_RF_2720: return "RT2720";
case RT2860_RF_2750: return "RT2750";
case RT3070_RF_3020: return "RT3020";
case RT3070_RF_2020: return "RT2020";
case RT3070_RF_3021: return "RT3021";
case RT3070_RF_3022: return "RT3022";
case RT3070_RF_3052: return "RT3052";
case RT3070_RF_3320: return "RT3320";
case RT3070_RF_3053: return "RT3053";
case RT5390_RF_5390: return "RT5390";
default: return "unknown";
}
}
static int
rt2860_read_eeprom(struct rt2860_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
int8_t delta_2ghz, delta_5ghz;
uint32_t tmp;
uint16_t val;
int ridx, ant, i;
/* check whether the ROM is eFUSE ROM or EEPROM */
sc->sc_srom_read = rt2860_eeprom_read_2;
if (sc->mac_ver >= 0x3071) {
tmp = RAL_READ(sc, RT3070_EFUSE_CTRL);
DPRINTF(("EFUSE_CTRL=0x%08x\n", tmp));
if (tmp & RT3070_SEL_EFUSE)
sc->sc_srom_read = rt3090_efuse_read_2;
}
/* read EEPROM version */
val = rt2860_srom_read(sc, RT2860_EEPROM_VERSION);
DPRINTF(("EEPROM rev=%d, FAE=%d\n", val & 0xff, val >> 8));
/* read MAC address */
val = rt2860_srom_read(sc, RT2860_EEPROM_MAC01);
macaddr[0] = val & 0xff;
macaddr[1] = val >> 8;
val = rt2860_srom_read(sc, RT2860_EEPROM_MAC23);
macaddr[2] = val & 0xff;
macaddr[3] = val >> 8;
val = rt2860_srom_read(sc, RT2860_EEPROM_MAC45);
macaddr[4] = val & 0xff;
macaddr[5] = val >> 8;
/* read country code */
val = rt2860_srom_read(sc, RT2860_EEPROM_COUNTRY);
DPRINTF(("EEPROM region code=0x%04x\n", val));
/* read vendor BBP settings */
for (i = 0; i < 8; i++) {
val = rt2860_srom_read(sc, RT2860_EEPROM_BBP_BASE + i);
sc->bbp[i].val = val & 0xff;
sc->bbp[i].reg = val >> 8;
DPRINTF(("BBP%d=0x%02x\n", sc->bbp[i].reg, sc->bbp[i].val));
}
if (sc->mac_ver >= 0x3071) {
/* read vendor RF settings */
for (i = 0; i < 10; i++) {
val = rt2860_srom_read(sc, RT3071_EEPROM_RF_BASE + i);
sc->rf[i].val = val & 0xff;
sc->rf[i].reg = val >> 8;
DPRINTF(("RF%d=0x%02x\n", sc->rf[i].reg,
sc->rf[i].val));
}
}
/* read RF frequency offset from EEPROM */
val = rt2860_srom_read(sc, RT2860_EEPROM_FREQ_LEDS);
sc->freq = ((val & 0xff) != 0xff) ? val & 0xff : 0;
DPRINTF(("EEPROM freq offset %d\n", sc->freq & 0xff));
if ((val >> 8) != 0xff) {
/* read LEDs operating mode */
sc->leds = val >> 8;
sc->led[0] = rt2860_srom_read(sc, RT2860_EEPROM_LED1);
sc->led[1] = rt2860_srom_read(sc, RT2860_EEPROM_LED2);
sc->led[2] = rt2860_srom_read(sc, RT2860_EEPROM_LED3);
} else {
/* broken EEPROM, use default settings */
sc->leds = 0x01;
sc->led[0] = 0x5555;
sc->led[1] = 0x2221;
sc->led[2] = 0xa9f8;
}
DPRINTF(("EEPROM LED mode=0x%02x, LEDs=0x%04x/0x%04x/0x%04x\n",
sc->leds, sc->led[0], sc->led[1], sc->led[2]));
/* read RF information */
val = rt2860_srom_read(sc, RT2860_EEPROM_ANTENNA);
if (val == 0xffff) {
DPRINTF(("invalid EEPROM antenna info, using default\n"));
if (sc->mac_ver >= 0x5390) {
/* default to RF5390 */
sc->rf_rev = RT5390_RF_5390;
sc->ntxchains = (sc->mac_ver == 0x5392) ? 2 : 1;
sc->nrxchains = (sc->mac_ver == 0x5392) ? 2 : 1;
} else if (sc->mac_ver == 0x3593) {
/* default to RF3053 3T3R */
sc->rf_rev = RT3070_RF_3053;
sc->ntxchains = 3;
sc->nrxchains = 3;
} else if (sc->mac_ver >= 0x3071) {
/* default to RF3020 1T1R */
sc->rf_rev = RT3070_RF_3020;
sc->ntxchains = 1;
sc->nrxchains = 1;
} else {
/* default to RF2820 1T2R */
sc->rf_rev = RT2860_RF_2820;
sc->ntxchains = 1;
sc->nrxchains = 2;
}
} else {
sc->rf_rev = (val >> 8) & 0xf;
if (sc->mac_ver >= 0x5390) {
sc->ntxchains = (sc->mac_ver == 0x5392) ? 2 : 1;
sc->nrxchains = (sc->mac_ver == 0x5392) ? 2 : 1;
} else {
sc->ntxchains = (val >> 4) & 0xf;
sc->nrxchains = val & 0xf;
}
}
DPRINTF(("EEPROM RF rev=0x%02x chains=%dT%dR\n",
sc->rf_rev, sc->ntxchains, sc->nrxchains));
/* check if RF supports automatic Tx access gain control */
val = rt2860_srom_read(sc, RT2860_EEPROM_CONFIG);
DPRINTF(("EEPROM CFG 0x%04x\n", val));
/* check if driver should patch the DAC issue */
if ((val >> 8) != 0xff)
sc->patch_dac = (val >> 15) & 1;
if ((val & 0xff) != 0xff) {
sc->ext_5ghz_lna = (val >> 3) & 1;
sc->ext_2ghz_lna = (val >> 2) & 1;
/* check if RF supports automatic Tx access gain control */
sc->calib_2ghz = sc->calib_5ghz = 0; /* XXX (val >> 1) & 1 */;
/* check if we have a hardware radio switch */
sc->rfswitch = val & 1;
}
if (sc->sc_flags & RT2860_ADVANCED_PS) {
/* read PCIe power save level */
val = rt2860_srom_read(sc, RT2860_EEPROM_PCIE_PSLEVEL);
if ((val & 0xff) != 0xff) {
sc->pslevel = val & 0x3;
val = rt2860_srom_read(sc, RT2860_EEPROM_REV);
if ((val & 0xff80) != 0x9280)
sc->pslevel = MIN(sc->pslevel, 1);
DPRINTF(("EEPROM PCIe PS Level=%d\n", sc->pslevel));
}
}
/* read power settings for 2GHz channels */
for (i = 0; i < 14; i += 2) {
val = rt2860_srom_read(sc,
RT2860_EEPROM_PWR2GHZ_BASE1 + i / 2);
sc->txpow1[i + 0] = (int8_t)(val & 0xff);
sc->txpow1[i + 1] = (int8_t)(val >> 8);
if (sc->mac_ver != 0x5390) {
val = rt2860_srom_read(sc,
RT2860_EEPROM_PWR2GHZ_BASE2 + i / 2);
sc->txpow2[i + 0] = (int8_t)(val & 0xff);
sc->txpow2[i + 1] = (int8_t)(val >> 8);
}
}
/* fix broken Tx power entries */
for (i = 0; i < 14; i++) {
if (sc->txpow1[i] < 0 ||
sc->txpow1[i] > ((sc->mac_ver >= 0x5390) ? 39 : 31))
sc->txpow1[i] = 5;
if (sc->mac_ver != 0x5390) {
if (sc->txpow2[i] < 0 ||
sc->txpow2[i] > ((sc->mac_ver == 0x5392) ? 39 : 31))
sc->txpow2[i] = 5;
}
DPRINTF(("chan %d: power1=%d, power2=%d\n",
rt2860_rf2850[i].chan, sc->txpow1[i], sc->txpow2[i]));
}
/* read power settings for 5GHz channels */
for (i = 0; i < 40; i += 2) {
val = rt2860_srom_read(sc,
RT2860_EEPROM_PWR5GHZ_BASE1 + i / 2);
sc->txpow1[i + 14] = (int8_t)(val & 0xff);
sc->txpow1[i + 15] = (int8_t)(val >> 8);
val = rt2860_srom_read(sc,
RT2860_EEPROM_PWR5GHZ_BASE2 + i / 2);
sc->txpow2[i + 14] = (int8_t)(val & 0xff);
sc->txpow2[i + 15] = (int8_t)(val >> 8);
}
/* fix broken Tx power entries */
for (i = 0; i < 40; i++) {
if (sc->txpow1[14 + i] < -7 || sc->txpow1[14 + i] > 15)
sc->txpow1[14 + i] = 5;
if (sc->txpow2[14 + i] < -7 || sc->txpow2[14 + i] > 15)
sc->txpow2[14 + i] = 5;
DPRINTF(("chan %d: power1=%d, power2=%d\n",
rt2860_rf2850[14 + i].chan, sc->txpow1[14 + i],
sc->txpow2[14 + i]));
}
/* read Tx power compensation for each Tx rate */
val = rt2860_srom_read(sc, RT2860_EEPROM_DELTAPWR);
delta_2ghz = delta_5ghz = 0;
if ((val & 0xff) != 0xff && (val & 0x80)) {
delta_2ghz = val & 0xf;
if (!(val & 0x40)) /* negative number */
delta_2ghz = -delta_2ghz;
}
val >>= 8;
if ((val & 0xff) != 0xff && (val & 0x80)) {
delta_5ghz = val & 0xf;
if (!(val & 0x40)) /* negative number */
delta_5ghz = -delta_5ghz;
}
DPRINTF(("power compensation=%d (2GHz), %d (5GHz)\n",
delta_2ghz, delta_5ghz));
for (ridx = 0; ridx < 5; ridx++) {
uint32_t reg;
val = rt2860_srom_read(sc, RT2860_EEPROM_RPWR + ridx * 2);
reg = val;
val = rt2860_srom_read(sc, RT2860_EEPROM_RPWR + ridx * 2 + 1);
reg |= (uint32_t)val << 16;
sc->txpow20mhz[ridx] = reg;
sc->txpow40mhz_2ghz[ridx] = b4inc(reg, delta_2ghz);
sc->txpow40mhz_5ghz[ridx] = b4inc(reg, delta_5ghz);
DPRINTF(("ridx %d: power 20MHz=0x%08x, 40MHz/2GHz=0x%08x, "
"40MHz/5GHz=0x%08x\n", ridx, sc->txpow20mhz[ridx],
sc->txpow40mhz_2ghz[ridx], sc->txpow40mhz_5ghz[ridx]));
}
/* read factory-calibrated samples for temperature compensation */
val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI1_2GHZ);
sc->tssi_2ghz[0] = val & 0xff; /* [-4] */
sc->tssi_2ghz[1] = val >> 8; /* [-3] */
val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI2_2GHZ);
sc->tssi_2ghz[2] = val & 0xff; /* [-2] */
sc->tssi_2ghz[3] = val >> 8; /* [-1] */
val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI3_2GHZ);
sc->tssi_2ghz[4] = val & 0xff; /* [+0] */
sc->tssi_2ghz[5] = val >> 8; /* [+1] */
val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI4_2GHZ);
sc->tssi_2ghz[6] = val & 0xff; /* [+2] */
sc->tssi_2ghz[7] = val >> 8; /* [+3] */
val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI5_2GHZ);
sc->tssi_2ghz[8] = val & 0xff; /* [+4] */
sc->step_2ghz = val >> 8;
DPRINTF(("TSSI 2GHz: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x "
"0x%02x 0x%02x step=%d\n", sc->tssi_2ghz[0], sc->tssi_2ghz[1],
sc->tssi_2ghz[2], sc->tssi_2ghz[3], sc->tssi_2ghz[4],
sc->tssi_2ghz[5], sc->tssi_2ghz[6], sc->tssi_2ghz[7],
sc->tssi_2ghz[8], sc->step_2ghz));
/* check that ref value is correct, otherwise disable calibration */
if (sc->tssi_2ghz[4] == 0xff)
sc->calib_2ghz = 0;
val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI1_5GHZ);
sc->tssi_5ghz[0] = val & 0xff; /* [-4] */
sc->tssi_5ghz[1] = val >> 8; /* [-3] */
val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI2_5GHZ);
sc->tssi_5ghz[2] = val & 0xff; /* [-2] */
sc->tssi_5ghz[3] = val >> 8; /* [-1] */
val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI3_5GHZ);
sc->tssi_5ghz[4] = val & 0xff; /* [+0] */
sc->tssi_5ghz[5] = val >> 8; /* [+1] */
val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI4_5GHZ);
sc->tssi_5ghz[6] = val & 0xff; /* [+2] */
sc->tssi_5ghz[7] = val >> 8; /* [+3] */
val = rt2860_srom_read(sc, RT2860_EEPROM_TSSI5_5GHZ);
sc->tssi_5ghz[8] = val & 0xff; /* [+4] */
sc->step_5ghz = val >> 8;
DPRINTF(("TSSI 5GHz: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x "
"0x%02x 0x%02x step=%d\n", sc->tssi_5ghz[0], sc->tssi_5ghz[1],
sc->tssi_5ghz[2], sc->tssi_5ghz[3], sc->tssi_5ghz[4],
sc->tssi_5ghz[5], sc->tssi_5ghz[6], sc->tssi_5ghz[7],
sc->tssi_5ghz[8], sc->step_5ghz));
/* check that ref value is correct, otherwise disable calibration */
if (sc->tssi_5ghz[4] == 0xff)
sc->calib_5ghz = 0;
/* read RSSI offsets and LNA gains from EEPROM */
val = rt2860_srom_read(sc, RT2860_EEPROM_RSSI1_2GHZ);
sc->rssi_2ghz[0] = val & 0xff; /* Ant A */
sc->rssi_2ghz[1] = val >> 8; /* Ant B */
val = rt2860_srom_read(sc, RT2860_EEPROM_RSSI2_2GHZ);
if (sc->mac_ver >= 0x3071) {
/*
* On RT3090 chips (limited to 2 Rx chains), this ROM
* field contains the Tx mixer gain for the 2GHz band.
*/
if ((val & 0xff) != 0xff)
sc->txmixgain_2ghz = val & 0x7;
DPRINTF(("tx mixer gain=%u (2GHz)\n", sc->txmixgain_2ghz));
} else
sc->rssi_2ghz[2] = val & 0xff; /* Ant C */
sc->lna[2] = val >> 8; /* channel group 2 */
val = rt2860_srom_read(sc, RT2860_EEPROM_RSSI1_5GHZ);
sc->rssi_5ghz[0] = val & 0xff; /* Ant A */
sc->rssi_5ghz[1] = val >> 8; /* Ant B */
val = rt2860_srom_read(sc, RT2860_EEPROM_RSSI2_5GHZ);
sc->rssi_5ghz[2] = val & 0xff; /* Ant C */
sc->lna[3] = val >> 8; /* channel group 3 */
val = rt2860_srom_read(sc, RT2860_EEPROM_LNA);
if (sc->mac_ver >= 0x3071)
sc->lna[0] = RT3090_DEF_LNA;
else /* channel group 0 */
sc->lna[0] = val & 0xff;
sc->lna[1] = val >> 8; /* channel group 1 */
/* fix broken 5GHz LNA entries */
if (sc->lna[2] == 0 || sc->lna[2] == 0xff) {
DPRINTF(("invalid LNA for channel group %d\n", 2));
sc->lna[2] = sc->lna[1];
}
if (sc->lna[3] == 0 || sc->lna[3] == 0xff) {
DPRINTF(("invalid LNA for channel group %d\n", 3));
sc->lna[3] = sc->lna[1];
}
/* fix broken RSSI offset entries */
for (ant = 0; ant < 3; ant++) {
if (sc->rssi_2ghz[ant] < -10 || sc->rssi_2ghz[ant] > 10) {
DPRINTF(("invalid RSSI%d offset: %d (2GHz)\n",
ant + 1, sc->rssi_2ghz[ant]));
sc->rssi_2ghz[ant] = 0;
}
if (sc->rssi_5ghz[ant] < -10 || sc->rssi_5ghz[ant] > 10) {
DPRINTF(("invalid RSSI%d offset: %d (5GHz)\n",
ant + 1, sc->rssi_5ghz[ant]));
sc->rssi_5ghz[ant] = 0;
}
}
return 0;
}
static int
rt2860_bbp_init(struct rt2860_softc *sc)
{
int i, ntries;
/* wait for BBP to wake up */
for (ntries = 0; ntries < 20; ntries++) {
uint8_t bbp0 = rt2860_mcu_bbp_read(sc, 0);
if (bbp0 != 0 && bbp0 != 0xff)
break;
}
if (ntries == 20) {
device_printf(sc->sc_dev,
"timeout waiting for BBP to wake up\n");
return (ETIMEDOUT);
}
/* initialize BBP registers to default values */
if (sc->mac_ver >= 0x5390)
rt5390_bbp_init(sc);
else {
for (i = 0; i < nitems(rt2860_def_bbp); i++) {
rt2860_mcu_bbp_write(sc, rt2860_def_bbp[i].reg,
rt2860_def_bbp[i].val);
}
}
/* fix BBP84 for RT2860E */
if (sc->mac_ver == 0x2860 && sc->mac_rev != 0x0101)
rt2860_mcu_bbp_write(sc, 84, 0x19);
if (sc->mac_ver >= 0x3071) {
rt2860_mcu_bbp_write(sc, 79, 0x13);
rt2860_mcu_bbp_write(sc, 80, 0x05);
rt2860_mcu_bbp_write(sc, 81, 0x33);
} else if (sc->mac_ver == 0x2860 && sc->mac_rev == 0x0100) {
rt2860_mcu_bbp_write(sc, 69, 0x16);
rt2860_mcu_bbp_write(sc, 73, 0x12);
}
return 0;
}
static void
rt5390_bbp_init(struct rt2860_softc *sc)
{
uint8_t bbp;
int i;
/* Apply maximum likelihood detection for 2 stream case. */
if (sc->nrxchains > 1) {
bbp = rt2860_mcu_bbp_read(sc, 105);
rt2860_mcu_bbp_write(sc, 105, bbp | RT5390_MLD);
}
/* Avoid data lost and CRC error. */
bbp = rt2860_mcu_bbp_read(sc, 4);
rt2860_mcu_bbp_write(sc, 4, bbp | RT5390_MAC_IF_CTRL);
for (i = 0; i < nitems(rt5390_def_bbp); i++) {
rt2860_mcu_bbp_write(sc, rt5390_def_bbp[i].reg,
rt5390_def_bbp[i].val);
}
if (sc->mac_ver == 0x5392) {
rt2860_mcu_bbp_write(sc, 84, 0x9a);
rt2860_mcu_bbp_write(sc, 95, 0x9a);
rt2860_mcu_bbp_write(sc, 98, 0x12);
rt2860_mcu_bbp_write(sc, 106, 0x05);
rt2860_mcu_bbp_write(sc, 134, 0xd0);
rt2860_mcu_bbp_write(sc, 135, 0xf6);
}
bbp = rt2860_mcu_bbp_read(sc, 152);
rt2860_mcu_bbp_write(sc, 152, bbp | 0x80);
/* Disable hardware antenna diversity. */
if (sc->mac_ver == 0x5390)
rt2860_mcu_bbp_write(sc, 154, 0);
}
static int
rt2860_txrx_enable(struct rt2860_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
int ntries;
/* enable Tx/Rx DMA engine */
RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_TX_EN);
RAL_BARRIER_READ_WRITE(sc);
for (ntries = 0; ntries < 200; ntries++) {
tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG);
if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0)
break;
DELAY(1000);
}
if (ntries == 200) {
device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
return ETIMEDOUT;
}
DELAY(50);
tmp |= RT2860_RX_DMA_EN | RT2860_TX_DMA_EN |
RT2860_WPDMA_BT_SIZE64 << RT2860_WPDMA_BT_SIZE_SHIFT;
RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp);
/* set Rx filter */
tmp = RT2860_DROP_CRC_ERR | RT2860_DROP_PHY_ERR;
if (ic->ic_opmode != IEEE80211_M_MONITOR) {
tmp |= RT2860_DROP_UC_NOME | RT2860_DROP_DUPL |
RT2860_DROP_CTS | RT2860_DROP_BA | RT2860_DROP_ACK |
RT2860_DROP_VER_ERR | RT2860_DROP_CTRL_RSV |
RT2860_DROP_CFACK | RT2860_DROP_CFEND;
if (ic->ic_opmode == IEEE80211_M_STA)
tmp |= RT2860_DROP_RTS | RT2860_DROP_PSPOLL;
}
RAL_WRITE(sc, RT2860_RX_FILTR_CFG, tmp);
RAL_WRITE(sc, RT2860_MAC_SYS_CTRL,
RT2860_MAC_RX_EN | RT2860_MAC_TX_EN);
return 0;
}
static void
rt2860_init(void *arg)
{
struct rt2860_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
RAL_LOCK(sc);
rt2860_init_locked(sc);
RAL_UNLOCK(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->sc_flags & RT2860_RUNNNING)
ieee80211_start_all(ic);
}
static void
rt2860_init_locked(struct rt2860_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t tmp;
uint8_t bbp1, bbp3;
int i, qid, ridx, ntries, error;
RAL_LOCK_ASSERT(sc);
if (sc->rfswitch) {
/* hardware has a radio switch on GPIO pin 2 */
if (!(RAL_READ(sc, RT2860_GPIO_CTRL) & (1 << 2))) {
device_printf(sc->sc_dev,
"radio is disabled by hardware switch\n");
#ifdef notyet
rt2860_stop_locked(sc);
return;
#endif
}
}
RAL_WRITE(sc, RT2860_PWR_PIN_CFG, RT2860_IO_RA_PE);
/* disable DMA */
tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG);
tmp &= 0xff0;
RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp);
/* PBF hardware reset */
RAL_WRITE(sc, RT2860_SYS_CTRL, 0xe1f);
RAL_BARRIER_WRITE(sc);
RAL_WRITE(sc, RT2860_SYS_CTRL, 0xe00);
if ((error = rt2860_load_microcode(sc)) != 0) {
device_printf(sc->sc_dev, "could not load 8051 microcode\n");
rt2860_stop_locked(sc);
return;
}
- rt2860_set_macaddr(sc, IF_LLADDR(ifp));
+ rt2860_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);
/* init Tx power for all Tx rates (from EEPROM) */
for (ridx = 0; ridx < 5; ridx++) {
if (sc->txpow20mhz[ridx] == 0xffffffff)
continue;
RAL_WRITE(sc, RT2860_TX_PWR_CFG(ridx), sc->txpow20mhz[ridx]);
}
for (ntries = 0; ntries < 100; ntries++) {
tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG);
if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0)
break;
DELAY(1000);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
rt2860_stop_locked(sc);
return;
}
tmp &= 0xff0;
RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp);
/* reset Rx ring and all 6 Tx rings */
RAL_WRITE(sc, RT2860_WPDMA_RST_IDX, 0x1003f);
/* PBF hardware reset */
RAL_WRITE(sc, RT2860_SYS_CTRL, 0xe1f);
RAL_BARRIER_WRITE(sc);
RAL_WRITE(sc, RT2860_SYS_CTRL, 0xe00);
RAL_WRITE(sc, RT2860_PWR_PIN_CFG, RT2860_IO_RA_PE | RT2860_IO_RF_PE);
RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_BBP_HRST | RT2860_MAC_SRST);
RAL_BARRIER_WRITE(sc);
RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, 0);
for (i = 0; i < nitems(rt2860_def_mac); i++)
RAL_WRITE(sc, rt2860_def_mac[i].reg, rt2860_def_mac[i].val);
if (sc->mac_ver >= 0x5390)
RAL_WRITE(sc, RT2860_TX_SW_CFG0, 0x00000404);
else if (sc->mac_ver >= 0x3071) {
/* set delay of PA_PE assertion to 1us (unit of 0.25us) */
RAL_WRITE(sc, RT2860_TX_SW_CFG0,
4 << RT2860_DLY_PAPE_EN_SHIFT);
}
if (!(RAL_READ(sc, RT2860_PCI_CFG) & RT2860_PCI_CFG_PCI)) {
sc->sc_flags |= RT2860_PCIE;
/* PCIe has different clock cycle count than PCI */
tmp = RAL_READ(sc, RT2860_US_CYC_CNT);
tmp = (tmp & ~0xff) | 0x7d;
RAL_WRITE(sc, RT2860_US_CYC_CNT, tmp);
}
/* wait while MAC is busy */
for (ntries = 0; ntries < 100; ntries++) {
if (!(RAL_READ(sc, RT2860_MAC_STATUS_REG) &
(RT2860_RX_STATUS_BUSY | RT2860_TX_STATUS_BUSY)))
break;
DELAY(1000);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "timeout waiting for MAC\n");
rt2860_stop_locked(sc);
return;
}
/* clear Host to MCU mailbox */
RAL_WRITE(sc, RT2860_H2M_BBPAGENT, 0);
RAL_WRITE(sc, RT2860_H2M_MAILBOX, 0);
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_RFRESET, 0, 0);
DELAY(1000);
if ((error = rt2860_bbp_init(sc)) != 0) {
rt2860_stop_locked(sc);
return;
}
/* clear RX WCID search table */
RAL_SET_REGION_4(sc, RT2860_WCID_ENTRY(0), 0, 512);
/* clear pairwise key table */
RAL_SET_REGION_4(sc, RT2860_PKEY(0), 0, 2048);
/* clear IV/EIV table */
RAL_SET_REGION_4(sc, RT2860_IVEIV(0), 0, 512);
/* clear WCID attribute table */
RAL_SET_REGION_4(sc, RT2860_WCID_ATTR(0), 0, 256);
/* clear shared key table */
RAL_SET_REGION_4(sc, RT2860_SKEY(0, 0), 0, 8 * 32);
/* clear shared key mode */
RAL_SET_REGION_4(sc, RT2860_SKEY_MODE_0_7, 0, 4);
/* init Tx rings (4 EDCAs + HCCA + Mgt) */
for (qid = 0; qid < 6; qid++) {
RAL_WRITE(sc, RT2860_TX_BASE_PTR(qid), sc->txq[qid].paddr);
RAL_WRITE(sc, RT2860_TX_MAX_CNT(qid), RT2860_TX_RING_COUNT);
RAL_WRITE(sc, RT2860_TX_CTX_IDX(qid), 0);
}
/* init Rx ring */
RAL_WRITE(sc, RT2860_RX_BASE_PTR, sc->rxq.paddr);
RAL_WRITE(sc, RT2860_RX_MAX_CNT, RT2860_RX_RING_COUNT);
RAL_WRITE(sc, RT2860_RX_CALC_IDX, RT2860_RX_RING_COUNT - 1);
/* setup maximum buffer sizes */
RAL_WRITE(sc, RT2860_MAX_LEN_CFG, 1 << 12 |
(MCLBYTES - sizeof (struct rt2860_rxwi) - 2));
for (ntries = 0; ntries < 100; ntries++) {
tmp = RAL_READ(sc, RT2860_WPDMA_GLO_CFG);
if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0)
break;
DELAY(1000);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
rt2860_stop_locked(sc);
return;
}
tmp &= 0xff0;
RAL_WRITE(sc, RT2860_WPDMA_GLO_CFG, tmp);
/* disable interrupts mitigation */
RAL_WRITE(sc, RT2860_DELAY_INT_CFG, 0);
/* write vendor-specific BBP values (from EEPROM) */
for (i = 0; i < 8; i++) {
if (sc->bbp[i].reg == 0 || sc->bbp[i].reg == 0xff)
continue;
rt2860_mcu_bbp_write(sc, sc->bbp[i].reg, sc->bbp[i].val);
}
/* select Main antenna for 1T1R devices */
if (sc->rf_rev == RT3070_RF_2020 ||
sc->rf_rev == RT3070_RF_3020 ||
sc->rf_rev == RT3070_RF_3320 ||
sc->mac_ver == 0x5390)
rt3090_set_rx_antenna(sc, 0);
/* send LEDs operating mode to microcontroller */
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LED1, sc->led[0], 0);
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LED2, sc->led[1], 0);
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_LED3, sc->led[2], 0);
if (sc->mac_ver >= 0x5390)
rt5390_rf_init(sc);
else if (sc->mac_ver >= 0x3071) {
if ((error = rt3090_rf_init(sc)) != 0) {
rt2860_stop_locked(sc);
return;
}
}
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_SLEEP, 0x02ff, 1);
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_WAKEUP, 0, 1);
if (sc->mac_ver >= 0x5390)
rt5390_rf_wakeup(sc);
else if (sc->mac_ver >= 0x3071)
rt3090_rf_wakeup(sc);
/* disable non-existing Rx chains */
bbp3 = rt2860_mcu_bbp_read(sc, 3);
bbp3 &= ~(1 << 3 | 1 << 4);
if (sc->nrxchains == 2)
bbp3 |= 1 << 3;
else if (sc->nrxchains == 3)
bbp3 |= 1 << 4;
rt2860_mcu_bbp_write(sc, 3, bbp3);
/* disable non-existing Tx chains */
bbp1 = rt2860_mcu_bbp_read(sc, 1);
if (sc->ntxchains == 1)
bbp1 = (bbp1 & ~(1 << 3 | 1 << 4));
else if (sc->mac_ver == 0x3593 && sc->ntxchains == 2)
bbp1 = (bbp1 & ~(1 << 4)) | 1 << 3;
else if (sc->mac_ver == 0x3593 && sc->ntxchains == 3)
bbp1 = (bbp1 & ~(1 << 3)) | 1 << 4;
rt2860_mcu_bbp_write(sc, 1, bbp1);
if (sc->mac_ver >= 0x3071)
rt3090_rf_setup(sc);
/* select default channel */
rt2860_switch_chan(sc, ic->ic_curchan);
/* reset RF from MCU */
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_RFRESET, 0, 0);
/* set RTS threshold */
tmp = RAL_READ(sc, RT2860_TX_RTS_CFG);
tmp &= ~0xffff00;
tmp |= IEEE80211_RTS_DEFAULT << 8;
RAL_WRITE(sc, RT2860_TX_RTS_CFG, tmp);
/* setup initial protection mode */
- rt2860_updateprot(ifp);
+ rt2860_updateprot(sc);
/* turn radio LED on */
rt2860_set_leds(sc, RT2860_LED_RADIO);
/* enable Tx/Rx DMA engine */
if ((error = rt2860_txrx_enable(sc)) != 0) {
rt2860_stop_locked(sc);
return;
}
/* clear pending interrupts */
RAL_WRITE(sc, RT2860_INT_STATUS, 0xffffffff);
/* enable interrupts */
RAL_WRITE(sc, RT2860_INT_MASK, 0x3fffc);
if (sc->sc_flags & RT2860_ADVANCED_PS)
rt2860_mcu_cmd(sc, RT2860_MCU_CMD_PSLEVEL, sc->pslevel, 0);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_flags |= RT2860_RUNNNING;
callout_reset(&sc->watchdog_ch, hz, rt2860_watchdog, sc);
}
static void
rt2860_stop(void *arg)
{
struct rt2860_softc *sc = arg;
RAL_LOCK(sc);
rt2860_stop_locked(sc);
RAL_UNLOCK(sc);
}
static void
rt2860_stop_locked(struct rt2860_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
uint32_t tmp;
int qid;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->sc_flags & RT2860_RUNNNING)
rt2860_set_leds(sc, 0); /* turn all LEDs off */
callout_stop(&sc->watchdog_ch);
sc->sc_tx_timer = 0;
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->sc_flags &= ~RT2860_RUNNNING;
/* disable interrupts */
RAL_WRITE(sc, RT2860_INT_MASK, 0);
/* disable GP timer */
rt2860_set_gp_timer(sc, 0);
/* disable Rx */
tmp = RAL_READ(sc, RT2860_MAC_SYS_CTRL);
tmp &= ~(RT2860_MAC_RX_EN | RT2860_MAC_TX_EN);
RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, tmp);
/* reset adapter */
RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, RT2860_BBP_HRST | RT2860_MAC_SRST);
RAL_BARRIER_WRITE(sc);
RAL_WRITE(sc, RT2860_MAC_SYS_CTRL, 0);
/* reset Tx and Rx rings (and reclaim TXWIs) */
sc->qfullmsk = 0;
for (qid = 0; qid < 6; qid++)
rt2860_reset_tx_ring(sc, &sc->txq[qid]);
rt2860_reset_rx_ring(sc, &sc->rxq);
}
int
rt2860_load_microcode(struct rt2860_softc *sc)
{
const struct firmware *fp;
int ntries, error;
RAL_LOCK_ASSERT(sc);
RAL_UNLOCK(sc);
fp = firmware_get("rt2860fw");
RAL_LOCK(sc);
if (fp == NULL) {
device_printf(sc->sc_dev,
"unable to receive rt2860fw firmware image\n");
return EINVAL;
}
/* set "host program ram write selection" bit */
RAL_WRITE(sc, RT2860_SYS_CTRL, RT2860_HST_PM_SEL);
/* write microcode image */
RAL_WRITE_REGION_1(sc, RT2860_FW_BASE, fp->data, fp->datasize);
/* kick microcontroller unit */
RAL_WRITE(sc, RT2860_SYS_CTRL, 0);
RAL_BARRIER_WRITE(sc);
RAL_WRITE(sc, RT2860_SYS_CTRL, RT2860_MCU_RESET);
RAL_WRITE(sc, RT2860_H2M_BBPAGENT, 0);
RAL_WRITE(sc, RT2860_H2M_MAILBOX, 0);
/* wait until microcontroller is ready */
RAL_BARRIER_READ_WRITE(sc);
for (ntries = 0; ntries < 1000; ntries++) {
if (RAL_READ(sc, RT2860_SYS_CTRL) & RT2860_MCU_READY)
break;
DELAY(1000);
}
if (ntries == 1000) {
device_printf(sc->sc_dev,
"timeout waiting for MCU to initialize\n");
error = ETIMEDOUT;
} else
error = 0;
firmware_put(fp, FIRMWARE_UNLOAD);
return error;
}
/*
* This function is called periodically to adjust Tx power based on
* temperature variation.
*/
#ifdef NOT_YET
static void
rt2860_calib(struct rt2860_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
const uint8_t *tssi;
uint8_t step, bbp49;
int8_t ridx, d;
/* read current temperature */
bbp49 = rt2860_mcu_bbp_read(sc, 49);
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_bss->ni_chan)) {
tssi = &sc->tssi_2ghz[4];
step = sc->step_2ghz;
} else {
tssi = &sc->tssi_5ghz[4];
step = sc->step_5ghz;
}
if (bbp49 < tssi[0]) { /* lower than reference */
/* use higher Tx power than default */
for (d = 0; d > -4 && bbp49 <= tssi[d - 1]; d--);
} else if (bbp49 > tssi[0]) { /* greater than reference */
/* use lower Tx power than default */
for (d = 0; d < +4 && bbp49 >= tssi[d + 1]; d++);
} else {
/* use default Tx power */
d = 0;
}
d *= step;
DPRINTF(("BBP49=0x%02x, adjusting Tx power by %d\n", bbp49, d));
/* write adjusted Tx power values for each Tx rate */
for (ridx = 0; ridx < 5; ridx++) {
if (sc->txpow20mhz[ridx] == 0xffffffff)
continue;
RAL_WRITE(sc, RT2860_TX_PWR_CFG(ridx),
b4inc(sc->txpow20mhz[ridx], d));
}
}
#endif
static void
rt3090_set_rx_antenna(struct rt2860_softc *sc, int aux)
{
uint32_t tmp;
if (aux) {
if (sc->mac_ver == 0x5390) {
rt2860_mcu_bbp_write(sc, 152,
rt2860_mcu_bbp_read(sc, 152) & ~0x80);
} else {
tmp = RAL_READ(sc, RT2860_PCI_EECTRL);
RAL_WRITE(sc, RT2860_PCI_EECTRL, tmp & ~RT2860_C);
tmp = RAL_READ(sc, RT2860_GPIO_CTRL);
RAL_WRITE(sc, RT2860_GPIO_CTRL, (tmp & ~0x0808) | 0x08);
}
} else {
if (sc->mac_ver == 0x5390) {
rt2860_mcu_bbp_write(sc, 152,
rt2860_mcu_bbp_read(sc, 152) | 0x80);
} else {
tmp = RAL_READ(sc, RT2860_PCI_EECTRL);
RAL_WRITE(sc, RT2860_PCI_EECTRL, tmp | RT2860_C);
tmp = RAL_READ(sc, RT2860_GPIO_CTRL);
RAL_WRITE(sc, RT2860_GPIO_CTRL, tmp & ~0x0808);
}
}
}
static void
rt2860_switch_chan(struct rt2860_softc *sc, struct ieee80211_channel *c)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
u_int chan, group;
chan = ieee80211_chan2ieee(ic, c);
if (chan == 0 || chan == IEEE80211_CHAN_ANY)
return;
if (sc->mac_ver >= 0x5390)
rt5390_set_chan(sc, chan);
else if (sc->mac_ver >= 0x3071)
rt3090_set_chan(sc, chan);
else
rt2860_set_chan(sc, chan);
/* determine channel group */
if (chan <= 14)
group = 0;
else if (chan <= 64)
group = 1;
else if (chan <= 128)
group = 2;
else
group = 3;
/* XXX necessary only when group has changed! */
if (sc->mac_ver < 0x5390)
rt2860_select_chan_group(sc, group);
DELAY(1000);
}
static int
rt2860_setup_beacon(struct rt2860_softc *sc, struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_beacon_offsets bo;
struct rt2860_txwi txwi;
struct mbuf *m;
int ridx;
if ((m = ieee80211_beacon_alloc(vap->iv_bss, &bo)) == NULL)
return ENOBUFS;
memset(&txwi, 0, sizeof txwi);
txwi.wcid = 0xff;
txwi.len = htole16(m->m_pkthdr.len);
/* send beacons at the lowest available rate */
ridx = IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan) ?
RT2860_RIDX_OFDM6 : RT2860_RIDX_CCK1;
txwi.phy = htole16(rt2860_rates[ridx].mcs);
if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM)
txwi.phy |= htole16(RT2860_PHY_OFDM);
txwi.txop = RT2860_TX_TXOP_HT;
txwi.flags = RT2860_TX_TS;
txwi.xflags = RT2860_TX_NSEQ;
RAL_WRITE_REGION_1(sc, RT2860_BCN_BASE(0),
(uint8_t *)&txwi, sizeof txwi);
RAL_WRITE_REGION_1(sc, RT2860_BCN_BASE(0) + sizeof txwi,
mtod(m, uint8_t *), m->m_pkthdr.len);
m_freem(m);
return 0;
}
static void
rt2860_enable_tsf_sync(struct rt2860_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t tmp;
tmp = RAL_READ(sc, RT2860_BCN_TIME_CFG);
tmp &= ~0x1fffff;
tmp |= vap->iv_bss->ni_intval * 16;
tmp |= RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN;
if (vap->iv_opmode == IEEE80211_M_STA) {
/*
* Local TSF is always updated with remote TSF on beacon
* reception.
*/
tmp |= 1 << RT2860_TSF_SYNC_MODE_SHIFT;
}
else if (vap->iv_opmode == IEEE80211_M_IBSS ||
vap->iv_opmode == IEEE80211_M_MBSS) {
tmp |= RT2860_BCN_TX_EN;
/*
* Local TSF is updated with remote TSF on beacon reception
* only if the remote TSF is greater than local TSF.
*/
tmp |= 2 << RT2860_TSF_SYNC_MODE_SHIFT;
} else if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
tmp |= RT2860_BCN_TX_EN;
/* SYNC with nobody */
tmp |= 3 << RT2860_TSF_SYNC_MODE_SHIFT;
}
RAL_WRITE(sc, RT2860_BCN_TIME_CFG, tmp);
}
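The rt2860 hunks above drop the old ifnet IFF_DRV_RUNNING/IFF_DRV_OACTIVE bookkeeping in favour of a driver-owned flag bit kept in the softc. A minimal userspace sketch of that pattern, with illustrative demo_* names only (nothing below is taken from the commit):

#include <stdio.h>

/* Hypothetical softc modelling driver-owned state bits. */
struct demo_softc {
	int	sc_flags;
#define	DEMO_ENABLED	(1 << 0)
#define	DEMO_RUNNING	(1 << 1)	/* stands in for the old IFF_DRV_RUNNING */
};

static void
demo_init(struct demo_softc *sc)
{
	/* hardware bring-up would go here */
	sc->sc_flags |= DEMO_RUNNING;	/* mark the interface as running */
}

static void
demo_stop(struct demo_softc *sc)
{
	if (sc->sc_flags & DEMO_RUNNING) {
		/* quiesce hardware, stop timers, etc. */
	}
	sc->sc_flags &= ~DEMO_RUNNING;
}

int
main(void)
{
	struct demo_softc sc = { 0 };

	demo_init(&sc);
	printf("running: %d\n", (sc.sc_flags & DEMO_RUNNING) != 0);
	demo_stop(&sc);
	printf("running: %d\n", (sc.sc_flags & DEMO_RUNNING) != 0);
	return (0);
}

rt2860_stop_locked() above follows the same shape: test the bit, tear the hardware down, then clear the bit.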
Index: head/sys/dev/ral/rt2860var.h
===================================================================
--- head/sys/dev/ral/rt2860var.h (revision 287196)
+++ head/sys/dev/ral/rt2860var.h (revision 287197)
@@ -1,210 +1,211 @@
/*-
* Copyright (c) 2007 Damien Bergamini <damien.bergamini@free.fr>
* Copyright (c) 2012 Bernhard Schmidt <bschmidt@FreeBSD.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* $OpenBSD: rt2860var.h,v 1.20 2010/09/07 16:21:42 deraadt Exp $
* $FreeBSD$
*/
#define RT2860_TX_RING_COUNT 64
#define RT2860_RX_RING_COUNT 128
#define RT2860_TX_POOL_COUNT (RT2860_TX_RING_COUNT * 2)
#define RT2860_MAX_SCATTER ((RT2860_TX_RING_COUNT * 2) - 1)
/* HW supports up to 255 STAs */
#define RT2860_WCID_MAX 254
#define RT2860_AID2WCID(aid) ((aid) & 0xff)
struct rt2860_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint64_t wr_tsf;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
uint8_t wr_antenna;
int8_t wr_antsignal;
int8_t wr_antnoise;
} __packed;
#define RT2860_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_TSFT) | \
(1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_ANTENNA) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE))
struct rt2860_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint8_t wt_rate;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
} __packed;
#define RT2860_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL))
struct rt2860_tx_data {
struct rt2860_txwi *txwi;
struct mbuf *m;
struct ieee80211_node *ni;
bus_dmamap_t map;
bus_addr_t paddr;
SLIST_ENTRY(rt2860_tx_data) next;
};
struct rt2860_tx_ring {
struct rt2860_txd *txd;
bus_addr_t paddr;
bus_dma_tag_t desc_dmat;
bus_dmamap_t desc_map;
bus_dma_segment_t seg;
struct rt2860_tx_data *data[RT2860_TX_RING_COUNT];
int cur;
int next;
int queued;
};
struct rt2860_rx_data {
struct mbuf *m;
bus_dmamap_t map;
};
struct rt2860_rx_ring {
struct rt2860_rxd *rxd;
bus_addr_t paddr;
bus_dma_tag_t desc_dmat;
bus_dmamap_t desc_map;
bus_dma_tag_t data_dmat;
bus_dma_segment_t seg;
unsigned int cur; /* must be unsigned */
struct rt2860_rx_data data[RT2860_RX_RING_COUNT];
};
struct rt2860_node {
struct ieee80211_node ni;
uint8_t wcid;
uint8_t ridx[IEEE80211_RATE_MAXSIZE];
uint8_t ctl_ridx[IEEE80211_RATE_MAXSIZE];
};
struct rt2860_vap {
struct ieee80211vap ral_vap;
int (*ral_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define RT2860_VAP(vap) ((struct rt2860_vap *)(vap))
struct rt2860_softc {
- struct ifnet *sc_ifp;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
+ struct mtx sc_mtx;
device_t sc_dev;
bus_space_tag_t sc_st;
bus_space_handle_t sc_sh;
- struct mtx sc_mtx;
-
struct callout watchdog_ch;
int sc_invalid;
int sc_debug;
/*
* The same in both up to here
* ------------------------------------------------
*/
uint16_t (*sc_srom_read)(struct rt2860_softc *,
uint16_t);
void (*sc_node_free)(struct ieee80211_node *);
int sc_flags;
#define RT2860_ENABLED (1 << 0)
#define RT2860_ADVANCED_PS (1 << 1)
#define RT2860_PCIE (1 << 2)
+#define RT2860_RUNNNING (1 << 3)
struct ieee80211_node *wcid2ni[RT2860_WCID_MAX];
struct rt2860_tx_ring txq[6];
struct rt2860_rx_ring rxq;
SLIST_HEAD(, rt2860_tx_data) data_pool;
struct rt2860_tx_data data[RT2860_TX_POOL_COUNT];
bus_dma_tag_t txwi_dmat;
bus_dmamap_t txwi_map;
bus_dma_segment_t txwi_seg;
caddr_t txwi_vaddr;
int sc_tx_timer;
int mgtqid;
uint8_t qfullmsk;
uint16_t mac_ver;
uint16_t mac_rev;
uint8_t rf_rev;
uint8_t freq;
uint8_t ntxchains;
uint8_t nrxchains;
uint8_t pslevel;
int8_t txpow1[54];
int8_t txpow2[54];
int8_t rssi_2ghz[3];
int8_t rssi_5ghz[3];
uint8_t lna[4];
uint8_t rf24_20mhz;
uint8_t rf24_40mhz;
uint8_t patch_dac;
uint8_t rfswitch;
uint8_t ext_2ghz_lna;
uint8_t ext_5ghz_lna;
uint8_t calib_2ghz;
uint8_t calib_5ghz;
uint8_t txmixgain_2ghz;
uint8_t txmixgain_5ghz;
uint8_t tssi_2ghz[9];
uint8_t tssi_5ghz[9];
uint8_t step_2ghz;
uint8_t step_5ghz;
struct {
uint8_t reg;
uint8_t val;
} bbp[8], rf[10];
uint8_t leds;
uint16_t led[3];
uint32_t txpow20mhz[5];
uint32_t txpow40mhz_2ghz[5];
uint32_t txpow40mhz_5ghz[5];
struct rt2860_rx_radiotap_header sc_rxtap;
int sc_rxtap_len;
struct rt2860_tx_radiotap_header sc_txtap;
int sc_txtap_len;
};
int rt2860_attach(device_t, int);
int rt2860_detach(void *);
void rt2860_shutdown(void *);
void rt2860_suspend(void *);
void rt2860_resume(void *);
void rt2860_intr(void *);
#define RAL_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define RAL_LOCK_ASSERT(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
#define RAL_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
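The softc now embeds struct ieee80211com (sc_ic) and a private send queue (sc_snd) instead of holding a struct ifnet pointer. A compilable sketch of that embedding and of the ic_softc back-pointer the net80211 methods rely on; the demo_* names are made up for illustration:

#include <stdio.h>

/* Stand-ins for the net80211 com structure and a driver softc. */
struct demo_com {
	void		*ic_softc;	/* back-pointer set at attach time */
	const char	*ic_name;
};

struct demo_softc {
	struct demo_com	sc_ic;		/* embedded first, like sc_ic above */
	int		sc_unit;
};

int
main(void)
{
	struct demo_softc sc = { .sc_unit = 0 };
	struct demo_com *ic = &sc.sc_ic;

	/* What the converted attach routines do: wire the back-pointer. */
	ic->ic_softc = &sc;
	ic->ic_name = "demo0";

	/* What the methods do: recover the softc from the com pointer. */
	struct demo_softc *back = ic->ic_softc;
	printf("%s unit %d\n", ic->ic_name, back->sc_unit);
	return (0);
}

In the converted drivers the back-pointer is assigned once in attach, and every callback that used to dereference ifp->if_softc now reads ic->ic_softc instead.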
Index: head/sys/dev/usb/wlan/if_rsu.c
===================================================================
--- head/sys/dev/usb/wlan/if_rsu.c (revision 287196)
+++ head/sys/dev/usb/wlan/if_rsu.c (revision 287197)
@@ -1,2474 +1,2391 @@
/* $OpenBSD: if_rsu.c,v 1.17 2013/04/15 09:23:01 mglocker Exp $ */
/*-
* Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for Realtek RTL8188SU/RTL8191SU/RTL8192SU.
*
* TODO:
* o 11n support
* o h/w crypto
* o hostap / ibss / mesh
*/
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/firmware.h>
#include <sys/module.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include "usbdevs.h"
#define USB_DEBUG_VAR rsu_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/wlan/if_rsureg.h>
#ifdef USB_DEBUG
static int rsu_debug = 0;
SYSCTL_NODE(_hw_usb, OID_AUTO, rsu, CTLFLAG_RW, 0, "USB rsu");
SYSCTL_INT(_hw_usb_rsu, OID_AUTO, debug, CTLFLAG_RWTUN, &rsu_debug, 0,
"Debug level");
#endif
static const STRUCT_USB_HOST_ID rsu_devs[] = {
#define RSU_HT_NOT_SUPPORTED 0
#define RSU_HT_SUPPORTED 1
#define RSU_DEV_HT(v,p) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, \
RSU_HT_SUPPORTED) }
#define RSU_DEV(v,p) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, \
RSU_HT_NOT_SUPPORTED) }
RSU_DEV(ASUS, RTL8192SU),
RSU_DEV(AZUREWAVE, RTL8192SU_4),
RSU_DEV_HT(ACCTON, RTL8192SU),
RSU_DEV_HT(ASUS, USBN10),
RSU_DEV_HT(AZUREWAVE, RTL8192SU_1),
RSU_DEV_HT(AZUREWAVE, RTL8192SU_2),
RSU_DEV_HT(AZUREWAVE, RTL8192SU_3),
RSU_DEV_HT(AZUREWAVE, RTL8192SU_5),
RSU_DEV_HT(BELKIN, RTL8192SU_1),
RSU_DEV_HT(BELKIN, RTL8192SU_2),
RSU_DEV_HT(BELKIN, RTL8192SU_3),
RSU_DEV_HT(CONCEPTRONIC2, RTL8192SU_1),
RSU_DEV_HT(CONCEPTRONIC2, RTL8192SU_2),
RSU_DEV_HT(CONCEPTRONIC2, RTL8192SU_3),
RSU_DEV_HT(COREGA, RTL8192SU),
RSU_DEV_HT(DLINK2, DWA131A1),
RSU_DEV_HT(DLINK2, RTL8192SU_1),
RSU_DEV_HT(DLINK2, RTL8192SU_2),
RSU_DEV_HT(EDIMAX, RTL8192SU_1),
RSU_DEV_HT(EDIMAX, RTL8192SU_2),
RSU_DEV_HT(EDIMAX, EW7622UMN),
RSU_DEV_HT(GUILLEMOT, HWGUN54),
RSU_DEV_HT(GUILLEMOT, HWNUM300),
RSU_DEV_HT(HAWKING, RTL8192SU_1),
RSU_DEV_HT(HAWKING, RTL8192SU_2),
RSU_DEV_HT(PLANEX2, GWUSNANO),
RSU_DEV_HT(REALTEK, RTL8171),
RSU_DEV_HT(REALTEK, RTL8172),
RSU_DEV_HT(REALTEK, RTL8173),
RSU_DEV_HT(REALTEK, RTL8174),
RSU_DEV_HT(REALTEK, RTL8192SU),
RSU_DEV_HT(REALTEK, RTL8712),
RSU_DEV_HT(REALTEK, RTL8713),
RSU_DEV_HT(SENAO, RTL8192SU_1),
RSU_DEV_HT(SENAO, RTL8192SU_2),
RSU_DEV_HT(SITECOMEU, WL349V1),
RSU_DEV_HT(SITECOMEU, WL353),
RSU_DEV_HT(SWEEX2, LW154),
RSU_DEV_HT(TRENDNET, TEW646UBH),
#undef RSU_DEV_HT
#undef RSU_DEV
};
static device_probe_t rsu_match;
static device_attach_t rsu_attach;
static device_detach_t rsu_detach;
static usb_callback_t rsu_bulk_tx_callback_be_bk;
static usb_callback_t rsu_bulk_tx_callback_vi_vo;
static usb_callback_t rsu_bulk_rx_callback;
static usb_error_t rsu_do_request(struct rsu_softc *,
struct usb_device_request *, void *);
static struct ieee80211vap *
rsu_vap_create(struct ieee80211com *, const char name[],
int, enum ieee80211_opmode, int, const uint8_t bssid[],
const uint8_t mac[]);
static void rsu_vap_delete(struct ieee80211vap *);
static void rsu_scan_start(struct ieee80211com *);
static void rsu_scan_end(struct ieee80211com *);
static void rsu_set_channel(struct ieee80211com *);
static void rsu_update_mcast(struct ieee80211com *);
static int rsu_alloc_rx_list(struct rsu_softc *);
static void rsu_free_rx_list(struct rsu_softc *);
static int rsu_alloc_tx_list(struct rsu_softc *);
static void rsu_free_tx_list(struct rsu_softc *);
static void rsu_free_list(struct rsu_softc *, struct rsu_data [], int);
static struct rsu_data *_rsu_getbuf(struct rsu_softc *);
static struct rsu_data *rsu_getbuf(struct rsu_softc *);
static int rsu_write_region_1(struct rsu_softc *, uint16_t, uint8_t *,
int);
static void rsu_write_1(struct rsu_softc *, uint16_t, uint8_t);
static void rsu_write_2(struct rsu_softc *, uint16_t, uint16_t);
static void rsu_write_4(struct rsu_softc *, uint16_t, uint32_t);
static int rsu_read_region_1(struct rsu_softc *, uint16_t, uint8_t *,
int);
static uint8_t rsu_read_1(struct rsu_softc *, uint16_t);
static uint16_t rsu_read_2(struct rsu_softc *, uint16_t);
static uint32_t rsu_read_4(struct rsu_softc *, uint16_t);
static int rsu_fw_iocmd(struct rsu_softc *, uint32_t);
static uint8_t rsu_efuse_read_1(struct rsu_softc *, uint16_t);
static int rsu_read_rom(struct rsu_softc *);
static int rsu_fw_cmd(struct rsu_softc *, uint8_t, void *, int);
static void rsu_calib_task(void *, int);
static int rsu_newstate(struct ieee80211vap *, enum ieee80211_state, int);
#ifdef notyet
static void rsu_set_key(struct rsu_softc *, const struct ieee80211_key *);
static void rsu_delete_key(struct rsu_softc *, const struct ieee80211_key *);
#endif
static int rsu_site_survey(struct rsu_softc *, struct ieee80211vap *);
static int rsu_join_bss(struct rsu_softc *, struct ieee80211_node *);
static int rsu_disconnect(struct rsu_softc *);
static void rsu_event_survey(struct rsu_softc *, uint8_t *, int);
static void rsu_event_join_bss(struct rsu_softc *, uint8_t *, int);
static void rsu_rx_event(struct rsu_softc *, uint8_t, uint8_t *, int);
static void rsu_rx_multi_event(struct rsu_softc *, uint8_t *, int);
static int8_t rsu_get_rssi(struct rsu_softc *, int, void *);
static struct mbuf *
rsu_rx_frame(struct rsu_softc *, uint8_t *, int, int *);
static struct mbuf *
rsu_rx_multi_frame(struct rsu_softc *, uint8_t *, int, int *);
static struct mbuf *
rsu_rxeof(struct usb_xfer *, struct rsu_data *, int *);
static void rsu_txeof(struct usb_xfer *, struct rsu_data *);
static int rsu_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
-static void rsu_init(void *);
-static void rsu_init_locked(struct rsu_softc *);
+static void rsu_init(struct rsu_softc *);
static int rsu_tx_start(struct rsu_softc *, struct ieee80211_node *,
struct mbuf *, struct rsu_data *);
-static void rsu_start(struct ifnet *);
-static void rsu_start_locked(struct ifnet *);
-static int rsu_ioctl(struct ifnet *, u_long, caddr_t);
-static void rsu_stop(struct ifnet *, int);
-static void rsu_stop_locked(struct ifnet *, int);
+static int rsu_transmit(struct ieee80211com *, struct mbuf *);
+static void rsu_start(struct rsu_softc *);
+static void rsu_parent(struct ieee80211com *);
+static void rsu_stop(struct rsu_softc *);
static void rsu_ms_delay(struct rsu_softc *);
static device_method_t rsu_methods[] = {
DEVMETHOD(device_probe, rsu_match),
DEVMETHOD(device_attach, rsu_attach),
DEVMETHOD(device_detach, rsu_detach),
DEVMETHOD_END
};
static driver_t rsu_driver = {
.name = "rsu",
.methods = rsu_methods,
.size = sizeof(struct rsu_softc)
};
static devclass_t rsu_devclass;
DRIVER_MODULE(rsu, uhub, rsu_driver, rsu_devclass, NULL, 0);
MODULE_DEPEND(rsu, wlan, 1, 1, 1);
MODULE_DEPEND(rsu, usb, 1, 1, 1);
MODULE_DEPEND(rsu, firmware, 1, 1, 1);
MODULE_VERSION(rsu, 1);
static uint8_t rsu_wme_ac_xfer_map[4] = {
[WME_AC_BE] = RSU_BULK_TX_BE_BK,
[WME_AC_BK] = RSU_BULK_TX_BE_BK,
[WME_AC_VI] = RSU_BULK_TX_VI_VO,
[WME_AC_VO] = RSU_BULK_TX_VI_VO,
};
static const struct usb_config rsu_config[RSU_N_TRANSFER] = {
[RSU_BULK_RX] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_IN,
.bufsize = RSU_RXBUFSZ,
.flags = {
.pipe_bof = 1,
.short_xfer_ok = 1
},
.callback = rsu_bulk_rx_callback
},
[RSU_BULK_TX_BE_BK] = {
.type = UE_BULK,
.endpoint = 0x06,
.direction = UE_DIR_OUT,
.bufsize = RSU_TXBUFSZ,
.flags = {
.ext_buffer = 1,
.pipe_bof = 1,
.force_short_xfer = 1
},
.callback = rsu_bulk_tx_callback_be_bk,
.timeout = RSU_TX_TIMEOUT
},
[RSU_BULK_TX_VI_VO] = {
.type = UE_BULK,
.endpoint = 0x04,
.direction = UE_DIR_OUT,
.bufsize = RSU_TXBUFSZ,
.flags = {
.ext_buffer = 1,
.pipe_bof = 1,
.force_short_xfer = 1
},
.callback = rsu_bulk_tx_callback_vi_vo,
.timeout = RSU_TX_TIMEOUT
},
};
static int
rsu_match(device_t self)
{
struct usb_attach_arg *uaa = device_get_ivars(self);
if (uaa->usb_mode != USB_MODE_HOST ||
uaa->info.bIfaceIndex != 0 ||
uaa->info.bConfigIndex != 0)
return (ENXIO);
return (usbd_lookup_id_by_uaa(rsu_devs, sizeof(rsu_devs), uaa));
}
static int
rsu_attach(device_t self)
{
struct usb_attach_arg *uaa = device_get_ivars(self);
struct rsu_softc *sc = device_get_softc(self);
- struct ifnet *ifp;
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
int error;
uint8_t iface_index, bands;
device_set_usb_desc(self);
sc->sc_udev = uaa->device;
sc->sc_dev = self;
mtx_init(&sc->sc_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK,
MTX_DEF);
TIMEOUT_TASK_INIT(taskqueue_thread, &sc->calib_task, 0,
rsu_calib_task, sc);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
/* Allocate Tx/Rx buffers. */
error = rsu_alloc_rx_list(sc);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Rx buffers\n");
goto fail_usb;
}
error = rsu_alloc_tx_list(sc);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Tx buffers\n");
rsu_free_rx_list(sc);
goto fail_usb;
}
iface_index = 0;
error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
rsu_config, RSU_N_TRANSFER, sc, &sc->sc_mtx);
if (error) {
device_printf(sc->sc_dev,
"could not allocate USB transfers, err=%s\n",
usbd_errstr(error));
goto fail_usb;
}
RSU_LOCK(sc);
/* Read chip revision. */
sc->cut = MS(rsu_read_4(sc, R92S_PMC_FSM), R92S_PMC_FSM_CUT);
if (sc->cut != 3)
sc->cut = (sc->cut >> 1) + 1;
error = rsu_read_rom(sc);
RSU_UNLOCK(sc);
if (error != 0) {
device_printf(self, "could not read ROM\n");
goto fail_rom;
}
- IEEE80211_ADDR_COPY(sc->sc_bssid, &sc->rom[0x12]);
+ IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->rom[0x12]);
device_printf(self, "MAC/BB RTL8712 cut %d\n", sc->cut);
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(self, "cannot allocate interface\n");
- goto fail_ifalloc;
- }
- ic = ifp->if_l2com;
- ifp->if_softc = sc;
- if_initname(ifp, "rsu", device_get_unit(self));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = rsu_init;
- ifp->if_ioctl = rsu_ioctl;
- ifp->if_start = rsu_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
- ifp->if_capabilities |= IFCAP_RXCSUM;
- ifp->if_capenable |= IFCAP_RXCSUM;
- ifp->if_hwassist = CSUM_TCP;
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(self);
ic->ic_phytype = IEEE80211_T_OFDM; /* Not only, but not used. */
ic->ic_opmode = IEEE80211_M_STA; /* Default to BSS mode. */
/* Set device capabilities. */
ic->ic_caps =
IEEE80211_C_STA | /* station mode */
IEEE80211_C_BGSCAN | /* Background scan. */
IEEE80211_C_SHPREAMBLE | /* Short preamble supported. */
IEEE80211_C_SHSLOT | /* Short slot time supported. */
IEEE80211_C_WPA; /* WPA/RSN. */
#if 0
/* Check if HT support is present. */
if (usb_lookup(rsu_devs_noht, uaa->vendor, uaa->product) == NULL) {
/* Set HT capabilities. */
ic->ic_htcaps =
IEEE80211_HTCAP_CBW20_40 |
IEEE80211_HTCAP_DSSSCCK40;
/* Set supported HT rates. */
for (i = 0; i < 2; i++)
ic->ic_sup_mcs[i] = 0xff;
}
#endif
/* Set supported .11b and .11g rates. */
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
ieee80211_init_channels(ic, NULL, &bands);
- ieee80211_ifattach(ic, sc->sc_bssid);
+ ieee80211_ifattach(ic);
ic->ic_raw_xmit = rsu_raw_xmit;
ic->ic_scan_start = rsu_scan_start;
ic->ic_scan_end = rsu_scan_end;
ic->ic_set_channel = rsu_set_channel;
ic->ic_vap_create = rsu_vap_create;
ic->ic_vap_delete = rsu_vap_delete;
ic->ic_update_mcast = rsu_update_mcast;
+ ic->ic_parent = rsu_parent;
+ ic->ic_transmit = rsu_transmit;
ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr,
sizeof(sc->sc_txtap), RSU_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
RSU_RX_RADIOTAP_PRESENT);
if (bootverbose)
ieee80211_announce(ic);
return (0);
-fail_ifalloc:
fail_rom:
usbd_transfer_unsetup(sc->sc_xfer, RSU_N_TRANSFER);
fail_usb:
mtx_destroy(&sc->sc_mtx);
return (ENXIO);
}
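rsu_attach() now registers ic_transmit and ic_parent rather than if_start/if_ioctl, and outgoing frames are staged on the softc's own mbufq before the driver's start routine pushes them to USB (see rsu_transmit() further down). A small stand-alone sketch of that enqueue-and-kick shape, with toy names in place of the real mbufq and lock primitives:

#include <stdio.h>

#define DEMO_QLEN 8

/* Toy stand-in for the softc with its private send queue. */
struct demo_softc {
	int	running;
	int	q[DEMO_QLEN];
	int	head, tail, len;
};

/* Drain the queue; stands in for the driver start routine. */
static void
demo_start(struct demo_softc *sc)
{
	while (sc->len > 0) {
		printf("sending frame %d\n", sc->q[sc->head]);
		sc->head = (sc->head + 1) % DEMO_QLEN;
		sc->len--;
	}
}

/* Enqueue one frame and kick the start routine (ic_transmit shape). */
static int
demo_transmit(struct demo_softc *sc, int frame)
{
	if (!sc->running)
		return (-1);		/* driver not running */
	if (sc->len == DEMO_QLEN)
		return (-1);		/* queue full */
	sc->q[sc->tail] = frame;
	sc->tail = (sc->tail + 1) % DEMO_QLEN;
	sc->len++;
	demo_start(sc);
	return (0);
}

int
main(void)
{
	struct demo_softc sc = { .running = 1 };

	demo_transmit(&sc, 1);
	demo_transmit(&sc, 2);
	return (0);
}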
static int
rsu_detach(device_t self)
{
struct rsu_softc *sc = device_get_softc(self);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
- rsu_stop(ifp, 1);
+ RSU_LOCK(sc);
+ rsu_stop(sc);
+ RSU_UNLOCK(sc);
usbd_transfer_unsetup(sc->sc_xfer, RSU_N_TRANSFER);
ieee80211_ifdetach(ic);
taskqueue_drain_timeout(taskqueue_thread, &sc->calib_task);
/* Free Tx/Rx buffers. */
rsu_free_tx_list(sc);
rsu_free_rx_list(sc);
- if_free(ifp);
+ mbufq_drain(&sc->sc_snd);
mtx_destroy(&sc->sc_mtx);
return (0);
}
static usb_error_t
rsu_do_request(struct rsu_softc *sc, struct usb_device_request *req,
void *data)
{
usb_error_t err;
int ntries = 10;
RSU_ASSERT_LOCKED(sc);
while (ntries--) {
err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx,
req, data, 0, NULL, 250 /* ms */);
if (err == 0 || err == USB_ERR_NOT_CONFIGURED)
break;
DPRINTFN(1, "Control request failed, %s (retrying)\n",
usbd_errstr(err));
usb_pause_mtx(&sc->sc_mtx, hz / 100);
}
return (err);
}
static struct ieee80211vap *
rsu_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct rsu_vap *uvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return (NULL);
- uvp = (struct rsu_vap *) malloc(sizeof(struct rsu_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (uvp == NULL)
- return (NULL);
+ uvp = malloc(sizeof(struct rsu_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &uvp->vap;
if (ieee80211_vap_setup(ic, vap, name, unit, opmode,
- flags, bssid, mac) != 0) {
+ flags, bssid) != 0) {
/* out of memory */
free(uvp, M_80211_VAP);
return (NULL);
}
/* override state transition machine */
uvp->newstate = vap->iv_newstate;
vap->iv_newstate = rsu_newstate;
/* complete setup */
ieee80211_vap_attach(vap, ieee80211_media_change,
- ieee80211_media_status);
+ ieee80211_media_status, mac);
ic->ic_opmode = opmode;
return (vap);
}
static void
rsu_vap_delete(struct ieee80211vap *vap)
{
struct rsu_vap *uvp = RSU_VAP(vap);
ieee80211_vap_detach(vap);
free(uvp, M_80211_VAP);
}
static void
rsu_scan_start(struct ieee80211com *ic)
{
- int error;
struct rsu_softc *sc = ic->ic_softc;
+ int error;
/* Scanning is done by the firmware. */
RSU_LOCK(sc);
error = rsu_site_survey(sc, TAILQ_FIRST(&ic->ic_vaps));
RSU_UNLOCK(sc);
if (error != 0)
device_printf(sc->sc_dev,
"could not send site survey command\n");
}
static void
rsu_scan_end(struct ieee80211com *ic)
{
/* Nothing to do here. */
}
static void
rsu_set_channel(struct ieee80211com *ic __unused)
{
/* We are unable to switch channels, yet. */
}
static void
rsu_update_mcast(struct ieee80211com *ic)
{
/* XXX do nothing? */
}
static int
rsu_alloc_list(struct rsu_softc *sc, struct rsu_data data[],
int ndata, int maxsz)
{
int i, error;
for (i = 0; i < ndata; i++) {
struct rsu_data *dp = &data[i];
dp->sc = sc;
dp->m = NULL;
dp->buf = malloc(maxsz, M_USBDEV, M_NOWAIT);
if (dp->buf == NULL) {
device_printf(sc->sc_dev,
"could not allocate buffer\n");
error = ENOMEM;
goto fail;
}
dp->ni = NULL;
}
return (0);
fail:
rsu_free_list(sc, data, ndata);
return (error);
}
static int
rsu_alloc_rx_list(struct rsu_softc *sc)
{
int error, i;
error = rsu_alloc_list(sc, sc->sc_rx, RSU_RX_LIST_COUNT,
RSU_RXBUFSZ);
if (error != 0)
return (error);
STAILQ_INIT(&sc->sc_rx_active);
STAILQ_INIT(&sc->sc_rx_inactive);
for (i = 0; i < RSU_RX_LIST_COUNT; i++)
STAILQ_INSERT_HEAD(&sc->sc_rx_inactive, &sc->sc_rx[i], next);
return (0);
}
static int
rsu_alloc_tx_list(struct rsu_softc *sc)
{
int error, i;
error = rsu_alloc_list(sc, sc->sc_tx, RSU_TX_LIST_COUNT,
RSU_TXBUFSZ);
if (error != 0)
return (error);
STAILQ_INIT(&sc->sc_tx_inactive);
for (i = 0; i != RSU_N_TRANSFER; i++) {
STAILQ_INIT(&sc->sc_tx_active[i]);
STAILQ_INIT(&sc->sc_tx_pending[i]);
}
for (i = 0; i < RSU_TX_LIST_COUNT; i++) {
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, &sc->sc_tx[i], next);
}
return (0);
}
static void
rsu_free_tx_list(struct rsu_softc *sc)
{
int i;
/* prevent further allocations from TX list(s) */
STAILQ_INIT(&sc->sc_tx_inactive);
for (i = 0; i != RSU_N_TRANSFER; i++) {
STAILQ_INIT(&sc->sc_tx_active[i]);
STAILQ_INIT(&sc->sc_tx_pending[i]);
}
rsu_free_list(sc, sc->sc_tx, RSU_TX_LIST_COUNT);
}
static void
rsu_free_rx_list(struct rsu_softc *sc)
{
/* prevent further allocations from RX list(s) */
STAILQ_INIT(&sc->sc_rx_inactive);
STAILQ_INIT(&sc->sc_rx_active);
rsu_free_list(sc, sc->sc_rx, RSU_RX_LIST_COUNT);
}
static void
rsu_free_list(struct rsu_softc *sc, struct rsu_data data[], int ndata)
{
int i;
for (i = 0; i < ndata; i++) {
struct rsu_data *dp = &data[i];
if (dp->buf != NULL) {
free(dp->buf, M_USBDEV);
dp->buf = NULL;
}
if (dp->ni != NULL) {
ieee80211_free_node(dp->ni);
dp->ni = NULL;
}
}
}
static struct rsu_data *
_rsu_getbuf(struct rsu_softc *sc)
{
struct rsu_data *bf;
bf = STAILQ_FIRST(&sc->sc_tx_inactive);
if (bf != NULL)
STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next);
else
bf = NULL;
if (bf == NULL)
DPRINTF("out of xmit buffers\n");
return (bf);
}
static struct rsu_data *
rsu_getbuf(struct rsu_softc *sc)
{
struct rsu_data *bf;
RSU_ASSERT_LOCKED(sc);
bf = _rsu_getbuf(sc);
- if (bf == NULL) {
- struct ifnet *ifp = sc->sc_ifp;
+ if (bf == NULL)
DPRINTF("stop queue\n");
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- }
return (bf);
}
static int
rsu_write_region_1(struct rsu_softc *sc, uint16_t addr, uint8_t *buf,
int len)
{
usb_device_request_t req;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = R92S_REQ_REGS;
USETW(req.wValue, addr);
USETW(req.wIndex, 0);
USETW(req.wLength, len);
return (rsu_do_request(sc, &req, buf));
}
static void
rsu_write_1(struct rsu_softc *sc, uint16_t addr, uint8_t val)
{
rsu_write_region_1(sc, addr, &val, 1);
}
static void
rsu_write_2(struct rsu_softc *sc, uint16_t addr, uint16_t val)
{
val = htole16(val);
rsu_write_region_1(sc, addr, (uint8_t *)&val, 2);
}
static void
rsu_write_4(struct rsu_softc *sc, uint16_t addr, uint32_t val)
{
val = htole32(val);
rsu_write_region_1(sc, addr, (uint8_t *)&val, 4);
}
static int
rsu_read_region_1(struct rsu_softc *sc, uint16_t addr, uint8_t *buf,
int len)
{
usb_device_request_t req;
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = R92S_REQ_REGS;
USETW(req.wValue, addr);
USETW(req.wIndex, 0);
USETW(req.wLength, len);
return (rsu_do_request(sc, &req, buf));
}
static uint8_t
rsu_read_1(struct rsu_softc *sc, uint16_t addr)
{
uint8_t val;
if (rsu_read_region_1(sc, addr, &val, 1) != 0)
return (0xff);
return (val);
}
static uint16_t
rsu_read_2(struct rsu_softc *sc, uint16_t addr)
{
uint16_t val;
if (rsu_read_region_1(sc, addr, (uint8_t *)&val, 2) != 0)
return (0xffff);
return (le16toh(val));
}
static uint32_t
rsu_read_4(struct rsu_softc *sc, uint16_t addr)
{
uint32_t val;
if (rsu_read_region_1(sc, addr, (uint8_t *)&val, 4) != 0)
return (0xffffffff);
return (le32toh(val));
}
static int
rsu_fw_iocmd(struct rsu_softc *sc, uint32_t iocmd)
{
int ntries;
rsu_write_4(sc, R92S_IOCMD_CTRL, iocmd);
rsu_ms_delay(sc);
for (ntries = 0; ntries < 50; ntries++) {
if (rsu_read_4(sc, R92S_IOCMD_CTRL) == 0)
return (0);
rsu_ms_delay(sc);
}
return (ETIMEDOUT);
}
static uint8_t
rsu_efuse_read_1(struct rsu_softc *sc, uint16_t addr)
{
uint32_t reg;
int ntries;
reg = rsu_read_4(sc, R92S_EFUSE_CTRL);
reg = RW(reg, R92S_EFUSE_CTRL_ADDR, addr);
reg &= ~R92S_EFUSE_CTRL_VALID;
rsu_write_4(sc, R92S_EFUSE_CTRL, reg);
/* Wait for read operation to complete. */
for (ntries = 0; ntries < 100; ntries++) {
reg = rsu_read_4(sc, R92S_EFUSE_CTRL);
if (reg & R92S_EFUSE_CTRL_VALID)
return (MS(reg, R92S_EFUSE_CTRL_DATA));
rsu_ms_delay(sc);
}
device_printf(sc->sc_dev,
"could not read efuse byte at address 0x%x\n", addr);
return (0xff);
}
static int
rsu_read_rom(struct rsu_softc *sc)
{
uint8_t *rom = sc->rom;
uint16_t addr = 0;
uint32_t reg;
uint8_t off, msk;
int i;
/* Make sure that ROM type is eFuse and that autoload succeeded. */
reg = rsu_read_1(sc, R92S_EE_9346CR);
if ((reg & (R92S_9356SEL | R92S_EEPROM_EN)) != R92S_EEPROM_EN)
return (EIO);
/* Turn on 2.5V to prevent eFuse leakage. */
reg = rsu_read_1(sc, R92S_EFUSE_TEST + 3);
rsu_write_1(sc, R92S_EFUSE_TEST + 3, reg | 0x80);
rsu_ms_delay(sc);
rsu_write_1(sc, R92S_EFUSE_TEST + 3, reg & ~0x80);
/* Read full ROM image. */
memset(&sc->rom, 0xff, sizeof(sc->rom));
while (addr < 512) {
reg = rsu_efuse_read_1(sc, addr);
if (reg == 0xff)
break;
addr++;
off = reg >> 4;
msk = reg & 0xf;
for (i = 0; i < 4; i++) {
if (msk & (1 << i))
continue;
rom[off * 8 + i * 2 + 0] =
rsu_efuse_read_1(sc, addr);
addr++;
rom[off * 8 + i * 2 + 1] =
rsu_efuse_read_1(sc, addr);
addr++;
}
}
#ifdef USB_DEBUG
if (rsu_debug >= 5) {
/* Dump ROM content. */
printf("\n");
for (i = 0; i < sizeof(sc->rom); i++)
printf("%02x:", rom[i]);
printf("\n");
}
#endif
return (0);
}
static int
rsu_fw_cmd(struct rsu_softc *sc, uint8_t code, void *buf, int len)
{
const uint8_t which = rsu_wme_ac_xfer_map[WME_AC_VO];
struct rsu_data *data;
struct r92s_tx_desc *txd;
struct r92s_fw_cmd_hdr *cmd;
int cmdsz;
int xferlen;
data = rsu_getbuf(sc);
if (data == NULL)
return (ENOMEM);
/* Round up the command length to a multiple of 8 bytes. */
cmdsz = (len + 7) & ~7;
xferlen = sizeof(*txd) + sizeof(*cmd) + cmdsz;
KASSERT(xferlen <= RSU_TXBUFSZ, ("%s: invalid length", __func__));
memset(data->buf, 0, xferlen);
/* Setup Tx descriptor. */
txd = (struct r92s_tx_desc *)data->buf;
txd->txdw0 = htole32(
SM(R92S_TXDW0_OFFSET, sizeof(*txd)) |
SM(R92S_TXDW0_PKTLEN, sizeof(*cmd) + cmdsz) |
R92S_TXDW0_OWN | R92S_TXDW0_FSG | R92S_TXDW0_LSG);
txd->txdw1 = htole32(SM(R92S_TXDW1_QSEL, R92S_TXDW1_QSEL_H2C));
/* Setup command header. */
cmd = (struct r92s_fw_cmd_hdr *)&txd[1];
cmd->len = htole16(cmdsz);
cmd->code = code;
cmd->seq = sc->cmd_seq;
sc->cmd_seq = (sc->cmd_seq + 1) & 0x7f;
/* Copy command payload. */
memcpy(&cmd[1], buf, len);
DPRINTFN(2, "Tx cmd code=0x%x len=0x%x\n", code, cmdsz);
data->buflen = xferlen;
STAILQ_INSERT_TAIL(&sc->sc_tx_pending[which], data, next);
usbd_transfer_start(sc->sc_xfer[which]);
return (0);
}
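rsu_fw_cmd() above sizes the transfer by rounding the command length up to a multiple of 8 with (len + 7) & ~7. A tiny check of that rounding identity:

#include <stdio.h>

/* Round len up to the next multiple of 8, as rsu_fw_cmd() does. */
static int
roundup8(int len)
{
	return ((len + 7) & ~7);
}

int
main(void)
{
	int len;

	for (len = 0; len <= 17; len++)
		printf("%2d -> %2d\n", len, roundup8(len));
	return (0);
}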
/* ARGSUSED */
static void
rsu_calib_task(void *arg, int pending __unused)
{
struct rsu_softc *sc = arg;
uint32_t reg;
DPRINTFN(6, "running calibration task\n");
RSU_LOCK(sc);
#ifdef notyet
/* Read WPS PBC status. */
rsu_write_1(sc, R92S_MAC_PINMUX_CTRL,
R92S_GPIOMUX_EN | SM(R92S_GPIOSEL_GPIO, R92S_GPIOSEL_GPIO_JTAG));
rsu_write_1(sc, R92S_GPIO_IO_SEL,
rsu_read_1(sc, R92S_GPIO_IO_SEL) & ~R92S_GPIO_WPS);
reg = rsu_read_1(sc, R92S_GPIO_CTRL);
if (reg != 0xff && (reg & R92S_GPIO_WPS))
DPRINTF(("WPS PBC is pushed\n"));
#endif
/* Read current signal level. */
if (rsu_fw_iocmd(sc, 0xf4000001) == 0) {
reg = rsu_read_4(sc, R92S_IOCMD_DATA);
DPRINTFN(8, "RSSI=%d%%\n", reg >> 4);
}
if (sc->sc_calibrating)
taskqueue_enqueue_timeout(taskqueue_thread, &sc->calib_task, hz);
RSU_UNLOCK(sc);
}
static int
rsu_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct rsu_vap *uvp = RSU_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct rsu_softc *sc = ic->ic_softc;
struct ieee80211_node *ni;
struct ieee80211_rateset *rs;
enum ieee80211_state ostate;
int error, startcal = 0;
ostate = vap->iv_state;
DPRINTF("%s -> %s\n", ieee80211_state_name[ostate],
ieee80211_state_name[nstate]);
IEEE80211_UNLOCK(ic);
if (ostate == IEEE80211_S_RUN) {
RSU_LOCK(sc);
/* Stop calibration. */
sc->sc_calibrating = 0;
RSU_UNLOCK(sc);
taskqueue_drain_timeout(taskqueue_thread, &sc->calib_task);
/* Disassociate from our current BSS. */
RSU_LOCK(sc);
rsu_disconnect(sc);
} else
RSU_LOCK(sc);
switch (nstate) {
case IEEE80211_S_INIT:
break;
case IEEE80211_S_AUTH:
ni = ieee80211_ref_node(vap->iv_bss);
error = rsu_join_bss(sc, ni);
ieee80211_free_node(ni);
if (error != 0) {
device_printf(sc->sc_dev,
"could not send join command\n");
}
break;
case IEEE80211_S_RUN:
ni = ieee80211_ref_node(vap->iv_bss);
rs = &ni->ni_rates;
/* Indicate highest supported rate. */
ni->ni_txrate = rs->rs_rates[rs->rs_nrates - 1];
ieee80211_free_node(ni);
startcal = 1;
break;
default:
break;
}
sc->sc_calibrating = 1;
/* Start periodic calibration. */
taskqueue_enqueue_timeout(taskqueue_thread, &sc->calib_task, hz);
RSU_UNLOCK(sc);
IEEE80211_LOCK(ic);
return (uvp->newstate(vap, nstate, arg));
}
#ifdef notyet
static void
rsu_set_key(struct rsu_softc *sc, const struct ieee80211_key *k)
{
struct r92s_fw_cmd_set_key key;
memset(&key, 0, sizeof(key));
/* Map net80211 cipher to HW crypto algorithm. */
switch (k->wk_cipher->ic_cipher) {
case IEEE80211_CIPHER_WEP:
if (k->wk_keylen < 8)
key.algo = R92S_KEY_ALGO_WEP40;
else
key.algo = R92S_KEY_ALGO_WEP104;
break;
case IEEE80211_CIPHER_TKIP:
key.algo = R92S_KEY_ALGO_TKIP;
break;
case IEEE80211_CIPHER_AES_CCM:
key.algo = R92S_KEY_ALGO_AES;
break;
default:
return;
}
key.id = k->wk_keyix;
key.grpkey = (k->wk_flags & IEEE80211_KEY_GROUP) != 0;
memcpy(key.key, k->wk_key, MIN(k->wk_keylen, sizeof(key.key)));
(void)rsu_fw_cmd(sc, R92S_CMD_SET_KEY, &key, sizeof(key));
}
static void
rsu_delete_key(struct rsu_softc *sc, const struct ieee80211_key *k)
{
struct r92s_fw_cmd_set_key key;
memset(&key, 0, sizeof(key));
key.id = k->wk_keyix;
(void)rsu_fw_cmd(sc, R92S_CMD_SET_KEY, &key, sizeof(key));
}
#endif
static int
rsu_site_survey(struct rsu_softc *sc, struct ieee80211vap *vap)
{
struct r92s_fw_cmd_sitesurvey cmd;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
memset(&cmd, 0, sizeof(cmd));
- if ((ic->ic_flags & IEEE80211_F_ASCAN) || sc->scan_pass == 1)
+ if ((ic->ic_flags & IEEE80211_F_ASCAN) || sc->sc_scan_pass == 1)
cmd.active = htole32(1);
cmd.limit = htole32(48);
- if (sc->scan_pass == 1 && vap->iv_des_nssid > 0) {
+ if (sc->sc_scan_pass == 1 && vap->iv_des_nssid > 0) {
/* Do a directed scan for second pass. */
cmd.ssidlen = htole32(vap->iv_des_ssid[0].len);
memcpy(cmd.ssid, vap->iv_des_ssid[0].ssid,
vap->iv_des_ssid[0].len);
}
- DPRINTF("sending site survey command, pass=%d\n", sc->scan_pass);
+ DPRINTF("sending site survey command, pass=%d\n", sc->sc_scan_pass);
return (rsu_fw_cmd(sc, R92S_CMD_SITE_SURVEY, &cmd, sizeof(cmd)));
}
static int
rsu_join_bss(struct rsu_softc *sc, struct ieee80211_node *ni)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = ni->ni_vap;
struct ndis_wlan_bssid_ex *bss;
struct ndis_802_11_fixed_ies *fixed;
struct r92s_fw_cmd_auth auth;
uint8_t buf[sizeof(*bss) + 128] __aligned(4);
uint8_t *frm;
uint8_t opmode;
int error;
/* Let the FW decide the opmode based on the capinfo field. */
opmode = NDIS802_11AUTOUNKNOWN;
DPRINTF("setting operating mode to %d\n", opmode);
error = rsu_fw_cmd(sc, R92S_CMD_SET_OPMODE, &opmode, sizeof(opmode));
if (error != 0)
return (error);
memset(&auth, 0, sizeof(auth));
if (vap->iv_flags & IEEE80211_F_WPA) {
auth.mode = R92S_AUTHMODE_WPA;
auth.dot1x = (ni->ni_authmode == IEEE80211_AUTH_8021X);
} else
auth.mode = R92S_AUTHMODE_OPEN;
DPRINTF("setting auth mode to %d\n", auth.mode);
error = rsu_fw_cmd(sc, R92S_CMD_SET_AUTH, &auth, sizeof(auth));
if (error != 0)
return (error);
memset(buf, 0, sizeof(buf));
bss = (struct ndis_wlan_bssid_ex *)buf;
IEEE80211_ADDR_COPY(bss->macaddr, ni->ni_bssid);
bss->ssid.ssidlen = htole32(ni->ni_esslen);
memcpy(bss->ssid.ssid, ni->ni_essid, ni->ni_esslen);
if (vap->iv_flags & (IEEE80211_F_PRIVACY | IEEE80211_F_WPA))
bss->privacy = htole32(1);
bss->rssi = htole32(ni->ni_avgrssi);
if (ic->ic_curmode == IEEE80211_MODE_11B)
bss->networktype = htole32(NDIS802_11DS);
else
bss->networktype = htole32(NDIS802_11OFDM24);
bss->config.len = htole32(sizeof(bss->config));
bss->config.bintval = htole32(ni->ni_intval);
bss->config.dsconfig = htole32(ieee80211_chan2ieee(ic, ni->ni_chan));
bss->inframode = htole32(NDIS802_11INFRASTRUCTURE);
memcpy(bss->supprates, ni->ni_rates.rs_rates,
ni->ni_rates.rs_nrates);
/* Write the fixed fields of the beacon frame. */
fixed = (struct ndis_802_11_fixed_ies *)&bss[1];
memcpy(&fixed->tstamp, ni->ni_tstamp.data, 8);
fixed->bintval = htole16(ni->ni_intval);
fixed->capabilities = htole16(ni->ni_capinfo);
/* Write IEs to be included in the association request. */
frm = (uint8_t *)&fixed[1];
frm = ieee80211_add_rsn(frm, vap);
frm = ieee80211_add_wpa(frm, vap);
frm = ieee80211_add_qos(frm, ni);
if (ni->ni_flags & IEEE80211_NODE_HT)
frm = ieee80211_add_htcap(frm, ni);
bss->ieslen = htole32(frm - (uint8_t *)fixed);
bss->len = htole32(((frm - buf) + 3) & ~3);
DPRINTF("sending join bss command to %s chan %d\n",
ether_sprintf(bss->macaddr), le32toh(bss->config.dsconfig));
return (rsu_fw_cmd(sc, R92S_CMD_JOIN_BSS, buf, sizeof(buf)));
}
static int
rsu_disconnect(struct rsu_softc *sc)
{
uint32_t zero = 0; /* :-) */
/* Disassociate from our current BSS. */
DPRINTF("sending disconnect command\n");
return (rsu_fw_cmd(sc, R92S_CMD_DISCONNECT, &zero, sizeof(zero)));
}
static void
rsu_event_survey(struct rsu_softc *sc, uint8_t *buf, int len)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame *wh;
struct ieee80211_channel *c;
struct ndis_wlan_bssid_ex *bss;
struct mbuf *m;
int pktlen;
if (__predict_false(len < sizeof(*bss)))
return;
bss = (struct ndis_wlan_bssid_ex *)buf;
if (__predict_false(len < sizeof(*bss) + le32toh(bss->ieslen)))
return;
DPRINTFN(2, "found BSS %s: len=%d chan=%d inframode=%d "
"networktype=%d privacy=%d\n",
ether_sprintf(bss->macaddr), le32toh(bss->len),
le32toh(bss->config.dsconfig), le32toh(bss->inframode),
le32toh(bss->networktype), le32toh(bss->privacy));
/* Build a fake beacon frame to let net80211 do all the parsing. */
pktlen = sizeof(*wh) + le32toh(bss->ieslen);
if (__predict_false(pktlen > MCLBYTES))
return;
m = m_get2(pktlen, M_NOWAIT, MT_DATA, M_PKTHDR);
if (__predict_false(m == NULL))
return;
wh = mtod(m, struct ieee80211_frame *);
wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
IEEE80211_FC0_SUBTYPE_BEACON;
wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
USETW(wh->i_dur, 0);
- IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr);
IEEE80211_ADDR_COPY(wh->i_addr2, bss->macaddr);
IEEE80211_ADDR_COPY(wh->i_addr3, bss->macaddr);
*(uint16_t *)wh->i_seq = 0;
memcpy(&wh[1], (uint8_t *)&bss[1], le32toh(bss->ieslen));
/* Finalize mbuf. */
m->m_pkthdr.len = m->m_len = pktlen;
- m->m_pkthdr.rcvif = ifp;
/* Fix the channel. */
c = ieee80211_find_channel_byieee(ic,
le32toh(bss->config.dsconfig),
IEEE80211_CHAN_G);
if (c) {
ic->ic_curchan = c;
ieee80211_radiotap_chan_change(ic);
}
/* XXX avoid a LOR */
RSU_UNLOCK(sc);
ieee80211_input_all(ic, m, le32toh(bss->rssi), 0);
RSU_LOCK(sc);
}
static void
rsu_event_join_bss(struct rsu_softc *sc, uint8_t *buf, int len)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ieee80211_node *ni = vap->iv_bss;
struct r92s_event_join_bss *rsp;
uint32_t tmp;
int res;
if (__predict_false(len < sizeof(*rsp)))
return;
rsp = (struct r92s_event_join_bss *)buf;
res = (int)le32toh(rsp->join_res);
DPRINTF("Rx join BSS event len=%d res=%d\n", len, res);
if (res <= 0) {
RSU_UNLOCK(sc);
ieee80211_new_state(vap, IEEE80211_S_SCAN, -1);
RSU_LOCK(sc);
return;
}
tmp = le32toh(rsp->associd);
if (tmp >= vap->iv_max_aid) {
DPRINTF("Assoc ID overflow\n");
tmp = 1;
}
DPRINTF("associated with %s associd=%d\n",
ether_sprintf(rsp->bss.macaddr), tmp);
ni->ni_associd = tmp | 0xc000;
RSU_UNLOCK(sc);
ieee80211_new_state(vap, IEEE80211_S_RUN,
IEEE80211_FC0_SUBTYPE_ASSOC_RESP);
RSU_LOCK(sc);
}
static void
rsu_rx_event(struct rsu_softc *sc, uint8_t code, uint8_t *buf, int len)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
DPRINTFN(4, "Rx event code=%d len=%d\n", code, len);
switch (code) {
case R92S_EVT_SURVEY:
if (vap->iv_state == IEEE80211_S_SCAN)
rsu_event_survey(sc, buf, len);
break;
case R92S_EVT_SURVEY_DONE:
DPRINTF("site survey pass %d done, found %d BSS\n",
- sc->scan_pass, le32toh(*(uint32_t *)buf));
+ sc->sc_scan_pass, le32toh(*(uint32_t *)buf));
if (vap->iv_state != IEEE80211_S_SCAN)
break; /* Ignore if not scanning. */
- if (sc->scan_pass == 0 && vap->iv_des_nssid != 0) {
+ if (sc->sc_scan_pass == 0 && vap->iv_des_nssid != 0) {
/* Schedule a directed scan for hidden APs. */
- sc->scan_pass = 1;
+ sc->sc_scan_pass = 1;
RSU_UNLOCK(sc);
ieee80211_new_state(vap, IEEE80211_S_SCAN, -1);
RSU_LOCK(sc);
break;
}
- sc->scan_pass = 0;
+ sc->sc_scan_pass = 0;
break;
case R92S_EVT_JOIN_BSS:
if (vap->iv_state == IEEE80211_S_AUTH)
rsu_event_join_bss(sc, buf, len);
break;
#if 0
XXX This event is occurring regularly, possibly due to some power saving event
XXX and disrupts the WLAN traffic. Disable for now.
case R92S_EVT_DEL_STA:
DPRINTF("disassociated from %s\n", ether_sprintf(buf));
if (vap->iv_state == IEEE80211_S_RUN &&
IEEE80211_ADDR_EQ(vap->iv_bss->ni_bssid, buf)) {
RSU_UNLOCK(sc);
ieee80211_new_state(vap, IEEE80211_S_SCAN, -1);
RSU_LOCK(sc);
}
break;
#endif
case R92S_EVT_WPS_PBC:
DPRINTF("WPS PBC pushed.\n");
break;
case R92S_EVT_FWDBG:
- if (ifp->if_flags & IFF_DEBUG) {
+ if (vap->iv_ifp->if_flags & IFF_DEBUG) {
buf[60] = '\0';
printf("FWDBG: %s\n", (char *)buf);
}
break;
default:
break;
}
}
static void
rsu_rx_multi_event(struct rsu_softc *sc, uint8_t *buf, int len)
{
struct r92s_fw_cmd_hdr *cmd;
int cmdsz;
DPRINTFN(6, "Rx events len=%d\n", len);
/* Skip Rx status. */
buf += sizeof(struct r92s_rx_stat);
len -= sizeof(struct r92s_rx_stat);
/* Process all events. */
for (;;) {
/* Check that command header fits. */
if (__predict_false(len < sizeof(*cmd)))
break;
cmd = (struct r92s_fw_cmd_hdr *)buf;
/* Check that command payload fits. */
cmdsz = le16toh(cmd->len);
if (__predict_false(len < sizeof(*cmd) + cmdsz))
break;
/* Process firmware event. */
rsu_rx_event(sc, cmd->code, (uint8_t *)&cmd[1], cmdsz);
if (!(cmd->seq & R92S_FW_CMD_MORE))
break;
buf += sizeof(*cmd) + cmdsz;
len -= sizeof(*cmd) + cmdsz;
}
}
static int8_t
rsu_get_rssi(struct rsu_softc *sc, int rate, void *physt)
{
static const int8_t cckoff[] = { 14, -2, -20, -40 };
struct r92s_rx_phystat *phy;
struct r92s_rx_cck *cck;
uint8_t rpt;
int8_t rssi;
if (rate <= 3) {
cck = (struct r92s_rx_cck *)physt;
rpt = (cck->agc_rpt >> 6) & 0x3;
rssi = cck->agc_rpt & 0x3e;
rssi = cckoff[rpt] - rssi;
} else { /* OFDM/HT. */
phy = (struct r92s_rx_phystat *)physt;
rssi = ((le32toh(phy->phydw1) >> 1) & 0x7f) - 106;
}
return (rssi);
}
static struct mbuf *
rsu_rx_frame(struct rsu_softc *sc, uint8_t *buf, int pktlen, int *rssi)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame *wh;
struct r92s_rx_stat *stat;
uint32_t rxdw0, rxdw3;
struct mbuf *m;
uint8_t rate;
int infosz;
stat = (struct r92s_rx_stat *)buf;
rxdw0 = le32toh(stat->rxdw0);
rxdw3 = le32toh(stat->rxdw3);
if (__predict_false(rxdw0 & R92S_RXDW0_CRCERR)) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return NULL;
}
if (__predict_false(pktlen < sizeof(*wh) || pktlen > MCLBYTES)) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return NULL;
}
rate = MS(rxdw3, R92S_RXDW3_RATE);
infosz = MS(rxdw0, R92S_RXDW0_INFOSZ) * 8;
/* Get RSSI from PHY status descriptor if present. */
if (infosz != 0)
*rssi = rsu_get_rssi(sc, rate, &stat[1]);
else
*rssi = 0;
DPRINTFN(5, "Rx frame len=%d rate=%d infosz=%d rssi=%d\n",
pktlen, rate, infosz, *rssi);
m = m_get2(pktlen, M_NOWAIT, MT_DATA, M_PKTHDR);
if (__predict_false(m == NULL)) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return NULL;
}
- /* Finalize mbuf. */
- m->m_pkthdr.rcvif = ifp;
/* Hardware does Rx TCP checksum offload. */
if (rxdw3 & R92S_RXDW3_TCPCHKVALID) {
if (__predict_true(rxdw3 & R92S_RXDW3_TCPCHKRPT))
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
}
wh = (struct ieee80211_frame *)((uint8_t *)&stat[1] + infosz);
memcpy(mtod(m, uint8_t *), wh, pktlen);
m->m_pkthdr.len = m->m_len = pktlen;
if (ieee80211_radiotap_active(ic)) {
struct rsu_rx_radiotap_header *tap = &sc->sc_rxtap;
/* Map HW rate index to 802.11 rate. */
tap->wr_flags = 2;
if (!(rxdw3 & R92S_RXDW3_HTC)) {
switch (rate) {
/* CCK. */
case 0: tap->wr_rate = 2; break;
case 1: tap->wr_rate = 4; break;
case 2: tap->wr_rate = 11; break;
case 3: tap->wr_rate = 22; break;
/* OFDM. */
case 4: tap->wr_rate = 12; break;
case 5: tap->wr_rate = 18; break;
case 6: tap->wr_rate = 24; break;
case 7: tap->wr_rate = 36; break;
case 8: tap->wr_rate = 48; break;
case 9: tap->wr_rate = 72; break;
case 10: tap->wr_rate = 96; break;
case 11: tap->wr_rate = 108; break;
}
} else if (rate >= 12) { /* MCS0~15. */
/* Bit 7 set means HT MCS instead of rate. */
tap->wr_rate = 0x80 | (rate - 12);
}
tap->wr_dbm_antsignal = *rssi;
tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq);
tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
}
return (m);
}
static struct mbuf *
rsu_rx_multi_frame(struct rsu_softc *sc, uint8_t *buf, int len, int *rssi)
{
struct r92s_rx_stat *stat;
uint32_t rxdw0;
int totlen, pktlen, infosz, npkts;
struct mbuf *m, *m0 = NULL, *prevm = NULL;
/* Get the number of encapsulated frames. */
stat = (struct r92s_rx_stat *)buf;
npkts = MS(le32toh(stat->rxdw2), R92S_RXDW2_PKTCNT);
DPRINTFN(6, "Rx %d frames in one chunk\n", npkts);
/* Process all of them. */
while (npkts-- > 0) {
if (__predict_false(len < sizeof(*stat)))
break;
stat = (struct r92s_rx_stat *)buf;
rxdw0 = le32toh(stat->rxdw0);
pktlen = MS(rxdw0, R92S_RXDW0_PKTLEN);
if (__predict_false(pktlen == 0))
break;
infosz = MS(rxdw0, R92S_RXDW0_INFOSZ) * 8;
/* Make sure everything fits in xfer. */
totlen = sizeof(*stat) + infosz + pktlen;
if (__predict_false(totlen > len))
break;
/* Process 802.11 frame. */
m = rsu_rx_frame(sc, buf, pktlen, rssi);
if (m0 == NULL)
m0 = m;
if (prevm == NULL)
prevm = m;
else {
prevm->m_next = m;
prevm = m;
}
/* Next chunk is 128-byte aligned. */
totlen = (totlen + 127) & ~127;
buf += totlen;
len -= totlen;
}
return (m0);
}
static struct mbuf *
rsu_rxeof(struct usb_xfer *xfer, struct rsu_data *data, int *rssi)
{
struct rsu_softc *sc = data->sc;
+ struct ieee80211com *ic = &sc->sc_ic;
struct r92s_rx_stat *stat;
int len;
usbd_xfer_status(xfer, &len, NULL, NULL, NULL);
if (__predict_false(len < sizeof(*stat))) {
DPRINTF("xfer too short %d\n", len);
- if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return (NULL);
}
/* Determine if it is a firmware C2H event or an 802.11 frame. */
stat = (struct r92s_rx_stat *)data->buf;
if ((le32toh(stat->rxdw1) & 0x1ff) == 0x1ff) {
rsu_rx_multi_event(sc, data->buf, len);
/* No packets to process. */
return (NULL);
} else
return (rsu_rx_multi_frame(sc, data->buf, len, rssi));
}
static void
rsu_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct rsu_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct mbuf *m = NULL, *next;
struct rsu_data *data;
int rssi = 1;
RSU_ASSERT_LOCKED(sc);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
data = STAILQ_FIRST(&sc->sc_rx_active);
if (data == NULL)
goto tr_setup;
STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
m = rsu_rxeof(xfer, data, &rssi);
STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
data = STAILQ_FIRST(&sc->sc_rx_inactive);
if (data == NULL) {
KASSERT(m == NULL, ("mbuf isn't NULL"));
return;
}
STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next);
STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next);
usbd_xfer_set_frame_data(xfer, 0, data->buf,
usbd_xfer_max_len(xfer));
usbd_transfer_submit(xfer);
/*
* To avoid a LOR, unlock our private mutex here before calling
* ieee80211_input(); we are at the end of a USB callback, so it
* is safe to unlock.
*/
RSU_UNLOCK(sc);
while (m != NULL) {
next = m->m_next;
m->m_next = NULL;
wh = mtod(m, struct ieee80211_frame *);
ni = ieee80211_find_rxnode(ic,
(struct ieee80211_frame_min *)wh);
if (ni != NULL) {
(void)ieee80211_input(ni, m, rssi, 0);
ieee80211_free_node(ni);
} else
(void)ieee80211_input_all(ic, m, rssi, 0);
m = next;
}
RSU_LOCK(sc);
break;
default:
/* return the buffer to the inactive queue due to an error. */
data = STAILQ_FIRST(&sc->sc_rx_active);
if (data != NULL) {
STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
}
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto tr_setup;
}
break;
}
}
-
static void
rsu_txeof(struct usb_xfer *xfer, struct rsu_data *data)
{
- struct rsu_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct mbuf *m;
- RSU_ASSERT_LOCKED(sc);
-
- /*
- * Do any tx complete callback. Note this must be done before releasing
- * the node reference.
- */
if (data->m) {
- m = data->m;
- if (m->m_flags & M_TXCB) {
- /* XXX status? */
- ieee80211_process_callback(data->ni, m, 0);
- }
- m_freem(m);
+ /* XXX status? */
+ ieee80211_tx_complete(data->ni, data->m, 0);
data->m = NULL;
- }
- if (data->ni) {
- ieee80211_free_node(data->ni);
data->ni = NULL;
}
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
static void
rsu_bulk_tx_callback_sub(struct usb_xfer *xfer, usb_error_t error,
uint8_t which)
{
struct rsu_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
struct rsu_data *data;
RSU_ASSERT_LOCKED(sc);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
data = STAILQ_FIRST(&sc->sc_tx_active[which]);
if (data == NULL)
goto tr_setup;
DPRINTF("transfer done %p\n", data);
STAILQ_REMOVE_HEAD(&sc->sc_tx_active[which], next);
rsu_txeof(xfer, data);
STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next);
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
data = STAILQ_FIRST(&sc->sc_tx_pending[which]);
if (data == NULL) {
DPRINTF("empty pending queue sc %p\n", sc);
return;
}
STAILQ_REMOVE_HEAD(&sc->sc_tx_pending[which], next);
STAILQ_INSERT_TAIL(&sc->sc_tx_active[which], data, next);
usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen);
DPRINTF("submitting transfer %p\n", data);
usbd_transfer_submit(xfer);
break;
default:
data = STAILQ_FIRST(&sc->sc_tx_active[which]);
if (data != NULL) {
STAILQ_REMOVE_HEAD(&sc->sc_tx_active[which], next);
rsu_txeof(xfer, data);
STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next);
}
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(ic->ic_oerrors, 1);
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
goto tr_setup;
}
break;
}
}
static void
rsu_bulk_tx_callback_be_bk(struct usb_xfer *xfer, usb_error_t error)
{
rsu_bulk_tx_callback_sub(xfer, error, RSU_BULK_TX_BE_BK);
}
static void
rsu_bulk_tx_callback_vi_vo(struct usb_xfer *xfer, usb_error_t error)
{
rsu_bulk_tx_callback_sub(xfer, error, RSU_BULK_TX_VI_VO);
}
static int
rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni,
struct mbuf *m0, struct rsu_data *data)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211_frame *wh;
struct ieee80211_key *k = NULL;
struct r92s_tx_desc *txd;
uint8_t type;
uint8_t tid = 0;
uint8_t which;
int hasqos;
int xferlen;
RSU_ASSERT_LOCKED(sc);
wh = mtod(m0, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
device_printf(sc->sc_dev,
"ieee80211_crypto_encap returns NULL.\n");
/* XXX we don't expect fragmented frames */
m_freem(m0);
return (ENOBUFS);
}
wh = mtod(m0, struct ieee80211_frame *);
}
switch (type) {
case IEEE80211_FC0_TYPE_CTL:
case IEEE80211_FC0_TYPE_MGT:
which = rsu_wme_ac_xfer_map[WME_AC_VO];
break;
default:
which = rsu_wme_ac_xfer_map[M_WME_GETAC(m0)];
break;
}
hasqos = 0;
/* Fill Tx descriptor. */
txd = (struct r92s_tx_desc *)data->buf;
memset(txd, 0, sizeof(*txd));
txd->txdw0 |= htole32(
SM(R92S_TXDW0_PKTLEN, m0->m_pkthdr.len) |
SM(R92S_TXDW0_OFFSET, sizeof(*txd)) |
R92S_TXDW0_OWN | R92S_TXDW0_FSG | R92S_TXDW0_LSG);
txd->txdw1 |= htole32(
SM(R92S_TXDW1_MACID, R92S_MACID_BSS) |
SM(R92S_TXDW1_QSEL, R92S_TXDW1_QSEL_BE));
if (!hasqos)
txd->txdw1 |= htole32(R92S_TXDW1_NONQOS);
#ifdef notyet
if (k != NULL) {
switch (k->wk_cipher->ic_cipher) {
case IEEE80211_CIPHER_WEP:
cipher = R92S_TXDW1_CIPHER_WEP;
break;
case IEEE80211_CIPHER_TKIP:
cipher = R92S_TXDW1_CIPHER_TKIP;
break;
case IEEE80211_CIPHER_AES_CCM:
cipher = R92S_TXDW1_CIPHER_AES;
break;
default:
cipher = R92S_TXDW1_CIPHER_NONE;
}
txd->txdw1 |= htole32(
SM(R92S_TXDW1_CIPHER, cipher) |
SM(R92S_TXDW1_KEYIDX, k->k_id));
}
#endif
txd->txdw2 |= htole32(R92S_TXDW2_BK);
if (IEEE80211_IS_MULTICAST(wh->i_addr1))
txd->txdw2 |= htole32(R92S_TXDW2_BMCAST);
/*
* Firmware will use and increment the sequence number for the
* specified TID.
*/
txd->txdw3 |= htole32(SM(R92S_TXDW3_SEQ, tid));
if (ieee80211_radiotap_active_vap(vap)) {
struct rsu_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq);
tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags);
ieee80211_radiotap_tx(vap, m0);
}
xferlen = sizeof(*txd) + m0->m_pkthdr.len;
m_copydata(m0, 0, m0->m_pkthdr.len, (caddr_t)&txd[1]);
data->buflen = xferlen;
data->ni = ni;
data->m = m0;
STAILQ_INSERT_TAIL(&sc->sc_tx_pending[which], data, next);
/* start transfer, if any */
usbd_transfer_start(sc->sc_xfer[which]);
return (0);
}
-static void
-rsu_start(struct ifnet *ifp)
+static int
+rsu_transmit(struct ieee80211com *ic, struct mbuf *m)
{
- struct rsu_softc *sc = ifp->if_softc;
+ struct rsu_softc *sc = ic->ic_softc;
+ int error;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
-
RSU_LOCK(sc);
- rsu_start_locked(ifp);
+ if (!sc->sc_running) {
+ RSU_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ RSU_UNLOCK(sc);
+ return (error);
+ }
+ rsu_start(sc);
RSU_UNLOCK(sc);
+
+ return (0);
}
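/*
 * rsu_transmit() queues frames on the driver-private mbufq under the
 * softc lock; rsu_start() drains that queue, pairing each frame with a
 * free Tx buffer before handing it to rsu_tx_start().
 */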
static void
-rsu_start_locked(struct ifnet *ifp)
+rsu_start(struct rsu_softc *sc)
{
- struct rsu_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct rsu_data *bf;
struct mbuf *m;
RSU_ASSERT_LOCKED(sc);
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
+ while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
+ bf = rsu_getbuf(sc);
+ if (bf == NULL) {
+ mbufq_prepend(&sc->sc_snd, m);
break;
+ }
+
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
m->m_pkthdr.rcvif = NULL;
- bf = rsu_getbuf(sc);
- if (bf == NULL) {
- if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
- m_freem(m);
- ieee80211_free_node(ni);
- } else if (rsu_tx_start(sc, ni, m, bf) != 0) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ if (rsu_tx_start(sc, ni, m, bf) != 0) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next);
ieee80211_free_node(ni);
+ break;
}
}
}
-static int
-rsu_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+rsu_parent(struct ieee80211com *ic)
{
- struct ieee80211com *ic = ifp->if_l2com;
struct rsu_softc *sc = ic->ic_softc;
- struct ifreq *ifr = (struct ifreq *) data;
- int error = 0, startall = 0;
+ int startall = 0;
- switch (cmd) {
- case SIOCSIFFLAGS:
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
- rsu_init(sc);
- startall = 1;
- }
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- rsu_stop(ifp, 1);
+ RSU_LOCK(sc);
+ if (ic->ic_nrunning > 0) {
+ if (!sc->sc_running) {
+ rsu_init(sc);
+ startall = 1;
}
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
- }
+ } else if (sc->sc_running)
+ rsu_stop(sc);
+ RSU_UNLOCK(sc);
- return (error);
+ if (startall)
+ ieee80211_start_all(ic);
}
/*
* Power on sequence for A-cut adapters.
*/
static void
rsu_power_on_acut(struct rsu_softc *sc)
{
uint32_t reg;
rsu_write_1(sc, R92S_SPS0_CTRL + 1, 0x53);
rsu_write_1(sc, R92S_SPS0_CTRL + 0, 0x57);
/* Enable AFE macro block's bandgap and Mbias. */
rsu_write_1(sc, R92S_AFE_MISC,
rsu_read_1(sc, R92S_AFE_MISC) |
R92S_AFE_MISC_BGEN | R92S_AFE_MISC_MBEN);
/* Enable LDOA15 block. */
rsu_write_1(sc, R92S_LDOA15_CTRL,
rsu_read_1(sc, R92S_LDOA15_CTRL) | R92S_LDA15_EN);
rsu_write_1(sc, R92S_SPS1_CTRL,
rsu_read_1(sc, R92S_SPS1_CTRL) | R92S_SPS1_LDEN);
usb_pause_mtx(&sc->sc_mtx, 2 * hz);
/* Enable switch regulator block. */
rsu_write_1(sc, R92S_SPS1_CTRL,
rsu_read_1(sc, R92S_SPS1_CTRL) | R92S_SPS1_SWEN);
rsu_write_4(sc, R92S_SPS1_CTRL, 0x00a7b267);
rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1,
rsu_read_1(sc, R92S_SYS_ISO_CTRL + 1) | 0x08);
rsu_write_1(sc, R92S_SYS_FUNC_EN + 1,
rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x20);
rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1,
rsu_read_1(sc, R92S_SYS_ISO_CTRL + 1) & ~0x90);
/* Enable AFE clock. */
rsu_write_1(sc, R92S_AFE_XTAL_CTRL + 1,
rsu_read_1(sc, R92S_AFE_XTAL_CTRL + 1) & ~0x04);
/* Enable AFE PLL macro block. */
rsu_write_1(sc, R92S_AFE_PLL_CTRL,
rsu_read_1(sc, R92S_AFE_PLL_CTRL) | 0x11);
/* Attach AFE PLL to MACTOP/BB. */
rsu_write_1(sc, R92S_SYS_ISO_CTRL,
rsu_read_1(sc, R92S_SYS_ISO_CTRL) & ~0x11);
/* Switch to 40MHz clock instead of 80MHz. */
rsu_write_2(sc, R92S_SYS_CLKR,
rsu_read_2(sc, R92S_SYS_CLKR) & ~R92S_SYS_CLKSEL);
/* Enable MAC clock. */
rsu_write_2(sc, R92S_SYS_CLKR,
rsu_read_2(sc, R92S_SYS_CLKR) |
R92S_MAC_CLK_EN | R92S_SYS_CLK_EN);
rsu_write_1(sc, R92S_PMC_FSM, 0x02);
/* Enable digital core and IOREG R/W. */
rsu_write_1(sc, R92S_SYS_FUNC_EN + 1,
rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x08);
rsu_write_1(sc, R92S_SYS_FUNC_EN + 1,
rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x80);
/* Switch the control path to firmware. */
reg = rsu_read_2(sc, R92S_SYS_CLKR);
reg = (reg & ~R92S_SWHW_SEL) | R92S_FWHW_SEL;
rsu_write_2(sc, R92S_SYS_CLKR, reg);
rsu_write_2(sc, R92S_CR, 0x37fc);
/* Fix USB RX FIFO issue. */
rsu_write_1(sc, 0xfe5c,
rsu_read_1(sc, 0xfe5c) | 0x80);
rsu_write_1(sc, 0x00ab,
rsu_read_1(sc, 0x00ab) | 0xc0);
rsu_write_1(sc, R92S_SYS_CLKR,
rsu_read_1(sc, R92S_SYS_CLKR) & ~R92S_SYS_CPU_CLKSEL);
}
/*
* Power on sequence for B-cut and C-cut adapters.
*/
static void
rsu_power_on_bcut(struct rsu_softc *sc)
{
uint32_t reg;
int ntries;
/* Prevent eFuse leakage. */
rsu_write_1(sc, 0x37, 0xb0);
usb_pause_mtx(&sc->sc_mtx, hz / 100);
rsu_write_1(sc, 0x37, 0x30);
/* Switch the control path to hardware. */
reg = rsu_read_2(sc, R92S_SYS_CLKR);
if (reg & R92S_FWHW_SEL) {
rsu_write_2(sc, R92S_SYS_CLKR,
reg & ~(R92S_SWHW_SEL | R92S_FWHW_SEL));
}
rsu_write_1(sc, R92S_SYS_FUNC_EN + 1,
rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) & ~0x8c);
rsu_ms_delay(sc);
rsu_write_1(sc, R92S_SPS0_CTRL + 1, 0x53);
rsu_write_1(sc, R92S_SPS0_CTRL + 0, 0x57);
reg = rsu_read_1(sc, R92S_AFE_MISC);
rsu_write_1(sc, R92S_AFE_MISC, reg | R92S_AFE_MISC_BGEN);
rsu_write_1(sc, R92S_AFE_MISC, reg | R92S_AFE_MISC_BGEN |
R92S_AFE_MISC_MBEN | R92S_AFE_MISC_I32_EN);
/* Enable PLL. */
rsu_write_1(sc, R92S_LDOA15_CTRL,
rsu_read_1(sc, R92S_LDOA15_CTRL) | R92S_LDA15_EN);
rsu_write_1(sc, R92S_LDOV12D_CTRL,
rsu_read_1(sc, R92S_LDOV12D_CTRL) | R92S_LDV12_EN);
rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1,
rsu_read_1(sc, R92S_SYS_ISO_CTRL + 1) | 0x08);
rsu_write_1(sc, R92S_SYS_FUNC_EN + 1,
rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x20);
/* Support 64KB IMEM. */
rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1,
rsu_read_1(sc, R92S_SYS_ISO_CTRL + 1) & ~0x97);
/* Enable AFE clock. */
rsu_write_1(sc, R92S_AFE_XTAL_CTRL + 1,
rsu_read_1(sc, R92S_AFE_XTAL_CTRL + 1) & ~0x04);
/* Enable AFE PLL macro block. */
reg = rsu_read_1(sc, R92S_AFE_PLL_CTRL);
rsu_write_1(sc, R92S_AFE_PLL_CTRL, reg | 0x11);
rsu_ms_delay(sc);
rsu_write_1(sc, R92S_AFE_PLL_CTRL, reg | 0x51);
rsu_ms_delay(sc);
rsu_write_1(sc, R92S_AFE_PLL_CTRL, reg | 0x11);
rsu_ms_delay(sc);
/* Attach AFE PLL to MACTOP/BB. */
rsu_write_1(sc, R92S_SYS_ISO_CTRL,
rsu_read_1(sc, R92S_SYS_ISO_CTRL) & ~0x11);
/* Switch to 40MHz clock. */
rsu_write_1(sc, R92S_SYS_CLKR, 0x00);
/* Disable CPU clock and 80MHz SSC. */
rsu_write_1(sc, R92S_SYS_CLKR,
rsu_read_1(sc, R92S_SYS_CLKR) | 0xa0);
/* Enable MAC clock. */
rsu_write_2(sc, R92S_SYS_CLKR,
rsu_read_2(sc, R92S_SYS_CLKR) |
R92S_MAC_CLK_EN | R92S_SYS_CLK_EN);
rsu_write_1(sc, R92S_PMC_FSM, 0x02);
/* Enable digital core and IOREG R/W. */
rsu_write_1(sc, R92S_SYS_FUNC_EN + 1,
rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x08);
rsu_write_1(sc, R92S_SYS_FUNC_EN + 1,
rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x80);
/* Switch the control path to firmware. */
reg = rsu_read_2(sc, R92S_SYS_CLKR);
reg = (reg & ~R92S_SWHW_SEL) | R92S_FWHW_SEL;
rsu_write_2(sc, R92S_SYS_CLKR, reg);
rsu_write_2(sc, R92S_CR, 0x37fc);
/* Fix USB RX FIFO issue. */
rsu_write_1(sc, 0xfe5c,
rsu_read_1(sc, 0xfe5c) | 0x80);
rsu_write_1(sc, R92S_SYS_CLKR,
rsu_read_1(sc, R92S_SYS_CLKR) & ~R92S_SYS_CPU_CLKSEL);
rsu_write_1(sc, 0xfe1c, 0x80);
/* Make sure TxDMA is ready to download firmware. */
for (ntries = 0; ntries < 20; ntries++) {
reg = rsu_read_1(sc, R92S_TCR);
if ((reg & (R92S_TCR_IMEM_CHK_RPT | R92S_TCR_EMEM_CHK_RPT)) ==
(R92S_TCR_IMEM_CHK_RPT | R92S_TCR_EMEM_CHK_RPT))
break;
rsu_ms_delay(sc);
}
if (ntries == 20) {
DPRINTF("TxDMA is not ready\n");
/* Reset TxDMA. */
reg = rsu_read_1(sc, R92S_CR);
rsu_write_1(sc, R92S_CR, reg & ~R92S_CR_TXDMA_EN);
rsu_ms_delay(sc);
rsu_write_1(sc, R92S_CR, reg | R92S_CR_TXDMA_EN);
}
}
static void
rsu_power_off(struct rsu_softc *sc)
{
/* Turn RF off. */
rsu_write_1(sc, R92S_RF_CTRL, 0x00);
usb_pause_mtx(&sc->sc_mtx, hz / 200);
/* Turn MAC off. */
/* Switch control path. */
rsu_write_1(sc, R92S_SYS_CLKR + 1, 0x38);
/* Reset MACTOP. */
rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, 0x70);
rsu_write_1(sc, R92S_PMC_FSM, 0x06);
rsu_write_1(sc, R92S_SYS_ISO_CTRL + 0, 0xf9);
rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1, 0xe8);
/* Disable AFE PLL. */
rsu_write_1(sc, R92S_AFE_PLL_CTRL, 0x00);
/* Disable A15V. */
rsu_write_1(sc, R92S_LDOA15_CTRL, 0x54);
/* Disable eFuse 1.2V. */
rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, 0x50);
rsu_write_1(sc, R92S_LDOV12D_CTRL, 0x24);
/* Enable AFE macro block's bandgap and Mbias. */
rsu_write_1(sc, R92S_AFE_MISC, 0x30);
/* Disable 1.6V LDO. */
rsu_write_1(sc, R92S_SPS0_CTRL + 0, 0x56);
rsu_write_1(sc, R92S_SPS0_CTRL + 1, 0x43);
}
static int
rsu_fw_loadsection(struct rsu_softc *sc, const uint8_t *buf, int len)
{
const uint8_t which = rsu_wme_ac_xfer_map[WME_AC_VO];
struct rsu_data *data;
struct r92s_tx_desc *txd;
int mlen;
while (len > 0) {
data = rsu_getbuf(sc);
if (data == NULL)
return (ENOMEM);
txd = (struct r92s_tx_desc *)data->buf;
memset(txd, 0, sizeof(*txd));
if (len <= RSU_TXBUFSZ - sizeof(*txd)) {
/* Last chunk. */
txd->txdw0 |= htole32(R92S_TXDW0_LINIP);
mlen = len;
} else
mlen = RSU_TXBUFSZ - sizeof(*txd);
txd->txdw0 |= htole32(SM(R92S_TXDW0_PKTLEN, mlen));
memcpy(&txd[1], buf, mlen);
data->buflen = sizeof(*txd) + mlen;
DPRINTF("starting transfer %p\n", data);
STAILQ_INSERT_TAIL(&sc->sc_tx_pending[which], data, next);
buf += mlen;
len -= mlen;
}
usbd_transfer_start(sc->sc_xfer[which]);
return (0);
}
static int
rsu_load_firmware(struct rsu_softc *sc)
{
const struct r92s_fw_hdr *hdr;
struct r92s_fw_priv *dmem;
const uint8_t *imem, *emem;
int imemsz, ememsz;
const struct firmware *fw;
size_t size;
uint32_t reg;
int ntries, error;
if (rsu_read_1(sc, R92S_TCR) & R92S_TCR_FWRDY) {
DPRINTF("Firmware already loaded\n");
return (0);
}
RSU_UNLOCK(sc);
/* Read firmware image from the filesystem. */
if ((fw = firmware_get("rsu-rtl8712fw")) == NULL) {
device_printf(sc->sc_dev,
"%s: failed load firmware of file rsu-rtl8712fw\n",
__func__);
RSU_LOCK(sc);
return (ENXIO);
}
RSU_LOCK(sc);
size = fw->datasize;
if (size < sizeof(*hdr)) {
device_printf(sc->sc_dev, "firmware too short\n");
error = EINVAL;
goto fail;
}
hdr = (const struct r92s_fw_hdr *)fw->data;
if (hdr->signature != htole16(0x8712) &&
hdr->signature != htole16(0x8192)) {
device_printf(sc->sc_dev,
"invalid firmware signature 0x%x\n",
le16toh(hdr->signature));
error = EINVAL;
goto fail;
}
DPRINTF("FW V%d %02x-%02x %02x:%02x\n", le16toh(hdr->version),
hdr->month, hdr->day, hdr->hour, hdr->minute);
/* Make sure that driver and firmware are in sync. */
if (hdr->privsz != htole32(sizeof(*dmem))) {
device_printf(sc->sc_dev, "unsupported firmware image\n");
error = EINVAL;
goto fail;
}
/* Get FW sections sizes. */
imemsz = le32toh(hdr->imemsz);
ememsz = le32toh(hdr->sramsz);
/* Check that all FW sections fit in image. */
if (size < sizeof(*hdr) + imemsz + ememsz) {
device_printf(sc->sc_dev, "firmware too short\n");
error = EINVAL;
goto fail;
}
imem = (const uint8_t *)&hdr[1];
emem = imem + imemsz;
/* Load IMEM section. */
error = rsu_fw_loadsection(sc, imem, imemsz);
if (error != 0) {
device_printf(sc->sc_dev,
"could not load firmware section %s\n", "IMEM");
goto fail;
}
/* Wait for load to complete. */
for (ntries = 0; ntries != 50; ntries++) {
usb_pause_mtx(&sc->sc_mtx, hz / 100);
reg = rsu_read_1(sc, R92S_TCR);
if (reg & R92S_TCR_IMEM_CODE_DONE)
break;
}
if (ntries == 50) {
device_printf(sc->sc_dev, "timeout waiting for IMEM transfer\n");
error = ETIMEDOUT;
goto fail;
}
/* Load EMEM section. */
error = rsu_fw_loadsection(sc, emem, ememsz);
if (error != 0) {
device_printf(sc->sc_dev,
"could not load firmware section %s\n", "EMEM");
goto fail;
}
/* Wait for load to complete. */
for (ntries = 0; ntries != 50; ntries++) {
usb_pause_mtx(&sc->sc_mtx, hz / 100);
reg = rsu_read_2(sc, R92S_TCR);
if (reg & R92S_TCR_EMEM_CODE_DONE)
break;
}
if (ntries == 50) {
device_printf(sc->sc_dev, "timeout waiting for EMEM transfer\n");
error = ETIMEDOUT;
goto fail;
}
/* Enable CPU. */
rsu_write_1(sc, R92S_SYS_CLKR,
rsu_read_1(sc, R92S_SYS_CLKR) | R92S_SYS_CPU_CLKSEL);
if (!(rsu_read_1(sc, R92S_SYS_CLKR) & R92S_SYS_CPU_CLKSEL)) {
device_printf(sc->sc_dev, "could not enable system clock\n");
error = EIO;
goto fail;
}
rsu_write_2(sc, R92S_SYS_FUNC_EN,
rsu_read_2(sc, R92S_SYS_FUNC_EN) | R92S_FEN_CPUEN);
if (!(rsu_read_2(sc, R92S_SYS_FUNC_EN) & R92S_FEN_CPUEN)) {
device_printf(sc->sc_dev,
"could not enable microcontroller\n");
error = EIO;
goto fail;
}
/* Wait for CPU to initialize. */
for (ntries = 0; ntries < 100; ntries++) {
if (rsu_read_1(sc, R92S_TCR) & R92S_TCR_IMEM_RDY)
break;
rsu_ms_delay(sc);
}
if (ntries == 100) {
device_printf(sc->sc_dev,
"timeout waiting for microcontroller\n");
error = ETIMEDOUT;
goto fail;
}
/* Update DMEM section before loading. */
dmem = __DECONST(struct r92s_fw_priv *, &hdr->priv);
memset(dmem, 0, sizeof(*dmem));
dmem->hci_sel = R92S_HCI_SEL_USB | R92S_HCI_SEL_8172;
dmem->nendpoints = 0;
dmem->rf_config = 0x12; /* 1T2R */
dmem->vcs_type = R92S_VCS_TYPE_AUTO;
dmem->vcs_mode = R92S_VCS_MODE_RTS_CTS;
#ifdef notyet
dmem->bw40_en = (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) != 0;
#endif
dmem->turbo_mode = 1;
/* Load DMEM section. */
error = rsu_fw_loadsection(sc, (uint8_t *)dmem, sizeof(*dmem));
if (error != 0) {
device_printf(sc->sc_dev,
"could not load firmware section %s\n", "DMEM");
goto fail;
}
/* Wait for load to complete. */
for (ntries = 0; ntries < 100; ntries++) {
if (rsu_read_1(sc, R92S_TCR) & R92S_TCR_DMEM_CODE_DONE)
break;
rsu_ms_delay(sc);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "timeout waiting for %s transfer\n",
"DMEM");
error = ETIMEDOUT;
goto fail;
}
/* Wait for firmware readiness. */
for (ntries = 0; ntries < 60; ntries++) {
if (!(rsu_read_1(sc, R92S_TCR) & R92S_TCR_FWRDY))
break;
rsu_ms_delay(sc);
}
if (ntries == 60) {
device_printf(sc->sc_dev,
"timeout waiting for firmware readiness\n");
error = ETIMEDOUT;
goto fail;
}
fail:
firmware_put(fw, FIRMWARE_UNLOAD);
return (error);
}
static int
rsu_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct rsu_softc *sc = ic->ic_softc;
struct rsu_data *bf;
/* prevent management frames from being sent if we're not ready */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!sc->sc_running) {
m_freem(m);
ieee80211_free_node(ni);
return (ENETDOWN);
}
RSU_LOCK(sc);
bf = rsu_getbuf(sc);
if (bf == NULL) {
ieee80211_free_node(ni);
m_freem(m);
RSU_UNLOCK(sc);
return (ENOBUFS);
}
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
if (rsu_tx_start(sc, ni, m, bf) != 0) {
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next);
RSU_UNLOCK(sc);
return (EIO);
}
RSU_UNLOCK(sc);
return (0);
}
static void
-rsu_init(void *arg)
+rsu_init(struct rsu_softc *sc)
{
- struct rsu_softc *sc = arg;
-
- RSU_LOCK(sc);
- rsu_init_locked(arg);
- RSU_UNLOCK(sc);
-}
-
-static void
-rsu_init_locked(struct rsu_softc *sc)
-{
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ uint8_t macaddr[IEEE80211_ADDR_LEN];
struct r92s_set_pwr_mode cmd;
int error;
int i;
+ RSU_ASSERT_LOCKED(sc);
+
/* Init host async commands ring. */
sc->cmdq.cur = sc->cmdq.next = sc->cmdq.queued = 0;
/* Power on adapter. */
if (sc->cut == 1)
rsu_power_on_acut(sc);
else
rsu_power_on_bcut(sc);
/* Load firmware. */
error = rsu_load_firmware(sc);
if (error != 0)
goto fail;
/* Enable Rx TCP checksum offload. */
rsu_write_4(sc, R92S_RCR,
rsu_read_4(sc, R92S_RCR) | 0x04000000);
/* Append PHY status. */
rsu_write_4(sc, R92S_RCR,
rsu_read_4(sc, R92S_RCR) | 0x02000000);
rsu_write_4(sc, R92S_CR,
rsu_read_4(sc, R92S_CR) & ~0xff000000);
/* Use 128-byte pages. */
rsu_write_1(sc, 0x00b5,
rsu_read_1(sc, 0x00b5) | 0x01);
/* Enable USB Rx aggregation. */
rsu_write_1(sc, 0x00bd,
rsu_read_1(sc, 0x00bd) | 0x80);
/* Set USB Rx aggregation threshold. */
rsu_write_1(sc, 0x00d9, 0x01);
/* Set USB Rx aggregation timeout (1.7ms/4). */
rsu_write_1(sc, 0xfe5b, 0x04);
/* Fix USB Rx FIFO issue. */
rsu_write_1(sc, 0xfe5c,
rsu_read_1(sc, 0xfe5c) | 0x80);
/* Set MAC address. */
- rsu_write_region_1(sc, R92S_MACID, IF_LLADDR(ifp),
- IEEE80211_ADDR_LEN);
+ IEEE80211_ADDR_COPY(macaddr, vap ? vap->iv_myaddr : ic->ic_macaddr);
+ rsu_write_region_1(sc, R92S_MACID, macaddr, IEEE80211_ADDR_LEN);
/* It really takes 1.5 seconds for the firmware to boot: */
usb_pause_mtx(&sc->sc_mtx, (3 * hz) / 2);
- DPRINTF("setting MAC address to %s\n", ether_sprintf(IF_LLADDR(ifp)));
- error = rsu_fw_cmd(sc, R92S_CMD_SET_MAC_ADDRESS, IF_LLADDR(ifp),
+ DPRINTF("setting MAC address to %s\n", ether_sprintf(macaddr));
+ error = rsu_fw_cmd(sc, R92S_CMD_SET_MAC_ADDRESS, macaddr,
IEEE80211_ADDR_LEN);
if (error != 0) {
device_printf(sc->sc_dev, "could not set MAC address\n");
goto fail;
}
rsu_write_1(sc, R92S_USB_HRPWM,
R92S_USB_HRPWM_PS_ST_ACTIVE | R92S_USB_HRPWM_PS_ALL_ON);
memset(&cmd, 0, sizeof(cmd));
cmd.mode = R92S_PS_MODE_ACTIVE;
DPRINTF("setting ps mode to %d\n", cmd.mode);
error = rsu_fw_cmd(sc, R92S_CMD_SET_PWR_MODE, &cmd, sizeof(cmd));
if (error != 0) {
device_printf(sc->sc_dev, "could not set PS mode\n");
goto fail;
}
#if 0
if (ic->ic_htcaps & IEEE80211_HTCAP_CBW20_40) {
/* Enable 40MHz mode. */
error = rsu_fw_iocmd(sc,
SM(R92S_IOCMD_CLASS, 0xf4) |
SM(R92S_IOCMD_INDEX, 0x00) |
SM(R92S_IOCMD_VALUE, 0x0007));
if (error != 0) {
device_printf(sc->sc_dev,
"could not enable 40MHz mode\n");
goto fail;
}
}
/* Set default channel. */
ic->ic_bss->ni_chan = ic->ic_ibss_chan;
#endif
- sc->scan_pass = 0;
+ sc->sc_scan_pass = 0;
usbd_transfer_start(sc->sc_xfer[RSU_BULK_RX]);
/* We're ready to go. */
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_running = 1;
return;
fail:
/* Need to stop all failed transfers, if any */
for (i = 0; i != RSU_N_TRANSFER; i++)
usbd_transfer_stop(sc->sc_xfer[i]);
}
static void
-rsu_stop(struct ifnet *ifp, int disable)
+rsu_stop(struct rsu_softc *sc)
{
- struct rsu_softc *sc = ifp->if_softc;
-
- RSU_LOCK(sc);
- rsu_stop_locked(ifp, disable);
- RSU_UNLOCK(sc);
-}
-
-static void
-rsu_stop_locked(struct ifnet *ifp, int disable __unused)
-{
- struct rsu_softc *sc = ifp->if_softc;
int i;
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->sc_running = 0;
sc->sc_calibrating = 0;
taskqueue_cancel_timeout(taskqueue_thread, &sc->calib_task, NULL);
/* Power off adapter. */
rsu_power_off(sc);
for (i = 0; i < RSU_N_TRANSFER; i++)
usbd_transfer_stop(sc->sc_xfer[i]);
}
static void
rsu_ms_delay(struct rsu_softc *sc)
{
usb_pause_mtx(&sc->sc_mtx, hz / 1000);
}
Index: head/sys/dev/usb/wlan/if_rsureg.h
===================================================================
--- head/sys/dev/usb/wlan/if_rsureg.h (revision 287196)
+++ head/sys/dev/usb/wlan/if_rsureg.h (revision 287197)
@@ -1,770 +1,771 @@
/*-
* Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* $OpenBSD: if_rsureg.h,v 1.3 2013/04/15 09:23:01 mglocker Exp $
* $FreeBSD$
*/
/* USB Requests. */
#define R92S_REQ_REGS 0x05
/*
* MAC registers.
*/
#define R92S_SYSCFG 0x0000
#define R92S_SYS_ISO_CTRL (R92S_SYSCFG + 0x000)
#define R92S_SYS_FUNC_EN (R92S_SYSCFG + 0x002)
#define R92S_PMC_FSM (R92S_SYSCFG + 0x004)
#define R92S_SYS_CLKR (R92S_SYSCFG + 0x008)
#define R92S_EE_9346CR (R92S_SYSCFG + 0x00a)
#define R92S_AFE_MISC (R92S_SYSCFG + 0x010)
#define R92S_SPS0_CTRL (R92S_SYSCFG + 0x011)
#define R92S_SPS1_CTRL (R92S_SYSCFG + 0x018)
#define R92S_RF_CTRL (R92S_SYSCFG + 0x01f)
#define R92S_LDOA15_CTRL (R92S_SYSCFG + 0x020)
#define R92S_LDOV12D_CTRL (R92S_SYSCFG + 0x021)
#define R92S_AFE_XTAL_CTRL (R92S_SYSCFG + 0x026)
#define R92S_AFE_PLL_CTRL (R92S_SYSCFG + 0x028)
#define R92S_EFUSE_CTRL (R92S_SYSCFG + 0x030)
#define R92S_EFUSE_TEST (R92S_SYSCFG + 0x034)
#define R92S_EFUSE_CLK_CTRL (R92S_SYSCFG + 0x2f8)
#define R92S_CMDCTRL 0x0040
#define R92S_CR (R92S_CMDCTRL + 0x000)
#define R92S_TCR (R92S_CMDCTRL + 0x004)
#define R92S_RCR (R92S_CMDCTRL + 0x008)
#define R92S_MACIDSETTING 0x0050
#define R92S_MACID (R92S_MACIDSETTING + 0x000)
#define R92S_GP 0x01e0
#define R92S_GPIO_CTRL (R92S_GP + 0x00c)
#define R92S_GPIO_IO_SEL (R92S_GP + 0x00e)
#define R92S_MAC_PINMUX_CTRL (R92S_GP + 0x011)
#define R92S_IOCMD_CTRL 0x0370
#define R92S_IOCMD_DATA 0x0374
#define R92S_USB_HRPWM 0xfe58
/* Bits for R92S_SYS_FUNC_EN. */
#define R92S_FEN_CPUEN 0x0400
/* Bits for R92S_PMC_FSM. */
#define R92S_PMC_FSM_CUT_M 0x000f8000
#define R92S_PMC_FSM_CUT_S 15
/* Bits for R92S_SYS_CLKR. */
#define R92S_SYS_CLKSEL 0x0001
#define R92S_SYS_PS_CLKSEL 0x0002
#define R92S_SYS_CPU_CLKSEL 0x0004
#define R92S_MAC_CLK_EN 0x0800
#define R92S_SYS_CLK_EN 0x1000
#define R92S_SWHW_SEL 0x4000
#define R92S_FWHW_SEL 0x8000
/* Bits for R92S_EE_9346CR. */
#define R92S_9356SEL 0x10
#define R92S_EEPROM_EN 0x20
/* Bits for R92S_AFE_MISC. */
#define R92S_AFE_MISC_BGEN 0x01
#define R92S_AFE_MISC_MBEN 0x02
#define R92S_AFE_MISC_I32_EN 0x08
/* Bits for R92S_SPS1_CTRL. */
#define R92S_SPS1_LDEN 0x01
#define R92S_SPS1_SWEN 0x02
/* Bits for R92S_LDOA15_CTRL. */
#define R92S_LDA15_EN 0x01
/* Bits for R92S_LDOV12D_CTRL. */
#define R92S_LDV12_EN 0x01
/* Bits for R92C_EFUSE_CTRL. */
#define R92S_EFUSE_CTRL_DATA_M 0x000000ff
#define R92S_EFUSE_CTRL_DATA_S 0
#define R92S_EFUSE_CTRL_ADDR_M 0x0003ff00
#define R92S_EFUSE_CTRL_ADDR_S 8
#define R92S_EFUSE_CTRL_VALID 0x80000000
/* Bits for R92S_CR. */
#define R92S_CR_TXDMA_EN 0x10
/* Bits for R92S_TCR. */
#define R92S_TCR_IMEM_CODE_DONE 0x01
#define R92S_TCR_IMEM_CHK_RPT 0x02
#define R92S_TCR_EMEM_CODE_DONE 0x04
#define R92S_TCR_EMEM_CHK_RPT 0x08
#define R92S_TCR_DMEM_CODE_DONE 0x10
#define R92S_TCR_IMEM_RDY 0x20
#define R92S_TCR_FWRDY 0x80
/* Bits for R92S_GPIO_IO_SEL. */
#define R92S_GPIO_WPS 0x10
/* Bits for R92S_MAC_PINMUX_CTRL. */
#define R92S_GPIOSEL_GPIO_M 0x03
#define R92S_GPIOSEL_GPIO_S 0
#define R92S_GPIOSEL_GPIO_JTAG 0
#define R92S_GPIOSEL_GPIO_PHYDBG 1
#define R92S_GPIOSEL_GPIO_BT 2
#define R92S_GPIOSEL_GPIO_WLANDBG 3
#define R92S_GPIOMUX_EN 0x08
/* Bits for R92S_IOCMD_CTRL. */
#define R92S_IOCMD_CLASS_M 0xff000000
#define R92S_IOCMD_CLASS_S 24
#define R92S_IOCMD_CLASS_BB_RF 0xf0
#define R92S_IOCMD_VALUE_M 0x00ffff00
#define R92S_IOCMD_VALUE_S 8
#define R92S_IOCMD_INDEX_M 0x000000ff
#define R92S_IOCMD_INDEX_S 0
#define R92S_IOCMD_INDEX_BB_READ 0
#define R92S_IOCMD_INDEX_BB_WRITE 1
#define R92S_IOCMD_INDEX_RF_READ 2
#define R92S_IOCMD_INDEX_RF_WRITE 3
/* Bits for R92S_USB_HRPWM. */
#define R92S_USB_HRPWM_PS_ALL_ON 0x04
#define R92S_USB_HRPWM_PS_ST_ACTIVE 0x08
/*
* Macros to access subfields in registers.
*/
/* Mask and Shift (getter). */
#define MS(val, field) \
(((val) & field##_M) >> field##_S)
/* Shift and Mask (setter). */
#define SM(field, val) \
(((val) << field##_S) & field##_M)
/* Rewrite. */
#define RW(var, field, val) \
(((var) & ~field##_M) | SM(field, val))
/*
* Firmware image header.
*/
struct r92s_fw_priv {
/* QWORD0 */
uint16_t signature;
uint8_t hci_sel;
#define R92S_HCI_SEL_PCIE 0x01
#define R92S_HCI_SEL_USB 0x02
#define R92S_HCI_SEL_SDIO 0x04
#define R92S_HCI_SEL_8172 0x10
#define R92S_HCI_SEL_AP 0x80
uint8_t chip_version;
uint16_t custid;
uint8_t rf_config;
uint8_t nendpoints;
/* QWORD1 */
uint32_t regulatory;
uint8_t rfintfs;
uint8_t def_nettype;
uint8_t turbo_mode;
uint8_t lowpower_mode;
/* QWORD2 */
uint8_t lbk_mode;
uint8_t mp_mode;
uint8_t vcs_type;
#define R92S_VCS_TYPE_DISABLE 0
#define R92S_VCS_TYPE_ENABLE 1
#define R92S_VCS_TYPE_AUTO 2
uint8_t vcs_mode;
#define R92S_VCS_MODE_NONE 0
#define R92S_VCS_MODE_RTS_CTS 1
#define R92S_VCS_MODE_CTS2SELF 2
uint32_t reserved1;
/* QWORD3 */
uint8_t qos_en;
uint8_t bw40_en;
uint8_t amsdu2ampdu_en;
uint8_t ampdu_en;
uint8_t rc_offload;
uint8_t agg_offload;
uint16_t reserved2;
/* QWORD4 */
uint8_t beacon_offload;
uint8_t mlme_offload;
uint8_t hwpc_offload;
uint8_t tcpcsum_offload;
uint8_t tcp_offload;
uint8_t ps_offload;
uint8_t wwlan_offload;
uint8_t reserved3;
/* QWORD5 */
uint16_t tcp_tx_len;
uint16_t tcp_rx_len;
uint32_t reserved4;
} __packed;
struct r92s_fw_hdr {
uint16_t signature;
uint16_t version;
uint32_t dmemsz;
uint32_t imemsz;
uint32_t sramsz;
uint32_t privsz;
uint16_t efuse_addr;
uint16_t h2c_resp_addr;
uint32_t svnrev;
uint8_t month;
uint8_t day;
uint8_t hour;
uint8_t minute;
struct r92s_fw_priv priv;
} __packed;
/* Structure for FW commands and FW events notifications. */
struct r92s_fw_cmd_hdr {
uint16_t len;
uint8_t code;
uint8_t seq;
#define R92S_FW_CMD_MORE 0x80
uint32_t reserved;
} __packed;
/* FW commands codes. */
#define R92S_CMD_READ_MACREG 0
#define R92S_CMD_WRITE_MACREG 1
#define R92S_CMD_READ_BBREG 2
#define R92S_CMD_WRITE_BBREG 3
#define R92S_CMD_READ_RFREG 4
#define R92S_CMD_WRITE_RFREG 5
#define R92S_CMD_READ_EEPROM 6
#define R92S_CMD_WRITE_EEPROM 7
#define R92S_CMD_READ_EFUSE 8
#define R92S_CMD_WRITE_EFUSE 9
#define R92S_CMD_READ_CAM 10
#define R92S_CMD_WRITE_CAM 11
#define R92S_CMD_SET_BCNITV 12
#define R92S_CMD_SET_MBIDCFG 13
#define R92S_CMD_JOIN_BSS 14
#define R92S_CMD_DISCONNECT 15
#define R92S_CMD_CREATE_BSS 16
#define R92S_CMD_SET_OPMODE 17
#define R92S_CMD_SITE_SURVEY 18
#define R92S_CMD_SET_AUTH 19
#define R92S_CMD_SET_KEY 20
#define R92S_CMD_SET_STA_KEY 21
#define R92S_CMD_SET_ASSOC_STA 22
#define R92S_CMD_DEL_ASSOC_STA 23
#define R92S_CMD_SET_STAPWRSTATE 24
#define R92S_CMD_SET_BASIC_RATE 25
#define R92S_CMD_GET_BASIC_RATE 26
#define R92S_CMD_SET_DATA_RATE 27
#define R92S_CMD_GET_DATA_RATE 28
#define R92S_CMD_SET_PHY_INFO 29
#define R92S_CMD_GET_PHY_INFO 30
#define R92S_CMD_SET_PHY 31
#define R92S_CMD_GET_PHY 32
#define R92S_CMD_READ_RSSI 33
#define R92S_CMD_READ_GAIN 34
#define R92S_CMD_SET_ATIM 35
#define R92S_CMD_SET_PWR_MODE 36
#define R92S_CMD_JOIN_BSS_RPT 37
#define R92S_CMD_SET_RA_TABLE 38
#define R92S_CMD_GET_RA_TABLE 39
#define R92S_CMD_GET_CCX_REPORT 40
#define R92S_CMD_GET_DTM_REPORT 41
#define R92S_CMD_GET_TXRATE_STATS 42
#define R92S_CMD_SET_USB_SUSPEND 43
#define R92S_CMD_SET_H2C_LBK 44
#define R92S_CMD_ADDBA_REQ 45
#define R92S_CMD_SET_CHANNEL 46
#define R92S_CMD_SET_TXPOWER 47
#define R92S_CMD_SWITCH_ANTENNA 48
#define R92S_CMD_SET_CRYSTAL_CAL 49
#define R92S_CMD_SET_SINGLE_CARRIER_TX 50
#define R92S_CMD_SET_SINGLE_TONE_TX 51
#define R92S_CMD_SET_CARRIER_SUPPR_TX 52
#define R92S_CMD_SET_CONTINUOUS_TX 53
#define R92S_CMD_SWITCH_BANDWIDTH 54
#define R92S_CMD_TX_BEACON 55
#define R92S_CMD_SET_POWER_TRACKING 56
#define R92S_CMD_AMSDU_TO_AMPDU 57
#define R92S_CMD_SET_MAC_ADDRESS 58
#define R92S_CMD_GET_H2C_LBK 59
#define R92S_CMD_SET_PBREQ_IE 60
#define R92S_CMD_SET_ASSOCREQ_IE 61
#define R92S_CMD_SET_PBRESP_IE 62
#define R92S_CMD_SET_ASSOCRESP_IE 63
#define R92S_CMD_GET_CURDATARATE 64
#define R92S_CMD_GET_TXRETRY_CNT 65
#define R92S_CMD_GET_RXRETRY_CNT 66
#define R92S_CMD_GET_BCNOK_CNT 67
#define R92S_CMD_GET_BCNERR_CNT 68
#define R92S_CMD_GET_CURTXPWR_LEVEL 69
#define R92S_CMD_SET_DIG 70
#define R92S_CMD_SET_RA 71
#define R92S_CMD_SET_PT 72
#define R92S_CMD_READ_TSSI 73
/* FW events notifications codes. */
#define R92S_EVT_READ_MACREG 0
#define R92S_EVT_READ_BBREG 1
#define R92S_EVT_READ_RFREG 2
#define R92S_EVT_READ_EEPROM 3
#define R92S_EVT_READ_EFUSE 4
#define R92S_EVT_READ_CAM 5
#define R92S_EVT_GET_BASICRATE 6
#define R92S_EVT_GET_DATARATE 7
#define R92S_EVT_SURVEY 8
#define R92S_EVT_SURVEY_DONE 9
#define R92S_EVT_JOIN_BSS 10
#define R92S_EVT_ADD_STA 11
#define R92S_EVT_DEL_STA 12
#define R92S_EVT_ATIM_DONE 13
#define R92S_EVT_TX_REPORT 14
#define R92S_EVT_CCX_REPORT 15
#define R92S_EVT_DTM_REPORT 16
#define R92S_EVT_TXRATE_STATS 17
#define R92S_EVT_C2H_LBK 18
#define R92S_EVT_FWDBG 19
#define R92S_EVT_C2H_FEEDBACK 20
#define R92S_EVT_ADDBA 21
#define R92S_EVT_C2H_BCN 22
#define R92S_EVT_PWR_STATE 23
#define R92S_EVT_WPS_PBC 24
#define R92S_EVT_ADDBA_REQ_REPORT 25
/* Structure for R92S_CMD_SITE_SURVEY. */
struct r92s_fw_cmd_sitesurvey {
uint32_t active;
uint32_t limit;
uint32_t ssidlen;
uint8_t ssid[32 + 1];
} __packed;
/* Structure for R92S_CMD_SET_AUTH. */
struct r92s_fw_cmd_auth {
uint8_t mode;
#define R92S_AUTHMODE_OPEN 0
#define R92S_AUTHMODE_SHARED 1
#define R92S_AUTHMODE_WPA 2
uint8_t dot1x;
} __packed;
/* Structure for R92S_CMD_SET_KEY. */
struct r92s_fw_cmd_set_key {
uint8_t algo;
#define R92S_KEY_ALGO_NONE 0
#define R92S_KEY_ALGO_WEP40 1
#define R92S_KEY_ALGO_TKIP 2
#define R92S_KEY_ALGO_TKIP_MMIC 3
#define R92S_KEY_ALGO_AES 4
#define R92S_KEY_ALGO_WEP104 5
uint8_t id;
uint8_t grpkey;
uint8_t key[16];
} __packed;
/* Structures for R92S_EVENT_SURVEY/R92S_CMD_JOIN_BSS. */
/* NDIS_802_11_SSID. */
struct ndis_802_11_ssid {
uint32_t ssidlen;
uint8_t ssid[32];
} __packed;
/* NDIS_802_11_CONFIGURATION_FH. */
struct ndis_802_11_configuration_fh {
uint32_t len;
uint32_t hoppattern;
uint32_t hopset;
uint32_t dwelltime;
} __packed;
/* NDIS_802_11_CONFIGURATION. */
struct ndis_802_11_configuration {
uint32_t len;
uint32_t bintval;
uint32_t atim;
uint32_t dsconfig;
struct ndis_802_11_configuration_fh fhconfig;
} __packed;
/* NDIS_WLAN_BSSID_EX. */
struct ndis_wlan_bssid_ex {
uint32_t len;
uint8_t macaddr[IEEE80211_ADDR_LEN];
uint8_t reserved[2];
struct ndis_802_11_ssid ssid;
uint32_t privacy;
int32_t rssi;
uint32_t networktype;
#define NDIS802_11FH 0
#define NDIS802_11DS 1
#define NDIS802_11OFDM5 2
#define NDIS802_11OFDM24 3
#define NDIS802_11AUTOMODE 4
struct ndis_802_11_configuration config;
uint32_t inframode;
#define NDIS802_11IBSS 0
#define NDIS802_11INFRASTRUCTURE 1
#define NDIS802_11AUTOUNKNOWN 2
#define NDIS802_11MONITOR 3
#define NDIS802_11APMODE 4
uint8_t supprates[16];
uint32_t ieslen;
/* Followed by ``ieslen'' bytes. */
} __packed;
/* NDIS_802_11_FIXED_IEs. */
struct ndis_802_11_fixed_ies {
uint8_t tstamp[8];
uint16_t bintval;
uint16_t capabilities;
} __packed;
/* Structure for R92S_CMD_SET_PWR_MODE. */
struct r92s_set_pwr_mode {
uint8_t mode;
#define R92S_PS_MODE_ACTIVE 0
#define R92S_PS_MODE_MIN 1
#define R92S_PS_MODE_MAX 2
#define R92S_PS_MODE_DTIM 3
#define R92S_PS_MODE_VOIP 4
#define R92S_PS_MODE_UAPSD_WMM 5
#define R92S_PS_MODE_UAPSD 6
#define R92S_PS_MODE_IBSS 7
#define R92S_PS_MODE_WWLAN 8
#define R92S_PS_MODE_RADIOOFF 9
#define R92S_PS_MODE_DISABLE 10
uint8_t low_traffic_en;
uint8_t lpnav_en;
uint8_t rf_low_snr_en;
uint8_t dps_en;
uint8_t bcn_rx_en;
uint8_t bcn_pass_cnt;
uint8_t bcn_to;
uint16_t bcn_itv;
uint8_t app_itv;
uint8_t awake_bcn_itv;
uint8_t smart_ps;
uint8_t bcn_pass_time;
} __packed;
/* Structure for event R92S_EVENT_JOIN_BSS. */
struct r92s_event_join_bss {
uint32_t next;
uint32_t prev;
uint32_t networktype;
uint32_t fixed;
uint32_t lastscanned;
uint32_t associd;
uint32_t join_res;
struct ndis_wlan_bssid_ex bss;
} __packed;
#define R92S_MACID_BSS 5
/* Rx MAC descriptor. */
struct r92s_rx_stat {
uint32_t rxdw0;
#define R92S_RXDW0_PKTLEN_M 0x00003fff
#define R92S_RXDW0_PKTLEN_S 0
#define R92S_RXDW0_CRCERR 0x00004000
#define R92S_RXDW0_INFOSZ_M 0x000f0000
#define R92S_RXDW0_INFOSZ_S 16
#define R92S_RXDW0_QOS 0x00800000
#define R92S_RXDW0_SHIFT_M 0x03000000
#define R92S_RXDW0_SHIFT_S 24
#define R92S_RXDW0_DECRYPTED 0x08000000
uint32_t rxdw1;
#define R92S_RXDW1_MOREFRAG 0x08000000
uint32_t rxdw2;
#define R92S_RXDW2_FRAG_M 0x0000f000
#define R92S_RXDW2_FRAG_S 12
#define R92S_RXDW2_PKTCNT_M 0x00ff0000
#define R92S_RXDW2_PKTCNT_S 16
uint32_t rxdw3;
#define R92S_RXDW3_RATE_M 0x0000003f
#define R92S_RXDW3_RATE_S 0
#define R92S_RXDW3_TCPCHKRPT 0x00000800
#define R92S_RXDW3_IPCHKRPT 0x00001000
#define R92S_RXDW3_TCPCHKVALID 0x00002000
#define R92S_RXDW3_HTC 0x00004000
uint32_t rxdw4;
uint32_t rxdw5;
} __packed __aligned(4);
/* Rx PHY descriptor. */
struct r92s_rx_phystat {
uint32_t phydw0;
uint32_t phydw1;
uint32_t phydw2;
uint32_t phydw3;
uint32_t phydw4;
uint32_t phydw5;
uint32_t phydw6;
uint32_t phydw7;
} __packed __aligned(4);
/* Rx PHY CCK descriptor. */
struct r92s_rx_cck {
uint8_t adc_pwdb[4];
uint8_t sq_rpt;
uint8_t agc_rpt;
} __packed;
/* Tx MAC descriptor. */
struct r92s_tx_desc {
uint32_t txdw0;
#define R92S_TXDW0_PKTLEN_M 0x0000ffff
#define R92S_TXDW0_PKTLEN_S 0
#define R92S_TXDW0_OFFSET_M 0x00ff0000
#define R92S_TXDW0_OFFSET_S 16
#define R92S_TXDW0_TYPE_M 0x03000000
#define R92S_TXDW0_TYPE_S 24
#define R92S_TXDW0_LSG 0x04000000
#define R92S_TXDW0_FSG 0x08000000
#define R92S_TXDW0_LINIP 0x10000000
#define R92S_TXDW0_OWN 0x80000000
uint32_t txdw1;
#define R92S_TXDW1_MACID_M 0x0000001f
#define R92S_TXDW1_MACID_S 0
#define R92S_TXDW1_MOREDATA 0x00000020
#define R92S_TXDW1_MOREFRAG 0x00000040
#define R92S_TXDW1_QSEL_M 0x00001f00
#define R92S_TXDW1_QSEL_S 8
#define R92S_TXDW1_QSEL_BE 0x03
#define R92S_TXDW1_QSEL_H2C 0x1f
#define R92S_TXDW1_NONQOS 0x00010000
#define R92S_TXDW1_KEYIDX_M 0x00060000
#define R92S_TXDW1_KEYIDX_S 17
#define R92S_TXDW1_CIPHER_M 0x00c00000
#define R92S_TXDW1_CIPHER_S 22
#define R92S_TXDW1_CIPHER_WEP 1
#define R92S_TXDW1_CIPHER_TKIP 2
#define R92S_TXDW1_CIPHER_AES 3
#define R92S_TXDW1_HWPC 0x80000000
uint32_t txdw2;
#define R92S_TXDW2_BMCAST 0x00000080
#define R92S_TXDW2_AGGEN 0x20000000
#define R92S_TXDW2_BK 0x40000000
uint32_t txdw3;
#define R92S_TXDW3_SEQ_M 0x0fff0000
#define R92S_TXDW3_SEQ_S 16
#define R92S_TXDW3_FRAG_M 0xf0000000
#define R92S_TXDW3_FRAG_S 28
uint32_t txdw4;
#define R92S_TXDW4_TXBW 0x00040000
uint32_t txdw5;
#define R92S_TXDW5_DISFB 0x00008000
uint16_t ipchksum;
uint16_t tcpchksum;
uint16_t txbufsize;
uint16_t reserved1;
} __packed __aligned(4);
/*
* Driver definitions.
*/
#define RSU_RX_LIST_COUNT 1
#define RSU_TX_LIST_COUNT 32
#define RSU_HOST_CMD_RING_COUNT 32
#define RSU_RXBUFSZ (8 * 1024)
#define RSU_TXBUFSZ \
((sizeof(struct r92s_tx_desc) + IEEE80211_MAX_LEN + 3) & ~3)
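/* "+ 3 ... & ~3" rounds the descriptor-plus-frame size up to a multiple of 4. */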
#define RSU_TX_TIMEOUT 5000 /* ms */
#define RSU_CMD_TIMEOUT 2000 /* ms */
/* Queue ids (used by soft only). */
#define RSU_QID_BCN 0
#define RSU_QID_MGT 1
#define RSU_QID_BMC 2
#define RSU_QID_VO 3
#define RSU_QID_VI 4
#define RSU_QID_BE 5
#define RSU_QID_BK 6
#define RSU_QID_RXOFF 7
#define RSU_QID_H2C 8
#define RSU_QID_C2H 9
/* Map AC to queue id. */
static const uint8_t rsu_ac2qid[WME_NUM_AC] = {
RSU_QID_BE,
RSU_QID_BK,
RSU_QID_VI,
RSU_QID_VO
};
/* Pipe index to endpoint address mapping. */
static const uint8_t r92s_epaddr[] =
{ 0x83, 0x04, 0x06, 0x0d,
0x05, 0x07,
0x89, 0x0a, 0x0b, 0x0c };
/* Queue id to pipe index mapping for 4-endpoint configurations. */
static const uint8_t rsu_qid2idx_4ep[] =
{ 3, 3, 3, 1, 1, 2, 2, 0, 3, 0 };
/* Queue id to pipe index mapping for 6-endpoint configurations. */
static const uint8_t rsu_qid2idx_6ep[] =
{ 3, 3, 3, 1, 4, 2, 5, 0, 3, 0 };
/* Queue id to pipe index mapping for 11-endpoint configurations. */
static const uint8_t rsu_qid2idx_11ep[] =
{ 7, 9, 8, 1, 4, 2, 5, 0, 3, 6 };
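/*
 * Putting the tables together: a WME access category selects a queue id
 * through rsu_ac2qid[], the queue id selects a pipe index through the
 * rsu_qid2idx_*[] table matching the device's endpoint count, and
 * r92s_epaddr[] supplies the endpoint address for that pipe.
 */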
struct rsu_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
uint8_t wr_dbm_antsignal;
} __packed __aligned(8);
#define RSU_RX_RADIOTAP_PRESENT \
(1 << IEEE80211_RADIOTAP_FLAGS | \
1 << IEEE80211_RADIOTAP_RATE | \
1 << IEEE80211_RADIOTAP_CHANNEL | \
1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL)
struct rsu_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
} __packed __aligned(8);
#define RSU_TX_RADIOTAP_PRESENT \
(1 << IEEE80211_RADIOTAP_FLAGS | \
1 << IEEE80211_RADIOTAP_CHANNEL)
struct rsu_softc;
struct rsu_host_cmd {
void (*cb)(struct rsu_softc *, void *);
uint8_t data[256];
};
struct rsu_cmd_newstate {
enum ieee80211_state state;
int arg;
};
struct rsu_cmd_key {
struct ieee80211_key key;
};
struct rsu_host_cmd_ring {
struct rsu_host_cmd cmd[RSU_HOST_CMD_RING_COUNT];
int cur;
int next;
int queued;
};
enum {
RSU_BULK_RX,
RSU_BULK_TX_BE_BK, /* = WME_AC_BE/BK */
RSU_BULK_TX_VI_VO, /* = WME_AC_VI/VO */
RSU_N_TRANSFER,
};
struct rsu_data {
struct rsu_softc *sc;
uint8_t *buf;
uint16_t buflen;
struct mbuf *m;
struct ieee80211_node *ni;
STAILQ_ENTRY(rsu_data) next;
};
struct rsu_vap {
struct ieee80211vap vap;
struct ieee80211_beacon_offsets bo;
int (*newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define RSU_VAP(vap) ((struct rsu_vap *)(vap))
#define RSU_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define RSU_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
#define RSU_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
struct rsu_softc {
- struct ifnet *sc_ifp;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
device_t sc_dev;
struct usb_device *sc_udev;
int (*sc_newstate)(struct ieee80211com *,
enum ieee80211_state, int);
struct usbd_interface *sc_iface;
struct timeout_task calib_task;
const uint8_t *qid2idx;
struct mtx sc_mtx;
+ u_int sc_running:1,
+ sc_calibrating:1,
+ sc_scan_pass:1;
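/* sc_running tracks whether the device is up, taking over the role of the old IFF_DRV_RUNNING interface flag. */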
u_int cut;
- int scan_pass;
struct rsu_host_cmd_ring cmdq;
struct rsu_data sc_rx[RSU_RX_LIST_COUNT];
struct rsu_data sc_tx[RSU_TX_LIST_COUNT];
struct rsu_data *fwcmd_data;
uint8_t cmd_seq;
uint8_t rom[128];
- uint8_t sc_bssid[IEEE80211_ADDR_LEN];
struct usb_xfer *sc_xfer[RSU_N_TRANSFER];
- uint8_t sc_calibrating;
STAILQ_HEAD(, rsu_data) sc_rx_active;
STAILQ_HEAD(, rsu_data) sc_rx_inactive;
STAILQ_HEAD(, rsu_data) sc_tx_active[RSU_N_TRANSFER];
STAILQ_HEAD(, rsu_data) sc_tx_inactive;
STAILQ_HEAD(, rsu_data) sc_tx_pending[RSU_N_TRANSFER];
union {
struct rsu_rx_radiotap_header th;
uint8_t pad[64];
} sc_rxtapu;
#define sc_rxtap sc_rxtapu.th
int sc_rxtap_len;
union {
struct rsu_tx_radiotap_header th;
uint8_t pad[64];
} sc_txtapu;
#define sc_txtap sc_txtapu.th
int sc_txtap_len;
};
Index: head/sys/dev/usb/wlan/if_rum.c
===================================================================
--- head/sys/dev/usb/wlan/if_rum.c (revision 287196)
+++ head/sys/dev/usb/wlan/if_rum.c (revision 287197)
@@ -1,2404 +1,2326 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2005-2007 Damien Bergamini <damien.bergamini@free.fr>
* Copyright (c) 2006 Niall O'Higgins <niallo@openbsd.org>
* Copyright (c) 2007-2008 Hans Petter Selasky <hselasky@FreeBSD.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
* Ralink Technology RT2501USB/RT2601USB chipset driver
* http://www.ralinktech.com.tw/
*/
#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kdb.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#endif
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_ratectl.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include "usbdevs.h"
#define USB_DEBUG_VAR rum_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/wlan/if_rumreg.h>
#include <dev/usb/wlan/if_rumvar.h>
#include <dev/usb/wlan/if_rumfw.h>
#ifdef USB_DEBUG
static int rum_debug = 0;
static SYSCTL_NODE(_hw_usb, OID_AUTO, rum, CTLFLAG_RW, 0, "USB rum");
SYSCTL_INT(_hw_usb_rum, OID_AUTO, debug, CTLFLAG_RWTUN, &rum_debug, 0,
"Debug level");
#endif
#define N(a) ((int)(sizeof (a) / sizeof ((a)[0])))
static const STRUCT_USB_HOST_ID rum_devs[] = {
#define RUM_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
RUM_DEV(ABOCOM, HWU54DM),
RUM_DEV(ABOCOM, RT2573_2),
RUM_DEV(ABOCOM, RT2573_3),
RUM_DEV(ABOCOM, RT2573_4),
RUM_DEV(ABOCOM, WUG2700),
RUM_DEV(AMIT, CGWLUSB2GO),
RUM_DEV(ASUS, RT2573_1),
RUM_DEV(ASUS, RT2573_2),
RUM_DEV(BELKIN, F5D7050A),
RUM_DEV(BELKIN, F5D9050V3),
RUM_DEV(CISCOLINKSYS, WUSB54GC),
RUM_DEV(CISCOLINKSYS, WUSB54GR),
RUM_DEV(CONCEPTRONIC2, C54RU2),
RUM_DEV(COREGA, CGWLUSB2GL),
RUM_DEV(COREGA, CGWLUSB2GPX),
RUM_DEV(DICKSMITH, CWD854F),
RUM_DEV(DICKSMITH, RT2573),
RUM_DEV(EDIMAX, EW7318USG),
RUM_DEV(DLINK2, DWLG122C1),
RUM_DEV(DLINK2, WUA1340),
RUM_DEV(DLINK2, DWA111),
RUM_DEV(DLINK2, DWA110),
RUM_DEV(GIGABYTE, GNWB01GS),
RUM_DEV(GIGABYTE, GNWI05GS),
RUM_DEV(GIGASET, RT2573),
RUM_DEV(GOODWAY, RT2573),
RUM_DEV(GUILLEMOT, HWGUSB254LB),
RUM_DEV(GUILLEMOT, HWGUSB254V2AP),
RUM_DEV(HUAWEI3COM, WUB320G),
RUM_DEV(MELCO, G54HP),
RUM_DEV(MELCO, SG54HP),
RUM_DEV(MELCO, SG54HG),
RUM_DEV(MELCO, WLIUCG),
RUM_DEV(MELCO, WLRUCG),
RUM_DEV(MELCO, WLRUCGAOSS),
RUM_DEV(MSI, RT2573_1),
RUM_DEV(MSI, RT2573_2),
RUM_DEV(MSI, RT2573_3),
RUM_DEV(MSI, RT2573_4),
RUM_DEV(NOVATECH, RT2573),
RUM_DEV(PLANEX2, GWUS54HP),
RUM_DEV(PLANEX2, GWUS54MINI2),
RUM_DEV(PLANEX2, GWUSMM),
RUM_DEV(QCOM, RT2573),
RUM_DEV(QCOM, RT2573_2),
RUM_DEV(QCOM, RT2573_3),
RUM_DEV(RALINK, RT2573),
RUM_DEV(RALINK, RT2573_2),
RUM_DEV(RALINK, RT2671),
RUM_DEV(SITECOMEU, WL113R2),
RUM_DEV(SITECOMEU, WL172),
RUM_DEV(SPARKLAN, RT2573),
RUM_DEV(SURECOM, RT2573),
#undef RUM_DEV
};
static device_probe_t rum_match;
static device_attach_t rum_attach;
static device_detach_t rum_detach;
static usb_callback_t rum_bulk_read_callback;
static usb_callback_t rum_bulk_write_callback;
static usb_error_t rum_do_request(struct rum_softc *sc,
struct usb_device_request *req, void *data);
static struct ieee80211vap *rum_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode,
int, const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void rum_vap_delete(struct ieee80211vap *);
static void rum_tx_free(struct rum_tx_data *, int);
static void rum_setup_tx_list(struct rum_softc *);
static void rum_unsetup_tx_list(struct rum_softc *);
static int rum_newstate(struct ieee80211vap *,
enum ieee80211_state, int);
static void rum_setup_tx_desc(struct rum_softc *,
struct rum_tx_desc *, uint32_t, uint16_t, int,
int);
static int rum_tx_mgt(struct rum_softc *, struct mbuf *,
struct ieee80211_node *);
static int rum_tx_raw(struct rum_softc *, struct mbuf *,
struct ieee80211_node *,
const struct ieee80211_bpf_params *);
static int rum_tx_data(struct rum_softc *, struct mbuf *,
struct ieee80211_node *);
-static void rum_start(struct ifnet *);
-static int rum_ioctl(struct ifnet *, u_long, caddr_t);
+static int rum_transmit(struct ieee80211com *, struct mbuf *);
+static void rum_start(struct rum_softc *);
+static void rum_parent(struct ieee80211com *);
static void rum_eeprom_read(struct rum_softc *, uint16_t, void *,
int);
static uint32_t rum_read(struct rum_softc *, uint16_t);
static void rum_read_multi(struct rum_softc *, uint16_t, void *,
int);
static usb_error_t rum_write(struct rum_softc *, uint16_t, uint32_t);
static usb_error_t rum_write_multi(struct rum_softc *, uint16_t, void *,
size_t);
static void rum_bbp_write(struct rum_softc *, uint8_t, uint8_t);
static uint8_t rum_bbp_read(struct rum_softc *, uint8_t);
static void rum_rf_write(struct rum_softc *, uint8_t, uint32_t);
static void rum_select_antenna(struct rum_softc *);
static void rum_enable_mrr(struct rum_softc *);
static void rum_set_txpreamble(struct rum_softc *);
static void rum_set_basicrates(struct rum_softc *);
static void rum_select_band(struct rum_softc *,
struct ieee80211_channel *);
static void rum_set_chan(struct rum_softc *,
struct ieee80211_channel *);
static void rum_enable_tsf_sync(struct rum_softc *);
static void rum_enable_tsf(struct rum_softc *);
-static void rum_update_slot(struct ifnet *);
+static void rum_update_slot(struct rum_softc *);
static void rum_set_bssid(struct rum_softc *, const uint8_t *);
static void rum_set_macaddr(struct rum_softc *, const uint8_t *);
static void rum_update_mcast(struct ieee80211com *);
static void rum_update_promisc(struct ieee80211com *);
static void rum_setpromisc(struct rum_softc *);
static const char *rum_get_rf(int);
static void rum_read_eeprom(struct rum_softc *);
static int rum_bbp_init(struct rum_softc *);
-static void rum_init_locked(struct rum_softc *);
-static void rum_init(void *);
+static void rum_init(struct rum_softc *);
static void rum_stop(struct rum_softc *);
static void rum_load_microcode(struct rum_softc *, const uint8_t *,
size_t);
static void rum_prepare_beacon(struct rum_softc *,
struct ieee80211vap *);
static int rum_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void rum_scan_start(struct ieee80211com *);
static void rum_scan_end(struct ieee80211com *);
static void rum_set_channel(struct ieee80211com *);
static int rum_get_rssi(struct rum_softc *, uint8_t);
static void rum_ratectl_start(struct rum_softc *,
struct ieee80211_node *);
static void rum_ratectl_timeout(void *);
static void rum_ratectl_task(void *, int);
static int rum_pause(struct rum_softc *, int);
static const struct {
uint32_t reg;
uint32_t val;
} rum_def_mac[] = {
{ RT2573_TXRX_CSR0, 0x025fb032 },
{ RT2573_TXRX_CSR1, 0x9eaa9eaf },
{ RT2573_TXRX_CSR2, 0x8a8b8c8d },
{ RT2573_TXRX_CSR3, 0x00858687 },
{ RT2573_TXRX_CSR7, 0x2e31353b },
{ RT2573_TXRX_CSR8, 0x2a2a2a2c },
{ RT2573_TXRX_CSR15, 0x0000000f },
{ RT2573_MAC_CSR6, 0x00000fff },
{ RT2573_MAC_CSR8, 0x016c030a },
{ RT2573_MAC_CSR10, 0x00000718 },
{ RT2573_MAC_CSR12, 0x00000004 },
{ RT2573_MAC_CSR13, 0x00007f00 },
{ RT2573_SEC_CSR0, 0x00000000 },
{ RT2573_SEC_CSR1, 0x00000000 },
{ RT2573_SEC_CSR5, 0x00000000 },
{ RT2573_PHY_CSR1, 0x000023b0 },
{ RT2573_PHY_CSR5, 0x00040a06 },
{ RT2573_PHY_CSR6, 0x00080606 },
{ RT2573_PHY_CSR7, 0x00000408 },
{ RT2573_AIFSN_CSR, 0x00002273 },
{ RT2573_CWMIN_CSR, 0x00002344 },
{ RT2573_CWMAX_CSR, 0x000034aa }
};
static const struct {
uint8_t reg;
uint8_t val;
} rum_def_bbp[] = {
{ 3, 0x80 },
{ 15, 0x30 },
{ 17, 0x20 },
{ 21, 0xc8 },
{ 22, 0x38 },
{ 23, 0x06 },
{ 24, 0xfe },
{ 25, 0x0a },
{ 26, 0x0d },
{ 32, 0x0b },
{ 34, 0x12 },
{ 37, 0x07 },
{ 39, 0xf8 },
{ 41, 0x60 },
{ 53, 0x10 },
{ 54, 0x18 },
{ 60, 0x10 },
{ 61, 0x04 },
{ 62, 0x04 },
{ 75, 0xfe },
{ 86, 0xfe },
{ 88, 0xfe },
{ 90, 0x0f },
{ 99, 0x00 },
{ 102, 0x16 },
{ 107, 0x04 }
};
static const struct rfprog {
uint8_t chan;
uint32_t r1, r2, r3, r4;
} rum_rf5226[] = {
{ 1, 0x00b03, 0x001e1, 0x1a014, 0x30282 },
{ 2, 0x00b03, 0x001e1, 0x1a014, 0x30287 },
{ 3, 0x00b03, 0x001e2, 0x1a014, 0x30282 },
{ 4, 0x00b03, 0x001e2, 0x1a014, 0x30287 },
{ 5, 0x00b03, 0x001e3, 0x1a014, 0x30282 },
{ 6, 0x00b03, 0x001e3, 0x1a014, 0x30287 },
{ 7, 0x00b03, 0x001e4, 0x1a014, 0x30282 },
{ 8, 0x00b03, 0x001e4, 0x1a014, 0x30287 },
{ 9, 0x00b03, 0x001e5, 0x1a014, 0x30282 },
{ 10, 0x00b03, 0x001e5, 0x1a014, 0x30287 },
{ 11, 0x00b03, 0x001e6, 0x1a014, 0x30282 },
{ 12, 0x00b03, 0x001e6, 0x1a014, 0x30287 },
{ 13, 0x00b03, 0x001e7, 0x1a014, 0x30282 },
{ 14, 0x00b03, 0x001e8, 0x1a014, 0x30284 },
{ 34, 0x00b03, 0x20266, 0x36014, 0x30282 },
{ 38, 0x00b03, 0x20267, 0x36014, 0x30284 },
{ 42, 0x00b03, 0x20268, 0x36014, 0x30286 },
{ 46, 0x00b03, 0x20269, 0x36014, 0x30288 },
{ 36, 0x00b03, 0x00266, 0x26014, 0x30288 },
{ 40, 0x00b03, 0x00268, 0x26014, 0x30280 },
{ 44, 0x00b03, 0x00269, 0x26014, 0x30282 },
{ 48, 0x00b03, 0x0026a, 0x26014, 0x30284 },
{ 52, 0x00b03, 0x0026b, 0x26014, 0x30286 },
{ 56, 0x00b03, 0x0026c, 0x26014, 0x30288 },
{ 60, 0x00b03, 0x0026e, 0x26014, 0x30280 },
{ 64, 0x00b03, 0x0026f, 0x26014, 0x30282 },
{ 100, 0x00b03, 0x0028a, 0x2e014, 0x30280 },
{ 104, 0x00b03, 0x0028b, 0x2e014, 0x30282 },
{ 108, 0x00b03, 0x0028c, 0x2e014, 0x30284 },
{ 112, 0x00b03, 0x0028d, 0x2e014, 0x30286 },
{ 116, 0x00b03, 0x0028e, 0x2e014, 0x30288 },
{ 120, 0x00b03, 0x002a0, 0x2e014, 0x30280 },
{ 124, 0x00b03, 0x002a1, 0x2e014, 0x30282 },
{ 128, 0x00b03, 0x002a2, 0x2e014, 0x30284 },
{ 132, 0x00b03, 0x002a3, 0x2e014, 0x30286 },
{ 136, 0x00b03, 0x002a4, 0x2e014, 0x30288 },
{ 140, 0x00b03, 0x002a6, 0x2e014, 0x30280 },
{ 149, 0x00b03, 0x002a8, 0x2e014, 0x30287 },
{ 153, 0x00b03, 0x002a9, 0x2e014, 0x30289 },
{ 157, 0x00b03, 0x002ab, 0x2e014, 0x30281 },
{ 161, 0x00b03, 0x002ac, 0x2e014, 0x30283 },
{ 165, 0x00b03, 0x002ad, 0x2e014, 0x30285 }
}, rum_rf5225[] = {
{ 1, 0x00b33, 0x011e1, 0x1a014, 0x30282 },
{ 2, 0x00b33, 0x011e1, 0x1a014, 0x30287 },
{ 3, 0x00b33, 0x011e2, 0x1a014, 0x30282 },
{ 4, 0x00b33, 0x011e2, 0x1a014, 0x30287 },
{ 5, 0x00b33, 0x011e3, 0x1a014, 0x30282 },
{ 6, 0x00b33, 0x011e3, 0x1a014, 0x30287 },
{ 7, 0x00b33, 0x011e4, 0x1a014, 0x30282 },
{ 8, 0x00b33, 0x011e4, 0x1a014, 0x30287 },
{ 9, 0x00b33, 0x011e5, 0x1a014, 0x30282 },
{ 10, 0x00b33, 0x011e5, 0x1a014, 0x30287 },
{ 11, 0x00b33, 0x011e6, 0x1a014, 0x30282 },
{ 12, 0x00b33, 0x011e6, 0x1a014, 0x30287 },
{ 13, 0x00b33, 0x011e7, 0x1a014, 0x30282 },
{ 14, 0x00b33, 0x011e8, 0x1a014, 0x30284 },
{ 34, 0x00b33, 0x01266, 0x26014, 0x30282 },
{ 38, 0x00b33, 0x01267, 0x26014, 0x30284 },
{ 42, 0x00b33, 0x01268, 0x26014, 0x30286 },
{ 46, 0x00b33, 0x01269, 0x26014, 0x30288 },
{ 36, 0x00b33, 0x01266, 0x26014, 0x30288 },
{ 40, 0x00b33, 0x01268, 0x26014, 0x30280 },
{ 44, 0x00b33, 0x01269, 0x26014, 0x30282 },
{ 48, 0x00b33, 0x0126a, 0x26014, 0x30284 },
{ 52, 0x00b33, 0x0126b, 0x26014, 0x30286 },
{ 56, 0x00b33, 0x0126c, 0x26014, 0x30288 },
{ 60, 0x00b33, 0x0126e, 0x26014, 0x30280 },
{ 64, 0x00b33, 0x0126f, 0x26014, 0x30282 },
{ 100, 0x00b33, 0x0128a, 0x2e014, 0x30280 },
{ 104, 0x00b33, 0x0128b, 0x2e014, 0x30282 },
{ 108, 0x00b33, 0x0128c, 0x2e014, 0x30284 },
{ 112, 0x00b33, 0x0128d, 0x2e014, 0x30286 },
{ 116, 0x00b33, 0x0128e, 0x2e014, 0x30288 },
{ 120, 0x00b33, 0x012a0, 0x2e014, 0x30280 },
{ 124, 0x00b33, 0x012a1, 0x2e014, 0x30282 },
{ 128, 0x00b33, 0x012a2, 0x2e014, 0x30284 },
{ 132, 0x00b33, 0x012a3, 0x2e014, 0x30286 },
{ 136, 0x00b33, 0x012a4, 0x2e014, 0x30288 },
{ 140, 0x00b33, 0x012a6, 0x2e014, 0x30280 },
{ 149, 0x00b33, 0x012a8, 0x2e014, 0x30287 },
{ 153, 0x00b33, 0x012a9, 0x2e014, 0x30289 },
{ 157, 0x00b33, 0x012ab, 0x2e014, 0x30281 },
{ 161, 0x00b33, 0x012ac, 0x2e014, 0x30283 },
{ 165, 0x00b33, 0x012ad, 0x2e014, 0x30285 }
};
static const struct usb_config rum_config[RUM_N_TRANSFER] = {
[RUM_BULK_WR] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.bufsize = (MCLBYTES + RT2573_TX_DESC_SIZE + 8),
.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
.callback = rum_bulk_write_callback,
.timeout = 5000, /* ms */
},
[RUM_BULK_RD] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_IN,
.bufsize = (MCLBYTES + RT2573_RX_DESC_SIZE),
.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
.callback = rum_bulk_read_callback,
},
};
static int
rum_match(device_t self)
{
struct usb_attach_arg *uaa = device_get_ivars(self);
if (uaa->usb_mode != USB_MODE_HOST)
return (ENXIO);
if (uaa->info.bConfigIndex != 0)
return (ENXIO);
if (uaa->info.bIfaceIndex != RT2573_IFACE_INDEX)
return (ENXIO);
return (usbd_lookup_id_by_uaa(rum_devs, sizeof(rum_devs), uaa));
}
static int
rum_attach(device_t self)
{
struct usb_attach_arg *uaa = device_get_ivars(self);
struct rum_softc *sc = device_get_softc(self);
- struct ieee80211com *ic;
- struct ifnet *ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
uint8_t iface_index, bands;
uint32_t tmp;
int error, ntries;
device_set_usb_desc(self);
sc->sc_udev = uaa->device;
sc->sc_dev = self;
mtx_init(&sc->sc_mtx, device_get_nameunit(self),
MTX_NETWORK_LOCK, MTX_DEF);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
iface_index = RT2573_IFACE_INDEX;
error = usbd_transfer_setup(uaa->device, &iface_index,
sc->sc_xfer, rum_config, RUM_N_TRANSFER, sc, &sc->sc_mtx);
if (error) {
device_printf(self, "could not allocate USB transfers, "
"err=%s\n", usbd_errstr(error));
goto detach;
}
RUM_LOCK(sc);
/* retrieve RT2573 rev. no */
for (ntries = 0; ntries < 100; ntries++) {
if ((tmp = rum_read(sc, RT2573_MAC_CSR0)) != 0)
break;
if (rum_pause(sc, hz / 100))
break;
}
if (ntries == 100) {
device_printf(sc->sc_dev, "timeout waiting for chip to settle\n");
RUM_UNLOCK(sc);
goto detach;
}
/* retrieve MAC address and various other things from EEPROM */
rum_read_eeprom(sc);
device_printf(sc->sc_dev, "MAC/BBP RT2573 (rev 0x%05x), RF %s\n",
tmp, rum_get_rf(sc->rf_rev));
rum_load_microcode(sc, rt2573_ucode, sizeof(rt2573_ucode));
RUM_UNLOCK(sc);
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "can not if_alloc()\n");
- goto detach;
- }
- ic = ifp->if_l2com;
-
- ifp->if_softc = sc;
- if_initname(ifp, "rum", device_get_unit(sc->sc_dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = rum_init;
- ifp->if_ioctl = rum_ioctl;
- ifp->if_start = rum_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(self);
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA /* station mode supported */
| IEEE80211_C_IBSS /* IBSS mode supported */
| IEEE80211_C_MONITOR /* monitor mode supported */
| IEEE80211_C_HOSTAP /* HostAp mode supported */
| IEEE80211_C_TXPMGT /* tx power management */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_BGSCAN /* bg scanning supported */
| IEEE80211_C_WPA /* 802.11i */
;
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
if (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_5226)
setbit(&bands, IEEE80211_MODE_11A);
ieee80211_init_channels(ic, NULL, &bands);
- ieee80211_ifattach(ic, sc->sc_bssid);
+ ieee80211_ifattach(ic);
ic->ic_update_promisc = rum_update_promisc;
ic->ic_raw_xmit = rum_raw_xmit;
ic->ic_scan_start = rum_scan_start;
ic->ic_scan_end = rum_scan_end;
ic->ic_set_channel = rum_set_channel;
-
+ ic->ic_transmit = rum_transmit;
+ ic->ic_parent = rum_parent;
ic->ic_vap_create = rum_vap_create;
ic->ic_vap_delete = rum_vap_delete;
ic->ic_update_mcast = rum_update_mcast;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
RT2573_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
RT2573_RX_RADIOTAP_PRESENT);
if (bootverbose)
ieee80211_announce(ic);
return (0);
detach:
rum_detach(self);
return (ENXIO); /* failure */
}
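/*
 * Note: with this revision rum_attach() no longer allocates a struct ifnet.
 * The ieee80211com embedded in the softc is filled in directly,
 * ieee80211_ifattach() is called without a MAC address argument (the
 * address read from the EEPROM now lives in ic_macaddr), and the new
 * ic_transmit/ic_parent methods take over the roles of the removed
 * if_start/if_ioctl handlers.  The mbufq initialized above (sc_snd) becomes
 * the driver's software transmit queue.
 */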
static int
rum_detach(device_t self)
{
struct rum_softc *sc = device_get_softc(self);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic;
/* Prevent further ioctls */
RUM_LOCK(sc);
sc->sc_detached = 1;
RUM_UNLOCK(sc);
/* stop all USB transfers */
usbd_transfer_unsetup(sc->sc_xfer, RUM_N_TRANSFER);
/* free TX list, if any */
RUM_LOCK(sc);
rum_unsetup_tx_list(sc);
RUM_UNLOCK(sc);
- if (ifp) {
- ic = ifp->if_l2com;
- ieee80211_ifdetach(ic);
- if_free(ifp);
- }
+ if (sc->sc_ic.ic_softc == sc)
+ ieee80211_ifdetach(&sc->sc_ic);
+ mbufq_drain(&sc->sc_snd);
mtx_destroy(&sc->sc_mtx);
return (0);
}
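/*
 * Note: ieee80211_ifdetach() is only called when attach progressed far
 * enough to initialize the embedded ieee80211com (the ic_softc == sc test
 * above), and any frames still sitting on the software transmit queue are
 * drained before the mutex is destroyed.
 */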
static usb_error_t
rum_do_request(struct rum_softc *sc,
struct usb_device_request *req, void *data)
{
usb_error_t err;
int ntries = 10;
while (ntries--) {
err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx,
req, data, 0, NULL, 250 /* ms */);
if (err == 0)
break;
DPRINTFN(1, "Control request failed, %s (retrying)\n",
usbd_errstr(err));
if (rum_pause(sc, hz / 100))
break;
}
return (err);
}
static struct ieee80211vap *
rum_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct rum_softc *sc = ic->ic_softc;
struct rum_vap *rvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return NULL;
- rvp = (struct rum_vap *) malloc(sizeof(struct rum_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (rvp == NULL)
- return NULL;
+ rvp = malloc(sizeof(struct rum_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &rvp->vap;
/* enable s/w bmiss handling for sta mode */
if (ieee80211_vap_setup(ic, vap, name, unit, opmode,
- flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) {
+ flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) {
/* out of memory */
free(rvp, M_80211_VAP);
return (NULL);
}
/* override state transition machine */
rvp->newstate = vap->iv_newstate;
vap->iv_newstate = rum_newstate;
usb_callout_init_mtx(&rvp->ratectl_ch, &sc->sc_mtx, 0);
TASK_INIT(&rvp->ratectl_task, 0, rum_ratectl_task, rvp);
ieee80211_ratectl_init(vap);
ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */);
/* complete setup */
- ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status);
+ ieee80211_vap_attach(vap, ieee80211_media_change,
+ ieee80211_media_status, mac);
ic->ic_opmode = opmode;
return vap;
}
static void
rum_vap_delete(struct ieee80211vap *vap)
{
struct rum_vap *rvp = RUM_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
usb_callout_drain(&rvp->ratectl_ch);
ieee80211_draintask(ic, &rvp->ratectl_task);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(rvp, M_80211_VAP);
}
static void
rum_tx_free(struct rum_tx_data *data, int txerr)
{
struct rum_softc *sc = data->sc;
if (data->m != NULL) {
- if (data->m->m_flags & M_TXCB)
- ieee80211_process_callback(data->ni, data->m,
- txerr ? ETIMEDOUT : 0);
- m_freem(data->m);
+ ieee80211_tx_complete(data->ni, data->m, txerr);
data->m = NULL;
-
- ieee80211_free_node(data->ni);
data->ni = NULL;
}
STAILQ_INSERT_TAIL(&sc->tx_free, data, next);
sc->tx_nfree++;
}
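/*
 * Note: ieee80211_tx_complete() now appears to take care of what the
 * removed code did by hand - running any M_TXCB completion callback,
 * freeing the mbuf, releasing the node reference and updating the per-vap
 * output counters.
 */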
static void
rum_setup_tx_list(struct rum_softc *sc)
{
struct rum_tx_data *data;
int i;
sc->tx_nfree = 0;
STAILQ_INIT(&sc->tx_q);
STAILQ_INIT(&sc->tx_free);
for (i = 0; i < RUM_TX_LIST_COUNT; i++) {
data = &sc->tx_data[i];
data->sc = sc;
STAILQ_INSERT_TAIL(&sc->tx_free, data, next);
sc->tx_nfree++;
}
}
static void
rum_unsetup_tx_list(struct rum_softc *sc)
{
struct rum_tx_data *data;
int i;
/* make sure any subsequent use of the queues will fail */
sc->tx_nfree = 0;
STAILQ_INIT(&sc->tx_q);
STAILQ_INIT(&sc->tx_free);
/* free up all node references and mbufs */
for (i = 0; i < RUM_TX_LIST_COUNT; i++) {
data = &sc->tx_data[i];
if (data->m != NULL) {
m_freem(data->m);
data->m = NULL;
}
if (data->ni != NULL) {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
}
}
static int
rum_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct rum_vap *rvp = RUM_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct rum_softc *sc = ic->ic_softc;
const struct ieee80211_txparam *tp;
enum ieee80211_state ostate;
struct ieee80211_node *ni;
uint32_t tmp;
ostate = vap->iv_state;
DPRINTF("%s -> %s\n",
ieee80211_state_name[ostate],
ieee80211_state_name[nstate]);
IEEE80211_UNLOCK(ic);
RUM_LOCK(sc);
usb_callout_stop(&rvp->ratectl_ch);
switch (nstate) {
case IEEE80211_S_INIT:
if (ostate == IEEE80211_S_RUN) {
/* abort TSF synchronization */
tmp = rum_read(sc, RT2573_TXRX_CSR9);
rum_write(sc, RT2573_TXRX_CSR9, tmp & ~0x00ffffff);
}
break;
case IEEE80211_S_RUN:
ni = ieee80211_ref_node(vap->iv_bss);
if (vap->iv_opmode != IEEE80211_M_MONITOR) {
if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) {
RUM_UNLOCK(sc);
IEEE80211_LOCK(ic);
ieee80211_free_node(ni);
return (-1);
}
- rum_update_slot(ic->ic_ifp);
+ rum_update_slot(sc);
rum_enable_mrr(sc);
rum_set_txpreamble(sc);
rum_set_basicrates(sc);
- IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid);
- rum_set_bssid(sc, sc->sc_bssid);
+ IEEE80211_ADDR_COPY(ic->ic_macaddr, ni->ni_bssid);
+ rum_set_bssid(sc, ic->ic_macaddr);
}
if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_IBSS)
rum_prepare_beacon(sc, vap);
if (vap->iv_opmode != IEEE80211_M_MONITOR)
rum_enable_tsf_sync(sc);
else
rum_enable_tsf(sc);
/* enable automatic rate adaptation */
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE)
rum_ratectl_start(sc, ni);
ieee80211_free_node(ni);
break;
default:
break;
}
RUM_UNLOCK(sc);
IEEE80211_LOCK(ic);
return (rvp->newstate(vap, nstate, arg));
}
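/*
 * Note: the association BSSID is now kept in ic_macaddr (which replaced
 * the removed sc_bssid field) and is reused later by rum_scan_end() when
 * restoring the BSSID after a scan.
 */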
static void
rum_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct rum_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
struct ieee80211vap *vap;
struct rum_tx_data *data;
struct mbuf *m;
struct usb_page_cache *pc;
unsigned int len;
int actlen, sumlen;
usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
DPRINTFN(11, "transfer complete, %d bytes\n", actlen);
/* free resources */
data = usbd_xfer_get_priv(xfer);
rum_tx_free(data, 0);
usbd_xfer_set_priv(xfer, NULL);
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
data = STAILQ_FIRST(&sc->tx_q);
if (data) {
STAILQ_REMOVE_HEAD(&sc->tx_q, next);
m = data->m;
if (m->m_pkthdr.len > (int)(MCLBYTES + RT2573_TX_DESC_SIZE)) {
DPRINTFN(0, "data overflow, %u bytes\n",
m->m_pkthdr.len);
m->m_pkthdr.len = (MCLBYTES + RT2573_TX_DESC_SIZE);
}
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_in(pc, 0, &data->desc, RT2573_TX_DESC_SIZE);
usbd_m_copy_in(pc, RT2573_TX_DESC_SIZE, m, 0,
m->m_pkthdr.len);
vap = data->ni->ni_vap;
if (ieee80211_radiotap_active_vap(vap)) {
struct rum_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = data->rate;
tap->wt_antenna = sc->tx_ant;
ieee80211_radiotap_tx(vap, m);
}
/* align end on a 4-bytes boundary */
len = (RT2573_TX_DESC_SIZE + m->m_pkthdr.len + 3) & ~3;
if ((len % 64) == 0)
len += 4;
DPRINTFN(11, "sending frame len=%u xferlen=%u\n",
m->m_pkthdr.len, len);
usbd_xfer_set_frame_len(xfer, 0, len);
usbd_xfer_set_priv(xfer, data);
usbd_transfer_submit(xfer);
}
- RUM_UNLOCK(sc);
- rum_start(ifp);
- RUM_LOCK(sc);
+ rum_start(sc);
break;
default: /* Error */
DPRINTFN(11, "transfer error, %s\n",
usbd_errstr(error));
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
data = usbd_xfer_get_priv(xfer);
if (data != NULL) {
rum_tx_free(data, error);
usbd_xfer_set_priv(xfer, NULL);
}
if (error != USB_ERR_CANCELLED) {
if (error == USB_ERR_TIMEOUT)
device_printf(sc->sc_dev, "device timeout\n");
/*
* Try to clear stall first, also if other
* errors occur, hence clearing stall
* introduces a 50 ms delay:
*/
usbd_xfer_set_stall(xfer);
goto tr_setup;
}
break;
}
}
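/*
 * Note: the per-ifnet if_ counters are gone from the USB callbacks.
 * Successful transmissions are accounted for in rum_tx_free() via
 * ieee80211_tx_complete(), and both callbacks charge errors to the
 * ieee80211com counters (ic_oerrors/ic_ierrors) instead of the old ifnet
 * statistics.
 */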
static void
rum_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct rum_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni;
struct mbuf *m = NULL;
struct usb_page_cache *pc;
uint32_t flags;
uint8_t rssi = 0;
int len;
usbd_xfer_status(xfer, &len, NULL, NULL, NULL);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
DPRINTFN(15, "rx done, actlen=%d\n", len);
if (len < (int)(RT2573_RX_DESC_SIZE + IEEE80211_MIN_LEN)) {
DPRINTF("%s: xfer too short %d\n",
device_get_nameunit(sc->sc_dev), len);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto tr_setup;
}
len -= RT2573_RX_DESC_SIZE;
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_out(pc, 0, &sc->sc_rx_desc, RT2573_RX_DESC_SIZE);
rssi = rum_get_rssi(sc, sc->sc_rx_desc.rssi);
flags = le32toh(sc->sc_rx_desc.flags);
if (flags & RT2573_RX_CRC_ERROR) {
/*
* This should not happen since we did not
* request to receive those frames when we
* filled RUM_TXRX_CSR2:
*/
DPRINTFN(5, "PHY or CRC error\n");
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto tr_setup;
}
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
DPRINTF("could not allocate mbuf\n");
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto tr_setup;
}
usbd_copy_out(pc, RT2573_RX_DESC_SIZE,
mtod(m, uint8_t *), len);
/* finalize mbuf */
- m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = (flags >> 16) & 0xfff;
if (ieee80211_radiotap_active(ic)) {
struct rum_rx_radiotap_header *tap = &sc->sc_rxtap;
/* XXX read tsf */
tap->wr_flags = 0;
tap->wr_rate = ieee80211_plcp2rate(sc->sc_rx_desc.rate,
(flags & RT2573_RX_OFDM) ?
IEEE80211_T_OFDM : IEEE80211_T_CCK);
tap->wr_antsignal = RT2573_NOISE_FLOOR + rssi;
tap->wr_antnoise = RT2573_NOISE_FLOOR;
tap->wr_antenna = sc->rx_ant;
}
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
usbd_transfer_submit(xfer);
/*
* At the end of a USB callback it is always safe to unlock
* the private mutex of a device! That is why we do the
* "ieee80211_input" here, and not some lines up!
*/
RUM_UNLOCK(sc);
if (m) {
ni = ieee80211_find_rxnode(ic,
mtod(m, struct ieee80211_frame_min *));
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi,
RT2573_NOISE_FLOOR);
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi,
RT2573_NOISE_FLOOR);
}
- if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
- !IFQ_IS_EMPTY(&ifp->if_snd))
- rum_start(ifp);
RUM_LOCK(sc);
+ rum_start(sc);
return;
default: /* Error */
if (error != USB_ERR_CANCELLED) {
/* try to clear stall first */
usbd_xfer_set_stall(xfer);
goto tr_setup;
}
return;
}
}
static uint8_t
rum_plcp_signal(int rate)
{
switch (rate) {
/* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
case 12: return 0xb;
case 18: return 0xf;
case 24: return 0xa;
case 36: return 0xe;
case 48: return 0x9;
case 72: return 0xd;
case 96: return 0x8;
case 108: return 0xc;
/* CCK rates (NB: not IEEE std, device-specific) */
case 2: return 0x0;
case 4: return 0x1;
case 11: return 0x2;
case 22: return 0x3;
}
return 0xff; /* XXX unsupported/unknown rate */
}
static void
rum_setup_tx_desc(struct rum_softc *sc, struct rum_tx_desc *desc,
uint32_t flags, uint16_t xflags, int len, int rate)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t plcp_length;
int remainder;
desc->flags = htole32(flags);
desc->flags |= htole32(RT2573_TX_VALID);
desc->flags |= htole32(len << 16);
desc->xflags = htole16(xflags);
desc->wme = htole16(RT2573_QID(0) | RT2573_AIFSN(2) |
RT2573_LOGCWMIN(4) | RT2573_LOGCWMAX(10));
/* setup PLCP fields */
desc->plcp_signal = rum_plcp_signal(rate);
desc->plcp_service = 4;
len += IEEE80211_CRC_LEN;
if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) {
desc->flags |= htole32(RT2573_TX_OFDM);
plcp_length = len & 0xfff;
desc->plcp_length_hi = plcp_length >> 6;
desc->plcp_length_lo = plcp_length & 0x3f;
} else {
if (rate == 0)
rate = 2; /* avoid division by zero */
plcp_length = (16 * len + rate - 1) / rate;
if (rate == 22) {
remainder = (16 * len) % 22;
if (remainder != 0 && remainder < 7)
desc->plcp_service |= RT2573_PLCP_LENGEXT;
}
desc->plcp_length_hi = plcp_length >> 8;
desc->plcp_length_lo = plcp_length & 0xff;
if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
desc->plcp_signal |= 0x08;
}
}
static int
rum_sendprot(struct rum_softc *sc,
const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate)
{
struct ieee80211com *ic = ni->ni_ic;
const struct ieee80211_frame *wh;
struct rum_tx_data *data;
struct mbuf *mprot;
int protrate, ackrate, pktlen, flags, isshort;
uint16_t dur;
RUM_LOCK_ASSERT(sc, MA_OWNED);
KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY,
("protection %d", prot));
wh = mtod(m, const struct ieee80211_frame *);
pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN;
protrate = ieee80211_ctl_rate(ic->ic_rt, rate);
ackrate = ieee80211_ack_rate(ic->ic_rt, rate);
isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0;
dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort)
+ ieee80211_ack_duration(ic->ic_rt, rate, isshort);
flags = RT2573_TX_MORE_FRAG;
if (prot == IEEE80211_PROT_RTSCTS) {
/* NB: CTS is the same size as an ACK */
dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort);
flags |= RT2573_TX_NEED_ACK;
mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur);
} else {
mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur);
}
if (mprot == NULL) {
/* XXX stat + msg */
return (ENOBUFS);
}
data = STAILQ_FIRST(&sc->tx_free);
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
data->m = mprot;
data->ni = ieee80211_ref_node(ni);
data->rate = protrate;
rum_setup_tx_desc(sc, &data->desc, flags, 0, mprot->m_pkthdr.len, protrate);
STAILQ_INSERT_TAIL(&sc->tx_q, data, next);
usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]);
return 0;
}
static int
rum_tx_mgt(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct rum_tx_data *data;
struct ieee80211_frame *wh;
const struct ieee80211_txparam *tp;
struct ieee80211_key *k;
uint32_t flags = 0;
uint16_t dur;
RUM_LOCK_ASSERT(sc, MA_OWNED);
data = STAILQ_FIRST(&sc->tx_free);
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
wh = mtod(m0, struct ieee80211_frame *);
}
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= RT2573_TX_NEED_ACK;
dur = ieee80211_ack_duration(ic->ic_rt, tp->mgmtrate,
ic->ic_flags & IEEE80211_F_SHPREAMBLE);
USETW(wh->i_dur, dur);
/* tell hardware to add timestamp for probe responses */
if ((wh->i_fc[0] &
(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
(IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
flags |= RT2573_TX_TIMESTAMP;
}
data->m = m0;
data->ni = ni;
data->rate = tp->mgmtrate;
rum_setup_tx_desc(sc, &data->desc, flags, 0, m0->m_pkthdr.len, tp->mgmtrate);
DPRINTFN(10, "sending mgt frame len=%d rate=%d\n",
m0->m_pkthdr.len + (int)RT2573_TX_DESC_SIZE, tp->mgmtrate);
STAILQ_INSERT_TAIL(&sc->tx_q, data, next);
usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]);
return (0);
}
static int
rum_tx_raw(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
struct rum_tx_data *data;
uint32_t flags;
int rate, error;
RUM_LOCK_ASSERT(sc, MA_OWNED);
KASSERT(params != NULL, ("no raw xmit params"));
rate = params->ibp_rate0;
if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
m_freem(m0);
return EINVAL;
}
flags = 0;
if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
flags |= RT2573_TX_NEED_ACK;
if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) {
error = rum_sendprot(sc, m0, ni,
params->ibp_flags & IEEE80211_BPF_RTS ?
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY,
rate);
if (error || sc->tx_nfree == 0) {
m_freem(m0);
return ENOBUFS;
}
flags |= RT2573_TX_LONG_RETRY | RT2573_TX_IFS_SIFS;
}
data = STAILQ_FIRST(&sc->tx_free);
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
data->m = m0;
data->ni = ni;
data->rate = rate;
/* XXX need to setup descriptor ourself */
rum_setup_tx_desc(sc, &data->desc, flags, 0, m0->m_pkthdr.len, rate);
DPRINTFN(10, "sending raw frame len=%u rate=%u\n",
m0->m_pkthdr.len, rate);
STAILQ_INSERT_TAIL(&sc->tx_q, data, next);
usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]);
return 0;
}
static int
rum_tx_data(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct rum_tx_data *data;
struct ieee80211_frame *wh;
const struct ieee80211_txparam *tp;
struct ieee80211_key *k;
uint32_t flags = 0;
uint16_t dur;
int error, rate;
RUM_LOCK_ASSERT(sc, MA_OWNED);
wh = mtod(m0, struct ieee80211_frame *);
tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
if (IEEE80211_IS_MULTICAST(wh->i_addr1))
rate = tp->mcastrate;
else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
rate = tp->ucastrate;
else
rate = ni->ni_txrate;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
int prot = IEEE80211_PROT_NONE;
if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
prot = IEEE80211_PROT_RTSCTS;
else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM)
prot = ic->ic_protmode;
if (prot != IEEE80211_PROT_NONE) {
error = rum_sendprot(sc, m0, ni, prot, rate);
if (error || sc->tx_nfree == 0) {
m_freem(m0);
return ENOBUFS;
}
flags |= RT2573_TX_LONG_RETRY | RT2573_TX_IFS_SIFS;
}
}
data = STAILQ_FIRST(&sc->tx_free);
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
data->m = m0;
data->ni = ni;
data->rate = rate;
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= RT2573_TX_NEED_ACK;
flags |= RT2573_TX_MORE_FRAG;
dur = ieee80211_ack_duration(ic->ic_rt, rate,
ic->ic_flags & IEEE80211_F_SHPREAMBLE);
USETW(wh->i_dur, dur);
}
rum_setup_tx_desc(sc, &data->desc, flags, 0, m0->m_pkthdr.len, rate);
DPRINTFN(10, "sending frame len=%d rate=%d\n",
m0->m_pkthdr.len + (int)RT2573_TX_DESC_SIZE, rate);
STAILQ_INSERT_TAIL(&sc->tx_q, data, next);
usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]);
return 0;
}
+static int
+rum_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct rum_softc *sc = ic->ic_softc;
+ int error;
+
+ RUM_LOCK(sc);
+ if (!sc->sc_running) {
+ RUM_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ RUM_UNLOCK(sc);
+ return (error);
+ }
+ rum_start(sc);
+ RUM_UNLOCK(sc);
+
+ return (0);
+}
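/*
 * Note: rum_transmit() is the new ic_transmit method.  Frames handed down
 * by net80211 are queued on the driver's own mbufq and drained by
 * rum_start() while the device is running; when the enqueue fails the
 * error is returned and net80211 is expected to free the mbuf and drop the
 * node reference stored in m_pkthdr.rcvif.
 */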
+
static void
-rum_start(struct ifnet *ifp)
+rum_start(struct rum_softc *sc)
{
- struct rum_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct mbuf *m;
- RUM_LOCK(sc);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
- RUM_UNLOCK(sc);
+ RUM_LOCK_ASSERT(sc, MA_OWNED);
+
+ if (!sc->sc_running)
return;
- }
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
- if (sc->tx_nfree < RUM_TX_MINFREE) {
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- break;
- }
+
+ while (sc->tx_nfree >= RUM_TX_MINFREE &&
+ (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
if (rum_tx_data(sc, m, ni) != 0) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
break;
}
}
- RUM_UNLOCK(sc);
}
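/*
 * Note: rum_start() now runs with the driver lock held (callers either
 * already hold it or are the USB callbacks) and dequeues frames only while
 * at least RUM_TX_MINFREE TX slots are free.  The old IFF_DRV_OACTIVE
 * back-pressure flag is gone; frames that cannot be sent yet simply stay
 * on sc_snd until the write callback calls rum_start() again.
 */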
-static int
-rum_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+rum_parent(struct ieee80211com *ic)
{
- struct ieee80211com *ic = ifp->if_l2com;
struct rum_softc *sc = ic->ic_softc;
- struct ifreq *ifr = (struct ifreq *) data;
- int error;
int startall = 0;
RUM_LOCK(sc);
- error = sc->sc_detached ? ENXIO : 0;
- RUM_UNLOCK(sc);
- if (error)
- return (error);
-
- switch (cmd) {
- case SIOCSIFFLAGS:
- RUM_LOCK(sc);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
- rum_init_locked(sc);
- startall = 1;
- } else
- rum_setpromisc(sc);
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- rum_stop(sc);
- }
+ if (sc->sc_detached) {
RUM_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
+ return;
}
- return error;
+ if (ic->ic_nrunning > 0) {
+ if (!sc->sc_running) {
+ rum_init(sc);
+ startall = 1;
+ } else
+ rum_setpromisc(sc);
+ } else if (sc->sc_running)
+ rum_stop(sc);
+ RUM_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
}
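/*
 * Note: rum_parent() replaces the SIOCSIFFLAGS handling from the removed
 * rum_ioctl().  net80211 calls it when a vap is brought up or down;
 * ic_nrunning decides whether to initialize or stop the hardware, and a
 * flag change on an already running device only refreshes the promiscuous
 * filter.
 */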
static void
rum_eeprom_read(struct rum_softc *sc, uint16_t addr, void *buf, int len)
{
struct usb_device_request req;
usb_error_t error;
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = RT2573_READ_EEPROM;
USETW(req.wValue, 0);
USETW(req.wIndex, addr);
USETW(req.wLength, len);
error = rum_do_request(sc, &req, buf);
if (error != 0) {
device_printf(sc->sc_dev, "could not read EEPROM: %s\n",
usbd_errstr(error));
}
}
static uint32_t
rum_read(struct rum_softc *sc, uint16_t reg)
{
uint32_t val;
rum_read_multi(sc, reg, &val, sizeof val);
return le32toh(val);
}
static void
rum_read_multi(struct rum_softc *sc, uint16_t reg, void *buf, int len)
{
struct usb_device_request req;
usb_error_t error;
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = RT2573_READ_MULTI_MAC;
USETW(req.wValue, 0);
USETW(req.wIndex, reg);
USETW(req.wLength, len);
error = rum_do_request(sc, &req, buf);
if (error != 0) {
device_printf(sc->sc_dev,
"could not multi read MAC register: %s\n",
usbd_errstr(error));
}
}
static usb_error_t
rum_write(struct rum_softc *sc, uint16_t reg, uint32_t val)
{
uint32_t tmp = htole32(val);
return (rum_write_multi(sc, reg, &tmp, sizeof tmp));
}
static usb_error_t
rum_write_multi(struct rum_softc *sc, uint16_t reg, void *buf, size_t len)
{
struct usb_device_request req;
usb_error_t error;
size_t offset;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = RT2573_WRITE_MULTI_MAC;
USETW(req.wValue, 0);
/* write at most 64 bytes at a time */
for (offset = 0; offset < len; offset += 64) {
USETW(req.wIndex, reg + offset);
USETW(req.wLength, MIN(len - offset, 64));
error = rum_do_request(sc, &req, (char *)buf + offset);
if (error != 0) {
device_printf(sc->sc_dev,
"could not multi write MAC register: %s\n",
usbd_errstr(error));
return (error);
}
}
return (USB_ERR_NORMAL_COMPLETION);
}
static void
rum_bbp_write(struct rum_softc *sc, uint8_t reg, uint8_t val)
{
uint32_t tmp;
int ntries;
DPRINTFN(2, "reg=0x%08x\n", reg);
for (ntries = 0; ntries < 100; ntries++) {
if (!(rum_read(sc, RT2573_PHY_CSR3) & RT2573_BBP_BUSY))
break;
if (rum_pause(sc, hz / 100))
break;
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not write to BBP\n");
return;
}
tmp = RT2573_BBP_BUSY | (reg & 0x7f) << 8 | val;
rum_write(sc, RT2573_PHY_CSR3, tmp);
}
static uint8_t
rum_bbp_read(struct rum_softc *sc, uint8_t reg)
{
uint32_t val;
int ntries;
DPRINTFN(2, "reg=0x%08x\n", reg);
for (ntries = 0; ntries < 100; ntries++) {
if (!(rum_read(sc, RT2573_PHY_CSR3) & RT2573_BBP_BUSY))
break;
if (rum_pause(sc, hz / 100))
break;
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not read BBP\n");
return 0;
}
val = RT2573_BBP_BUSY | RT2573_BBP_READ | reg << 8;
rum_write(sc, RT2573_PHY_CSR3, val);
for (ntries = 0; ntries < 100; ntries++) {
val = rum_read(sc, RT2573_PHY_CSR3);
if (!(val & RT2573_BBP_BUSY))
return val & 0xff;
if (rum_pause(sc, hz / 100))
break;
}
device_printf(sc->sc_dev, "could not read BBP\n");
return 0;
}
static void
rum_rf_write(struct rum_softc *sc, uint8_t reg, uint32_t val)
{
uint32_t tmp;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(rum_read(sc, RT2573_PHY_CSR4) & RT2573_RF_BUSY))
break;
if (rum_pause(sc, hz / 100))
break;
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not write to RF\n");
return;
}
tmp = RT2573_RF_BUSY | RT2573_RF_20BIT | (val & 0xfffff) << 2 |
(reg & 3);
rum_write(sc, RT2573_PHY_CSR4, tmp);
/* remember last written value in sc */
sc->rf_regs[reg] = val;
DPRINTFN(15, "RF R[%u] <- 0x%05x\n", reg & 3, val & 0xfffff);
}
static void
rum_select_antenna(struct rum_softc *sc)
{
uint8_t bbp4, bbp77;
uint32_t tmp;
bbp4 = rum_bbp_read(sc, 4);
bbp77 = rum_bbp_read(sc, 77);
/* TBD */
/* make sure Rx is disabled before switching antenna */
tmp = rum_read(sc, RT2573_TXRX_CSR0);
rum_write(sc, RT2573_TXRX_CSR0, tmp | RT2573_DISABLE_RX);
rum_bbp_write(sc, 4, bbp4);
rum_bbp_write(sc, 77, bbp77);
rum_write(sc, RT2573_TXRX_CSR0, tmp);
}
/*
* Enable multi-rate retries for frames sent at OFDM rates.
* In 802.11b/g mode, allow fallback to CCK rates.
*/
static void
rum_enable_mrr(struct rum_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
tmp = rum_read(sc, RT2573_TXRX_CSR4);
tmp &= ~RT2573_MRR_CCK_FALLBACK;
if (!IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
tmp |= RT2573_MRR_CCK_FALLBACK;
tmp |= RT2573_MRR_ENABLED;
rum_write(sc, RT2573_TXRX_CSR4, tmp);
}
static void
rum_set_txpreamble(struct rum_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
tmp = rum_read(sc, RT2573_TXRX_CSR4);
tmp &= ~RT2573_SHORT_PREAMBLE;
if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
tmp |= RT2573_SHORT_PREAMBLE;
rum_write(sc, RT2573_TXRX_CSR4, tmp);
}
static void
rum_set_basicrates(struct rum_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
/* update basic rate set */
if (ic->ic_curmode == IEEE80211_MODE_11B) {
/* 11b basic rates: 1, 2Mbps */
rum_write(sc, RT2573_TXRX_CSR5, 0x3);
} else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) {
/* 11a basic rates: 6, 12, 24Mbps */
rum_write(sc, RT2573_TXRX_CSR5, 0x150);
} else {
/* 11b/g basic rates: 1, 2, 5.5, 11Mbps */
rum_write(sc, RT2573_TXRX_CSR5, 0xf);
}
}
/*
* Reprogram MAC/BBP to switch to a new band. Values taken from the reference
* driver.
*/
static void
rum_select_band(struct rum_softc *sc, struct ieee80211_channel *c)
{
uint8_t bbp17, bbp35, bbp96, bbp97, bbp98, bbp104;
uint32_t tmp;
/* update all BBP registers that depend on the band */
bbp17 = 0x20; bbp96 = 0x48; bbp104 = 0x2c;
bbp35 = 0x50; bbp97 = 0x48; bbp98 = 0x48;
if (IEEE80211_IS_CHAN_5GHZ(c)) {
bbp17 += 0x08; bbp96 += 0x10; bbp104 += 0x0c;
bbp35 += 0x10; bbp97 += 0x10; bbp98 += 0x10;
}
if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) ||
(IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) {
bbp17 += 0x10; bbp96 += 0x10; bbp104 += 0x10;
}
sc->bbp17 = bbp17;
rum_bbp_write(sc, 17, bbp17);
rum_bbp_write(sc, 96, bbp96);
rum_bbp_write(sc, 104, bbp104);
if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) ||
(IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) {
rum_bbp_write(sc, 75, 0x80);
rum_bbp_write(sc, 86, 0x80);
rum_bbp_write(sc, 88, 0x80);
}
rum_bbp_write(sc, 35, bbp35);
rum_bbp_write(sc, 97, bbp97);
rum_bbp_write(sc, 98, bbp98);
tmp = rum_read(sc, RT2573_PHY_CSR0);
tmp &= ~(RT2573_PA_PE_2GHZ | RT2573_PA_PE_5GHZ);
if (IEEE80211_IS_CHAN_2GHZ(c))
tmp |= RT2573_PA_PE_2GHZ;
else
tmp |= RT2573_PA_PE_5GHZ;
rum_write(sc, RT2573_PHY_CSR0, tmp);
}
static void
rum_set_chan(struct rum_softc *sc, struct ieee80211_channel *c)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
const struct rfprog *rfprog;
uint8_t bbp3, bbp94 = RT2573_BBPR94_DEFAULT;
int8_t power;
int i, chan;
chan = ieee80211_chan2ieee(ic, c);
if (chan == 0 || chan == IEEE80211_CHAN_ANY)
return;
/* select the appropriate RF settings based on what EEPROM says */
rfprog = (sc->rf_rev == RT2573_RF_5225 ||
sc->rf_rev == RT2573_RF_2527) ? rum_rf5225 : rum_rf5226;
/* find the settings for this channel (we know it exists) */
for (i = 0; rfprog[i].chan != chan; i++);
power = sc->txpow[i];
if (power < 0) {
bbp94 += power;
power = 0;
} else if (power > 31) {
bbp94 += power - 31;
power = 31;
}
/*
* If we are switching from the 2GHz band to the 5GHz band or
* vice-versa, BBP registers need to be reprogrammed.
*/
if (c->ic_flags != ic->ic_curchan->ic_flags) {
rum_select_band(sc, c);
rum_select_antenna(sc);
}
ic->ic_curchan = c;
rum_rf_write(sc, RT2573_RF1, rfprog[i].r1);
rum_rf_write(sc, RT2573_RF2, rfprog[i].r2);
rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7);
rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10);
rum_rf_write(sc, RT2573_RF1, rfprog[i].r1);
rum_rf_write(sc, RT2573_RF2, rfprog[i].r2);
rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7 | 1);
rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10);
rum_rf_write(sc, RT2573_RF1, rfprog[i].r1);
rum_rf_write(sc, RT2573_RF2, rfprog[i].r2);
rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7);
rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10);
rum_pause(sc, hz / 100);
/* enable smart mode for MIMO-capable RFs */
bbp3 = rum_bbp_read(sc, 3);
bbp3 &= ~RT2573_SMART_MODE;
if (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_2527)
bbp3 |= RT2573_SMART_MODE;
rum_bbp_write(sc, 3, bbp3);
if (bbp94 != RT2573_BBPR94_DEFAULT)
rum_bbp_write(sc, 94, bbp94);
/* give the chip some extra time to do the switchover */
rum_pause(sc, hz / 100);
}
/*
* Enable TSF synchronization and tell h/w to start sending beacons for IBSS
* and HostAP operating modes.
*/
static void
rum_enable_tsf_sync(struct rum_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t tmp;
if (vap->iv_opmode != IEEE80211_M_STA) {
/*
* Change default 16ms TBTT adjustment to 8ms.
* Must be done before enabling beacon generation.
*/
rum_write(sc, RT2573_TXRX_CSR10, 1 << 12 | 8);
}
tmp = rum_read(sc, RT2573_TXRX_CSR9) & 0xff000000;
/* set beacon interval (in 1/16ms unit) */
tmp |= vap->iv_bss->ni_intval * 16;
tmp |= RT2573_TSF_TICKING | RT2573_ENABLE_TBTT;
if (vap->iv_opmode == IEEE80211_M_STA)
tmp |= RT2573_TSF_MODE(1);
else
tmp |= RT2573_TSF_MODE(2) | RT2573_GENERATE_BEACON;
rum_write(sc, RT2573_TXRX_CSR9, tmp);
}
static void
rum_enable_tsf(struct rum_softc *sc)
{
rum_write(sc, RT2573_TXRX_CSR9,
(rum_read(sc, RT2573_TXRX_CSR9) & 0xff000000) |
RT2573_TSF_TICKING | RT2573_TSF_MODE(2));
}
static void
-rum_update_slot(struct ifnet *ifp)
+rum_update_slot(struct rum_softc *sc)
{
- struct ieee80211com *ic = ifp->if_l2com;
- struct rum_softc *sc = ic->ic_softc;
+ struct ieee80211com *ic = &sc->sc_ic;
uint8_t slottime;
uint32_t tmp;
slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20;
tmp = rum_read(sc, RT2573_MAC_CSR9);
tmp = (tmp & ~0xff) | slottime;
rum_write(sc, RT2573_MAC_CSR9, tmp);
DPRINTF("setting slot time to %uus\n", slottime);
}
static void
rum_set_bssid(struct rum_softc *sc, const uint8_t *bssid)
{
uint32_t tmp;
tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24;
rum_write(sc, RT2573_MAC_CSR4, tmp);
tmp = bssid[4] | bssid[5] << 8 | RT2573_ONE_BSSID << 16;
rum_write(sc, RT2573_MAC_CSR5, tmp);
}
static void
rum_set_macaddr(struct rum_softc *sc, const uint8_t *addr)
{
uint32_t tmp;
tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24;
rum_write(sc, RT2573_MAC_CSR2, tmp);
tmp = addr[4] | addr[5] << 8 | 0xff << 16;
rum_write(sc, RT2573_MAC_CSR3, tmp);
}
static void
rum_setpromisc(struct rum_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
uint32_t tmp;
tmp = rum_read(sc, RT2573_TXRX_CSR0);
tmp &= ~RT2573_DROP_NOT_TO_ME;
- if (!(ifp->if_flags & IFF_PROMISC))
+ if (sc->sc_ic.ic_promisc == 0)
tmp |= RT2573_DROP_NOT_TO_ME;
rum_write(sc, RT2573_TXRX_CSR0, tmp);
- DPRINTF("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ?
+ DPRINTF("%s promiscuous mode\n", sc->sc_ic.ic_promisc > 0 ?
"entering" : "leaving");
}
static void
rum_update_promisc(struct ieee80211com *ic)
{
struct rum_softc *sc = ic->ic_softc;
- if ((ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
-
RUM_LOCK(sc);
+ if (!sc->sc_running) {
+ RUM_UNLOCK(sc);
+ return;
+ }
rum_setpromisc(sc);
RUM_UNLOCK(sc);
}
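/*
 * Note: promiscuous state is now taken from ic_promisc (maintained by
 * net80211) rather than the old ifnet IFF_PROMISC flag, and
 * rum_update_promisc() checks sc_running under the driver lock before
 * touching the RX filter register.
 */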
static void
rum_update_mcast(struct ieee80211com *ic)
{
static int warning_printed;
if (warning_printed == 0) {
ic_printf(ic, "need to implement %s\n", __func__);
warning_printed = 1;
}
}
static const char *
rum_get_rf(int rev)
{
switch (rev) {
case RT2573_RF_2527: return "RT2527 (MIMO XR)";
case RT2573_RF_2528: return "RT2528";
case RT2573_RF_5225: return "RT5225 (MIMO XR)";
case RT2573_RF_5226: return "RT5226";
default: return "unknown";
}
}
static void
rum_read_eeprom(struct rum_softc *sc)
{
uint16_t val;
#ifdef RUM_DEBUG
int i;
#endif
/* read MAC address */
- rum_eeprom_read(sc, RT2573_EEPROM_ADDRESS, sc->sc_bssid, 6);
+ rum_eeprom_read(sc, RT2573_EEPROM_ADDRESS, sc->sc_ic.ic_macaddr, 6);
rum_eeprom_read(sc, RT2573_EEPROM_ANTENNA, &val, 2);
val = le16toh(val);
sc->rf_rev = (val >> 11) & 0x1f;
sc->hw_radio = (val >> 10) & 0x1;
sc->rx_ant = (val >> 4) & 0x3;
sc->tx_ant = (val >> 2) & 0x3;
sc->nb_ant = val & 0x3;
DPRINTF("RF revision=%d\n", sc->rf_rev);
rum_eeprom_read(sc, RT2573_EEPROM_CONFIG2, &val, 2);
val = le16toh(val);
sc->ext_5ghz_lna = (val >> 6) & 0x1;
sc->ext_2ghz_lna = (val >> 4) & 0x1;
DPRINTF("External 2GHz LNA=%d\nExternal 5GHz LNA=%d\n",
sc->ext_2ghz_lna, sc->ext_5ghz_lna);
rum_eeprom_read(sc, RT2573_EEPROM_RSSI_2GHZ_OFFSET, &val, 2);
val = le16toh(val);
if ((val & 0xff) != 0xff)
sc->rssi_2ghz_corr = (int8_t)(val & 0xff); /* signed */
/* Only [-10, 10] is valid */
if (sc->rssi_2ghz_corr < -10 || sc->rssi_2ghz_corr > 10)
sc->rssi_2ghz_corr = 0;
rum_eeprom_read(sc, RT2573_EEPROM_RSSI_5GHZ_OFFSET, &val, 2);
val = le16toh(val);
if ((val & 0xff) != 0xff)
sc->rssi_5ghz_corr = (int8_t)(val & 0xff); /* signed */
/* Only [-10, 10] is valid */
if (sc->rssi_5ghz_corr < -10 || sc->rssi_5ghz_corr > 10)
sc->rssi_5ghz_corr = 0;
if (sc->ext_2ghz_lna)
sc->rssi_2ghz_corr -= 14;
if (sc->ext_5ghz_lna)
sc->rssi_5ghz_corr -= 14;
DPRINTF("RSSI 2GHz corr=%d\nRSSI 5GHz corr=%d\n",
sc->rssi_2ghz_corr, sc->rssi_5ghz_corr);
rum_eeprom_read(sc, RT2573_EEPROM_FREQ_OFFSET, &val, 2);
val = le16toh(val);
if ((val & 0xff) != 0xff)
sc->rffreq = val & 0xff;
DPRINTF("RF freq=%d\n", sc->rffreq);
/* read Tx power for all a/b/g channels */
rum_eeprom_read(sc, RT2573_EEPROM_TXPOWER, sc->txpow, 14);
/* XXX default Tx power for 802.11a channels */
memset(sc->txpow + 14, 24, sizeof (sc->txpow) - 14);
#ifdef RUM_DEBUG
for (i = 0; i < 14; i++)
DPRINTF("Channel=%d Tx power=%d\n", i + 1, sc->txpow[i]);
#endif
/* read default values for BBP registers */
rum_eeprom_read(sc, RT2573_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16);
#ifdef RUM_DEBUG
for (i = 0; i < 14; i++) {
if (sc->bbp_prom[i].reg == 0 || sc->bbp_prom[i].reg == 0xff)
continue;
DPRINTF("BBP R%d=%02x\n", sc->bbp_prom[i].reg,
sc->bbp_prom[i].val);
}
#endif
}
static int
rum_bbp_init(struct rum_softc *sc)
{
int i, ntries;
/* wait for BBP to be ready */
for (ntries = 0; ntries < 100; ntries++) {
const uint8_t val = rum_bbp_read(sc, 0);
if (val != 0 && val != 0xff)
break;
if (rum_pause(sc, hz / 100))
break;
}
if (ntries == 100) {
device_printf(sc->sc_dev, "timeout waiting for BBP\n");
return EIO;
}
/* initialize BBP registers to default values */
for (i = 0; i < N(rum_def_bbp); i++)
rum_bbp_write(sc, rum_def_bbp[i].reg, rum_def_bbp[i].val);
/* write vendor-specific BBP values (from EEPROM) */
for (i = 0; i < 16; i++) {
if (sc->bbp_prom[i].reg == 0 || sc->bbp_prom[i].reg == 0xff)
continue;
rum_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val);
}
return 0;
}
static void
-rum_init_locked(struct rum_softc *sc)
+rum_init(struct rum_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t tmp;
usb_error_t error;
int i, ntries;
RUM_LOCK_ASSERT(sc, MA_OWNED);
rum_stop(sc);
/* initialize MAC registers to default values */
for (i = 0; i < N(rum_def_mac); i++)
rum_write(sc, rum_def_mac[i].reg, rum_def_mac[i].val);
/* set host ready */
rum_write(sc, RT2573_MAC_CSR1, 3);
rum_write(sc, RT2573_MAC_CSR1, 0);
/* wait for BBP/RF to wakeup */
for (ntries = 0; ntries < 100; ntries++) {
if (rum_read(sc, RT2573_MAC_CSR12) & 8)
break;
rum_write(sc, RT2573_MAC_CSR12, 4); /* force wakeup */
if (rum_pause(sc, hz / 100))
break;
}
if (ntries == 100) {
device_printf(sc->sc_dev,
"timeout waiting for BBP/RF to wakeup\n");
goto fail;
}
if ((error = rum_bbp_init(sc)) != 0)
goto fail;
/* select default channel */
rum_select_band(sc, ic->ic_curchan);
rum_select_antenna(sc);
rum_set_chan(sc, ic->ic_curchan);
/* clear STA registers */
rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof sc->sta);
- rum_set_macaddr(sc, IF_LLADDR(ifp));
+ rum_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);
/* initialize ASIC */
rum_write(sc, RT2573_MAC_CSR1, 4);
/*
* Allocate Tx and Rx xfer queues.
*/
rum_setup_tx_list(sc);
/* update Rx filter */
tmp = rum_read(sc, RT2573_TXRX_CSR0) & 0xffff;
tmp |= RT2573_DROP_PHY_ERROR | RT2573_DROP_CRC_ERROR;
if (ic->ic_opmode != IEEE80211_M_MONITOR) {
tmp |= RT2573_DROP_CTL | RT2573_DROP_VER_ERROR |
RT2573_DROP_ACKCTS;
if (ic->ic_opmode != IEEE80211_M_HOSTAP)
tmp |= RT2573_DROP_TODS;
- if (!(ifp->if_flags & IFF_PROMISC))
+ if (ic->ic_promisc == 0)
tmp |= RT2573_DROP_NOT_TO_ME;
}
rum_write(sc, RT2573_TXRX_CSR0, tmp);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_running = 1;
usbd_xfer_set_stall(sc->sc_xfer[RUM_BULK_WR]);
usbd_transfer_start(sc->sc_xfer[RUM_BULK_RD]);
return;
fail: rum_stop(sc);
#undef N
}
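/*
 * Note: rum_init() is now the locked initialization path itself; the old
 * unlocked rum_init() wrapper that followed has been removed, and
 * ieee80211_start_all() is instead triggered from rum_parent().  The
 * sc_running bit replaces the IFF_DRV_RUNNING/IFF_DRV_OACTIVE ifnet flags,
 * and the MAC address programmed into the chip is the first vap's address
 * when one exists, falling back to ic_macaddr.
 */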
static void
-rum_init(void *priv)
-{
- struct rum_softc *sc = priv;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
-
- RUM_LOCK(sc);
- rum_init_locked(sc);
- RUM_UNLOCK(sc);
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ieee80211_start_all(ic); /* start all vap's */
-}
-
-static void
rum_stop(struct rum_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
uint32_t tmp;
RUM_LOCK_ASSERT(sc, MA_OWNED);
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->sc_running = 0;
RUM_UNLOCK(sc);
/*
* Drain the USB transfers, if not already drained:
*/
usbd_transfer_drain(sc->sc_xfer[RUM_BULK_WR]);
usbd_transfer_drain(sc->sc_xfer[RUM_BULK_RD]);
RUM_LOCK(sc);
rum_unsetup_tx_list(sc);
/* disable Rx */
tmp = rum_read(sc, RT2573_TXRX_CSR0);
rum_write(sc, RT2573_TXRX_CSR0, tmp | RT2573_DISABLE_RX);
/* reset ASIC */
rum_write(sc, RT2573_MAC_CSR1, 3);
rum_write(sc, RT2573_MAC_CSR1, 0);
}
static void
rum_load_microcode(struct rum_softc *sc, const uint8_t *ucode, size_t size)
{
struct usb_device_request req;
uint16_t reg = RT2573_MCU_CODE_BASE;
usb_error_t err;
/* copy firmware image into NIC */
for (; size >= 4; reg += 4, ucode += 4, size -= 4) {
err = rum_write(sc, reg, UGETDW(ucode));
if (err) {
/* firmware already loaded ? */
device_printf(sc->sc_dev, "Firmware load "
"failure! (ignored)\n");
break;
}
}
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = RT2573_MCU_CNTL;
USETW(req.wValue, RT2573_MCU_RUN);
USETW(req.wIndex, 0);
USETW(req.wLength, 0);
err = rum_do_request(sc, &req, NULL);
if (err != 0) {
device_printf(sc->sc_dev, "could not run firmware: %s\n",
usbd_errstr(err));
}
/* give the chip some time to boot */
rum_pause(sc, hz / 8);
}
static void
rum_prepare_beacon(struct rum_softc *sc, struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
const struct ieee80211_txparam *tp;
struct rum_tx_desc desc;
struct mbuf *m0;
if (vap->iv_bss->ni_chan == IEEE80211_CHAN_ANYC)
return;
if (ic->ic_bsschan == IEEE80211_CHAN_ANYC)
return;
m0 = ieee80211_beacon_alloc(vap->iv_bss, &RUM_VAP(vap)->bo);
if (m0 == NULL)
return;
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)];
rum_setup_tx_desc(sc, &desc, RT2573_TX_TIMESTAMP, RT2573_TX_HWSEQ,
m0->m_pkthdr.len, tp->mgmtrate);
/* copy the first 24 bytes of Tx descriptor into NIC memory */
rum_write_multi(sc, RT2573_HW_BEACON_BASE0, (uint8_t *)&desc, 24);
/* copy beacon header and payload into NIC memory */
rum_write_multi(sc, RT2573_HW_BEACON_BASE0 + 24, mtod(m0, uint8_t *),
m0->m_pkthdr.len);
m_freem(m0);
}
static int
rum_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
- struct ifnet *ifp = ni->ni_ic->ic_ifp;
struct rum_softc *sc = ni->ni_ic->ic_softc;
RUM_LOCK(sc);
/* prevent management frames from being sent if we're not ready */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!sc->sc_running) {
RUM_UNLOCK(sc);
m_freem(m);
ieee80211_free_node(ni);
return ENETDOWN;
}
if (sc->tx_nfree < RUM_TX_MINFREE) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
RUM_UNLOCK(sc);
m_freem(m);
ieee80211_free_node(ni);
return EIO;
}
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
-
if (params == NULL) {
/*
* Legacy path; interpret frame contents to decide
* precisely how to send the frame.
*/
if (rum_tx_mgt(sc, m, ni) != 0)
goto bad;
} else {
/*
* Caller supplied explicit parameters to use in
* sending the frame.
*/
if (rum_tx_raw(sc, m, ni, params) != 0)
goto bad;
}
RUM_UNLOCK(sc);
return 0;
bad:
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
RUM_UNLOCK(sc);
ieee80211_free_node(ni);
return EIO;
}
static void
rum_ratectl_start(struct rum_softc *sc, struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct rum_vap *rvp = RUM_VAP(vap);
/* clear statistic registers (STA_CSR0 to STA_CSR5) */
rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof sc->sta);
usb_callout_reset(&rvp->ratectl_ch, hz, rum_ratectl_timeout, rvp);
}
static void
rum_ratectl_timeout(void *arg)
{
struct rum_vap *rvp = arg;
struct ieee80211vap *vap = &rvp->vap;
struct ieee80211com *ic = vap->iv_ic;
ieee80211_runtask(ic, &rvp->ratectl_task);
}
static void
rum_ratectl_task(void *arg, int pending)
{
struct rum_vap *rvp = arg;
struct ieee80211vap *vap = &rvp->vap;
struct ieee80211com *ic = vap->iv_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct rum_softc *sc = ic->ic_softc;
struct ieee80211_node *ni;
int ok, fail;
int sum, retrycnt;
RUM_LOCK(sc);
/* read and clear statistic registers (STA_CSR0 to STA_CSR10) */
rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof(sc->sta));
ok = (le32toh(sc->sta[4]) >> 16) + /* TX ok w/o retry */
(le32toh(sc->sta[5]) & 0xffff); /* TX ok w/ retry */
fail = (le32toh(sc->sta[5]) >> 16); /* TX retry-fail count */
sum = ok+fail;
retrycnt = (le32toh(sc->sta[5]) & 0xffff) + fail;
ni = ieee80211_ref_node(vap->iv_bss);
ieee80211_ratectl_tx_update(vap, ni, &sum, &ok, &retrycnt);
(void) ieee80211_ratectl_rate(ni, NULL, 0);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, fail); /* count TX retry-fail as Tx errors */
+ /* count TX retry-fail as Tx errors */
+ if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, fail);
usb_callout_reset(&rvp->ratectl_ch, hz, rum_ratectl_timeout, rvp);
RUM_UNLOCK(sc);
}
static void
rum_scan_start(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
struct rum_softc *sc = ic->ic_softc;
uint32_t tmp;
RUM_LOCK(sc);
/* abort TSF synchronization */
tmp = rum_read(sc, RT2573_TXRX_CSR9);
rum_write(sc, RT2573_TXRX_CSR9, tmp & ~0x00ffffff);
- rum_set_bssid(sc, ifp->if_broadcastaddr);
+ rum_set_bssid(sc, ieee80211broadcastaddr);
RUM_UNLOCK(sc);
}
static void
rum_scan_end(struct ieee80211com *ic)
{
struct rum_softc *sc = ic->ic_softc;
RUM_LOCK(sc);
rum_enable_tsf_sync(sc);
- rum_set_bssid(sc, sc->sc_bssid);
+ rum_set_bssid(sc, ic->ic_macaddr);
RUM_UNLOCK(sc);
}
static void
rum_set_channel(struct ieee80211com *ic)
{
struct rum_softc *sc = ic->ic_softc;
RUM_LOCK(sc);
rum_set_chan(sc, ic->ic_curchan);
RUM_UNLOCK(sc);
}
static int
rum_get_rssi(struct rum_softc *sc, uint8_t raw)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
int lna, agc, rssi;
lna = (raw >> 5) & 0x3;
agc = raw & 0x1f;
if (lna == 0) {
/*
* No RSSI mapping
*
* NB: Since RSSI is relative to noise floor, -1 is
* adequate for caller to know error happened.
*/
return -1;
}
rssi = (2 * agc) - RT2573_NOISE_FLOOR;
if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) {
rssi += sc->rssi_2ghz_corr;
if (lna == 1)
rssi -= 64;
else if (lna == 2)
rssi -= 74;
else if (lna == 3)
rssi -= 90;
} else {
rssi += sc->rssi_5ghz_corr;
if (!sc->ext_5ghz_lna && lna != 1)
rssi += 4;
if (lna == 1)
rssi -= 64;
else if (lna == 2)
rssi -= 86;
else if (lna == 3)
rssi -= 100;
}
return rssi;
}
static int
rum_pause(struct rum_softc *sc, int timeout)
{
usb_pause_mtx(&sc->sc_mtx, timeout);
return (0);
}
static device_method_t rum_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, rum_match),
DEVMETHOD(device_attach, rum_attach),
DEVMETHOD(device_detach, rum_detach),
DEVMETHOD_END
};
static driver_t rum_driver = {
.name = "rum",
.methods = rum_methods,
.size = sizeof(struct rum_softc),
};
static devclass_t rum_devclass;
DRIVER_MODULE(rum, uhub, rum_driver, rum_devclass, NULL, 0);
MODULE_DEPEND(rum, wlan, 1, 1, 1);
MODULE_DEPEND(rum, usb, 1, 1, 1);
MODULE_VERSION(rum, 1);
Index: head/sys/dev/usb/wlan/if_rumvar.h
===================================================================
--- head/sys/dev/usb/wlan/if_rumvar.h (revision 287196)
+++ head/sys/dev/usb/wlan/if_rumvar.h (revision 287197)
@@ -1,135 +1,136 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2005, 2006 Damien Bergamini <damien.bergamini@free.fr>
* Copyright (c) 2006 Niall O'Higgins <niallo@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define RUM_TX_LIST_COUNT 8
#define RUM_TX_MINFREE 2
struct rum_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_antsignal;
int8_t wr_antnoise;
uint8_t wr_antenna;
} __packed __aligned(8);
#define RT2573_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | \
(1 << IEEE80211_RADIOTAP_ANTENNA) | \
0)
struct rum_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint8_t wt_rate;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
uint8_t wt_antenna;
} __packed __aligned(8);
#define RT2573_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_ANTENNA))
struct rum_softc;
struct rum_tx_data {
STAILQ_ENTRY(rum_tx_data) next;
struct rum_softc *sc;
struct rum_tx_desc desc;
struct mbuf *m;
struct ieee80211_node *ni;
int rate;
};
typedef STAILQ_HEAD(, rum_tx_data) rum_txdhead;
struct rum_vap {
struct ieee80211vap vap;
struct ieee80211_beacon_offsets bo;
struct usb_callout ratectl_ch;
struct task ratectl_task;
int (*newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define RUM_VAP(vap) ((struct rum_vap *)(vap))
enum {
RUM_BULK_WR,
RUM_BULK_RD,
RUM_N_TRANSFER = 2,
};
struct rum_softc {
- struct ifnet *sc_ifp;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
device_t sc_dev;
struct usb_device *sc_udev;
struct usb_xfer *sc_xfer[RUM_N_TRANSFER];
uint8_t rf_rev;
uint8_t rffreq;
struct rum_tx_data tx_data[RUM_TX_LIST_COUNT];
rum_txdhead tx_q;
rum_txdhead tx_free;
int tx_nfree;
struct rum_rx_desc sc_rx_desc;
struct mtx sc_mtx;
uint32_t sta[6];
uint32_t rf_regs[4];
uint8_t txpow[44];
- uint8_t sc_bssid[6];
- uint8_t sc_detached;
+ u_int sc_detached:1,
+ sc_running:1;
struct {
uint8_t val;
uint8_t reg;
} __packed bbp_prom[16];
int hw_radio;
int rx_ant;
int tx_ant;
int nb_ant;
int ext_2ghz_lna;
int ext_5ghz_lna;
int rssi_2ghz_corr;
int rssi_5ghz_corr;
uint8_t bbp17;
struct rum_rx_radiotap_header sc_rxtap;
int sc_rxtap_len;
struct rum_tx_radiotap_header sc_txtap;
int sc_txtap_len;
};
#define RUM_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define RUM_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
#define RUM_LOCK_ASSERT(sc, t) mtx_assert(&(sc)->sc_mtx, t)
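/*
 * Summary of the if_rumvar.h changes above: the softc loses its ifnet
 * pointer and sc_bssid copy and instead embeds the ieee80211com plus a
 * software transmit queue (sc_snd); sc_detached becomes a bit-field and
 * gains a companion sc_running bit that replaces the old ifnet driver
 * flags.
 */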
Index: head/sys/dev/usb/wlan/if_run.c
===================================================================
--- head/sys/dev/usb/wlan/if_run.c (revision 287196)
+++ head/sys/dev/usb/wlan/if_run.c (revision 287197)
@@ -1,6306 +1,6218 @@
/*-
* Copyright (c) 2008,2010 Damien Bergamini <damien.bergamini@free.fr>
* ported to FreeBSD by Akinori Furukoshi <moonlightakkiy@yahoo.ca>
* USB Consulting, Hans Petter Selasky <hselasky@freebsd.org>
* Copyright (c) 2013-2014 Kevin Lo
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
* Ralink Technology RT2700U/RT2800U/RT3000U/RT3900E chipset driver.
* http://www.ralinktech.com/
*/
#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/kdb.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_ratectl.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include "usbdevs.h"
#define USB_DEBUG_VAR run_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/usb_msctest.h>
#include <dev/usb/wlan/if_runreg.h>
#include <dev/usb/wlan/if_runvar.h>
#ifdef USB_DEBUG
#define RUN_DEBUG
#endif
#ifdef RUN_DEBUG
int run_debug = 0;
static SYSCTL_NODE(_hw_usb, OID_AUTO, run, CTLFLAG_RW, 0, "USB run");
SYSCTL_INT(_hw_usb_run, OID_AUTO, debug, CTLFLAG_RWTUN, &run_debug, 0,
"run debug level");
#endif
#define IEEE80211_HAS_ADDR4(wh) \
(((wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
/*
* Because of LOR in run_key_delete(), use atomic instead.
* '& RUN_CMDQ_MASQ' is to loop cmdq[].
*/
#define RUN_CMDQ_GET(c) (atomic_fetchadd_32((c), 1) & RUN_CMDQ_MASQ)
static const STRUCT_USB_HOST_ID run_devs[] = {
#define RUN_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
#define RUN_DEV_EJECT(v,p) \
{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, RUN_EJECT) }
#define RUN_EJECT 1
RUN_DEV(ABOCOM, RT2770),
RUN_DEV(ABOCOM, RT2870),
RUN_DEV(ABOCOM, RT3070),
RUN_DEV(ABOCOM, RT3071),
RUN_DEV(ABOCOM, RT3072),
RUN_DEV(ABOCOM2, RT2870_1),
RUN_DEV(ACCTON, RT2770),
RUN_DEV(ACCTON, RT2870_1),
RUN_DEV(ACCTON, RT2870_2),
RUN_DEV(ACCTON, RT2870_3),
RUN_DEV(ACCTON, RT2870_4),
RUN_DEV(ACCTON, RT2870_5),
RUN_DEV(ACCTON, RT3070),
RUN_DEV(ACCTON, RT3070_1),
RUN_DEV(ACCTON, RT3070_2),
RUN_DEV(ACCTON, RT3070_3),
RUN_DEV(ACCTON, RT3070_4),
RUN_DEV(ACCTON, RT3070_5),
RUN_DEV(AIRTIES, RT3070),
RUN_DEV(ALLWIN, RT2070),
RUN_DEV(ALLWIN, RT2770),
RUN_DEV(ALLWIN, RT2870),
RUN_DEV(ALLWIN, RT3070),
RUN_DEV(ALLWIN, RT3071),
RUN_DEV(ALLWIN, RT3072),
RUN_DEV(ALLWIN, RT3572),
RUN_DEV(AMIGO, RT2870_1),
RUN_DEV(AMIGO, RT2870_2),
RUN_DEV(AMIT, CGWLUSB2GNR),
RUN_DEV(AMIT, RT2870_1),
RUN_DEV(AMIT2, RT2870),
RUN_DEV(ASUS, RT2870_1),
RUN_DEV(ASUS, RT2870_2),
RUN_DEV(ASUS, RT2870_3),
RUN_DEV(ASUS, RT2870_4),
RUN_DEV(ASUS, RT2870_5),
RUN_DEV(ASUS, USBN13),
RUN_DEV(ASUS, RT3070_1),
RUN_DEV(ASUS, USBN66),
RUN_DEV(ASUS, USB_N53),
RUN_DEV(ASUS2, USBN11),
RUN_DEV(AZUREWAVE, RT2870_1),
RUN_DEV(AZUREWAVE, RT2870_2),
RUN_DEV(AZUREWAVE, RT3070_1),
RUN_DEV(AZUREWAVE, RT3070_2),
RUN_DEV(AZUREWAVE, RT3070_3),
RUN_DEV(BELKIN, F9L1103),
RUN_DEV(BELKIN, F5D8053V3),
RUN_DEV(BELKIN, F5D8055),
RUN_DEV(BELKIN, F5D8055V2),
RUN_DEV(BELKIN, F6D4050V1),
RUN_DEV(BELKIN, F6D4050V2),
RUN_DEV(BELKIN, RT2870_1),
RUN_DEV(BELKIN, RT2870_2),
RUN_DEV(CISCOLINKSYS, AE1000),
RUN_DEV(CISCOLINKSYS2, RT3070),
RUN_DEV(CISCOLINKSYS3, RT3070),
RUN_DEV(CONCEPTRONIC2, RT2870_1),
RUN_DEV(CONCEPTRONIC2, RT2870_2),
RUN_DEV(CONCEPTRONIC2, RT2870_3),
RUN_DEV(CONCEPTRONIC2, RT2870_4),
RUN_DEV(CONCEPTRONIC2, RT2870_5),
RUN_DEV(CONCEPTRONIC2, RT2870_6),
RUN_DEV(CONCEPTRONIC2, RT2870_7),
RUN_DEV(CONCEPTRONIC2, RT2870_8),
RUN_DEV(CONCEPTRONIC2, RT3070_1),
RUN_DEV(CONCEPTRONIC2, RT3070_2),
RUN_DEV(CONCEPTRONIC2, VIGORN61),
RUN_DEV(COREGA, CGWLUSB300GNM),
RUN_DEV(COREGA, RT2870_1),
RUN_DEV(COREGA, RT2870_2),
RUN_DEV(COREGA, RT2870_3),
RUN_DEV(COREGA, RT3070),
RUN_DEV(CYBERTAN, RT2870),
RUN_DEV(DLINK, RT2870),
RUN_DEV(DLINK, RT3072),
RUN_DEV(DLINK, DWA127),
RUN_DEV(DLINK, DWA140B3),
RUN_DEV(DLINK, DWA160B2),
RUN_DEV(DLINK, DWA140D1),
RUN_DEV(DLINK, DWA162),
RUN_DEV(DLINK2, DWA130),
RUN_DEV(DLINK2, RT2870_1),
RUN_DEV(DLINK2, RT2870_2),
RUN_DEV(DLINK2, RT3070_1),
RUN_DEV(DLINK2, RT3070_2),
RUN_DEV(DLINK2, RT3070_3),
RUN_DEV(DLINK2, RT3070_4),
RUN_DEV(DLINK2, RT3070_5),
RUN_DEV(DLINK2, RT3072),
RUN_DEV(DLINK2, RT3072_1),
RUN_DEV(EDIMAX, EW7717),
RUN_DEV(EDIMAX, EW7718),
RUN_DEV(EDIMAX, EW7733UND),
RUN_DEV(EDIMAX, RT2870_1),
RUN_DEV(ENCORE, RT3070_1),
RUN_DEV(ENCORE, RT3070_2),
RUN_DEV(ENCORE, RT3070_3),
RUN_DEV(GIGABYTE, GNWB31N),
RUN_DEV(GIGABYTE, GNWB32L),
RUN_DEV(GIGABYTE, RT2870_1),
RUN_DEV(GIGASET, RT3070_1),
RUN_DEV(GIGASET, RT3070_2),
RUN_DEV(GUILLEMOT, HWNU300),
RUN_DEV(HAWKING, HWUN2),
RUN_DEV(HAWKING, RT2870_1),
RUN_DEV(HAWKING, RT2870_2),
RUN_DEV(HAWKING, RT3070),
RUN_DEV(IODATA, RT3072_1),
RUN_DEV(IODATA, RT3072_2),
RUN_DEV(IODATA, RT3072_3),
RUN_DEV(IODATA, RT3072_4),
RUN_DEV(LINKSYS4, RT3070),
RUN_DEV(LINKSYS4, WUSB100),
RUN_DEV(LINKSYS4, WUSB54GCV3),
RUN_DEV(LINKSYS4, WUSB600N),
RUN_DEV(LINKSYS4, WUSB600NV2),
RUN_DEV(LOGITEC, RT2870_1),
RUN_DEV(LOGITEC, RT2870_2),
RUN_DEV(LOGITEC, RT2870_3),
RUN_DEV(LOGITEC, LANW300NU2),
RUN_DEV(LOGITEC, LANW150NU2),
RUN_DEV(LOGITEC, LANW300NU2S),
RUN_DEV(MELCO, WLIUCG300HP),
RUN_DEV(MELCO, RT2870_2),
RUN_DEV(MELCO, WLIUCAG300N),
RUN_DEV(MELCO, WLIUCG300N),
RUN_DEV(MELCO, WLIUCG301N),
RUN_DEV(MELCO, WLIUCGN),
RUN_DEV(MELCO, WLIUCGNM),
RUN_DEV(MELCO, WLIUCG300HPV1),
RUN_DEV(MELCO, WLIUCGNM2),
RUN_DEV(MOTOROLA4, RT2770),
RUN_DEV(MOTOROLA4, RT3070),
RUN_DEV(MSI, RT3070_1),
RUN_DEV(MSI, RT3070_2),
RUN_DEV(MSI, RT3070_3),
RUN_DEV(MSI, RT3070_4),
RUN_DEV(MSI, RT3070_5),
RUN_DEV(MSI, RT3070_6),
RUN_DEV(MSI, RT3070_7),
RUN_DEV(MSI, RT3070_8),
RUN_DEV(MSI, RT3070_9),
RUN_DEV(MSI, RT3070_10),
RUN_DEV(MSI, RT3070_11),
RUN_DEV(OVISLINK, RT3072),
RUN_DEV(PARA, RT3070),
RUN_DEV(PEGATRON, RT2870),
RUN_DEV(PEGATRON, RT3070),
RUN_DEV(PEGATRON, RT3070_2),
RUN_DEV(PEGATRON, RT3070_3),
RUN_DEV(PHILIPS, RT2870),
RUN_DEV(PLANEX2, GWUS300MINIS),
RUN_DEV(PLANEX2, GWUSMICRON),
RUN_DEV(PLANEX2, RT2870),
RUN_DEV(PLANEX2, RT3070),
RUN_DEV(QCOM, RT2870),
RUN_DEV(QUANTA, RT3070),
RUN_DEV(RALINK, RT2070),
RUN_DEV(RALINK, RT2770),
RUN_DEV(RALINK, RT2870),
RUN_DEV(RALINK, RT3070),
RUN_DEV(RALINK, RT3071),
RUN_DEV(RALINK, RT3072),
RUN_DEV(RALINK, RT3370),
RUN_DEV(RALINK, RT3572),
RUN_DEV(RALINK, RT3573),
RUN_DEV(RALINK, RT5370),
RUN_DEV(RALINK, RT5572),
RUN_DEV(RALINK, RT8070),
RUN_DEV(SAMSUNG, WIS09ABGN),
RUN_DEV(SAMSUNG2, RT2870_1),
RUN_DEV(SENAO, RT2870_1),
RUN_DEV(SENAO, RT2870_2),
RUN_DEV(SENAO, RT2870_3),
RUN_DEV(SENAO, RT2870_4),
RUN_DEV(SENAO, RT3070),
RUN_DEV(SENAO, RT3071),
RUN_DEV(SENAO, RT3072_1),
RUN_DEV(SENAO, RT3072_2),
RUN_DEV(SENAO, RT3072_3),
RUN_DEV(SENAO, RT3072_4),
RUN_DEV(SENAO, RT3072_5),
RUN_DEV(SITECOMEU, RT2770),
RUN_DEV(SITECOMEU, RT2870_1),
RUN_DEV(SITECOMEU, RT2870_2),
RUN_DEV(SITECOMEU, RT2870_3),
RUN_DEV(SITECOMEU, RT2870_4),
RUN_DEV(SITECOMEU, RT3070),
RUN_DEV(SITECOMEU, RT3070_2),
RUN_DEV(SITECOMEU, RT3070_3),
RUN_DEV(SITECOMEU, RT3070_4),
RUN_DEV(SITECOMEU, RT3071),
RUN_DEV(SITECOMEU, RT3072_1),
RUN_DEV(SITECOMEU, RT3072_2),
RUN_DEV(SITECOMEU, RT3072_3),
RUN_DEV(SITECOMEU, RT3072_4),
RUN_DEV(SITECOMEU, RT3072_5),
RUN_DEV(SITECOMEU, RT3072_6),
RUN_DEV(SITECOMEU, WL608),
RUN_DEV(SPARKLAN, RT2870_1),
RUN_DEV(SPARKLAN, RT3070),
RUN_DEV(SWEEX2, LW153),
RUN_DEV(SWEEX2, LW303),
RUN_DEV(SWEEX2, LW313),
RUN_DEV(TOSHIBA, RT3070),
RUN_DEV(UMEDIA, RT2870_1),
RUN_DEV(ZCOM, RT2870_1),
RUN_DEV(ZCOM, RT2870_2),
RUN_DEV(ZINWELL, RT2870_1),
RUN_DEV(ZINWELL, RT2870_2),
RUN_DEV(ZINWELL, RT3070),
RUN_DEV(ZINWELL, RT3072_1),
RUN_DEV(ZINWELL, RT3072_2),
RUN_DEV(ZYXEL, RT2870_1),
RUN_DEV(ZYXEL, RT2870_2),
RUN_DEV(ZYXEL, RT3070),
RUN_DEV_EJECT(ZYXEL, NWD2705),
RUN_DEV_EJECT(RALINK, RT_STOR),
#undef RUN_DEV_EJECT
#undef RUN_DEV
};
static device_probe_t run_match;
static device_attach_t run_attach;
static device_detach_t run_detach;
static usb_callback_t run_bulk_rx_callback;
static usb_callback_t run_bulk_tx_callback0;
static usb_callback_t run_bulk_tx_callback1;
static usb_callback_t run_bulk_tx_callback2;
static usb_callback_t run_bulk_tx_callback3;
static usb_callback_t run_bulk_tx_callback4;
static usb_callback_t run_bulk_tx_callback5;
static void run_autoinst(void *, struct usb_device *,
struct usb_attach_arg *);
static int run_driver_loaded(struct module *, int, void *);
static void run_bulk_tx_callbackN(struct usb_xfer *xfer,
usb_error_t error, u_int index);
static struct ieee80211vap *run_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void run_vap_delete(struct ieee80211vap *);
static void run_cmdq_cb(void *, int);
static void run_setup_tx_list(struct run_softc *,
struct run_endpoint_queue *);
static void run_unsetup_tx_list(struct run_softc *,
struct run_endpoint_queue *);
static int run_load_microcode(struct run_softc *);
static int run_reset(struct run_softc *);
static usb_error_t run_do_request(struct run_softc *,
struct usb_device_request *, void *);
static int run_read(struct run_softc *, uint16_t, uint32_t *);
static int run_read_region_1(struct run_softc *, uint16_t, uint8_t *, int);
static int run_write_2(struct run_softc *, uint16_t, uint16_t);
static int run_write(struct run_softc *, uint16_t, uint32_t);
static int run_write_region_1(struct run_softc *, uint16_t,
const uint8_t *, int);
static int run_set_region_4(struct run_softc *, uint16_t, uint32_t, int);
static int run_efuse_read(struct run_softc *, uint16_t, uint16_t *, int);
static int run_efuse_read_2(struct run_softc *, uint16_t, uint16_t *);
static int run_eeprom_read_2(struct run_softc *, uint16_t, uint16_t *);
static int run_rt2870_rf_write(struct run_softc *, uint32_t);
static int run_rt3070_rf_read(struct run_softc *, uint8_t, uint8_t *);
static int run_rt3070_rf_write(struct run_softc *, uint8_t, uint8_t);
static int run_bbp_read(struct run_softc *, uint8_t, uint8_t *);
static int run_bbp_write(struct run_softc *, uint8_t, uint8_t);
static int run_mcu_cmd(struct run_softc *, uint8_t, uint16_t);
static const char *run_get_rf(uint16_t);
static void run_rt3593_get_txpower(struct run_softc *);
static void run_get_txpower(struct run_softc *);
static int run_read_eeprom(struct run_softc *);
static struct ieee80211_node *run_node_alloc(struct ieee80211vap *,
const uint8_t mac[IEEE80211_ADDR_LEN]);
static int run_media_change(struct ifnet *);
static int run_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static int run_wme_update(struct ieee80211com *);
static void run_wme_update_cb(void *);
static void run_key_update_begin(struct ieee80211vap *);
static void run_key_update_end(struct ieee80211vap *);
static void run_key_set_cb(void *);
static int run_key_set(struct ieee80211vap *, struct ieee80211_key *,
const uint8_t mac[IEEE80211_ADDR_LEN]);
static void run_key_delete_cb(void *);
static int run_key_delete(struct ieee80211vap *, struct ieee80211_key *);
static void run_ratectl_to(void *);
static void run_ratectl_cb(void *, int);
static void run_drain_fifo(void *);
static void run_iter_func(void *, struct ieee80211_node *);
static void run_newassoc_cb(void *);
static void run_newassoc(struct ieee80211_node *, int);
static void run_rx_frame(struct run_softc *, struct mbuf *, uint32_t);
static void run_tx_free(struct run_endpoint_queue *pq,
struct run_tx_data *, int);
static void run_set_tx_desc(struct run_softc *, struct run_tx_data *);
static int run_tx(struct run_softc *, struct mbuf *,
struct ieee80211_node *);
static int run_tx_mgt(struct run_softc *, struct mbuf *,
struct ieee80211_node *);
static int run_sendprot(struct run_softc *, const struct mbuf *,
struct ieee80211_node *, int, int);
static int run_tx_param(struct run_softc *, struct mbuf *,
struct ieee80211_node *,
const struct ieee80211_bpf_params *);
static int run_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
-static void run_start(struct ifnet *);
-static int run_ioctl(struct ifnet *, u_long, caddr_t);
+static int run_transmit(struct ieee80211com *, struct mbuf *);
+static void run_start(struct run_softc *);
+static void run_parent(struct ieee80211com *);
static void run_iq_calib(struct run_softc *, u_int);
static void run_set_agc(struct run_softc *, uint8_t);
static void run_select_chan_group(struct run_softc *, int);
static void run_set_rx_antenna(struct run_softc *, int);
static void run_rt2870_set_chan(struct run_softc *, u_int);
static void run_rt3070_set_chan(struct run_softc *, u_int);
static void run_rt3572_set_chan(struct run_softc *, u_int);
static void run_rt3593_set_chan(struct run_softc *, u_int);
static void run_rt5390_set_chan(struct run_softc *, u_int);
static void run_rt5592_set_chan(struct run_softc *, u_int);
static int run_set_chan(struct run_softc *, struct ieee80211_channel *);
static void run_set_channel(struct ieee80211com *);
static void run_scan_start(struct ieee80211com *);
static void run_scan_end(struct ieee80211com *);
static void run_update_beacon(struct ieee80211vap *, int);
static void run_update_beacon_cb(void *);
static void run_updateprot(struct ieee80211com *);
static void run_updateprot_cb(void *);
static void run_usb_timeout_cb(void *);
static void run_reset_livelock(struct run_softc *);
static void run_enable_tsf_sync(struct run_softc *);
static void run_enable_mrr(struct run_softc *);
static void run_set_txpreamble(struct run_softc *);
static void run_set_basicrates(struct run_softc *);
static void run_set_leds(struct run_softc *, uint16_t);
static void run_set_bssid(struct run_softc *, const uint8_t *);
static void run_set_macaddr(struct run_softc *, const uint8_t *);
static void run_updateslot(struct ieee80211com *);
static void run_updateslot_cb(void *);
static void run_update_mcast(struct ieee80211com *);
static int8_t run_rssi2dbm(struct run_softc *, uint8_t, uint8_t);
static void run_update_promisc_locked(struct run_softc *);
static void run_update_promisc(struct ieee80211com *);
static void run_rt5390_bbp_init(struct run_softc *);
static int run_bbp_init(struct run_softc *);
static int run_rt3070_rf_init(struct run_softc *);
static void run_rt3593_rf_init(struct run_softc *);
static void run_rt5390_rf_init(struct run_softc *);
static int run_rt3070_filter_calib(struct run_softc *, uint8_t, uint8_t,
uint8_t *);
static void run_rt3070_rf_setup(struct run_softc *);
static void run_rt3593_rf_setup(struct run_softc *);
static void run_rt5390_rf_setup(struct run_softc *);
static int run_txrx_enable(struct run_softc *);
static void run_adjust_freq_offset(struct run_softc *);
-static void run_init(void *);
static void run_init_locked(struct run_softc *);
static void run_stop(void *);
static void run_delay(struct run_softc *, u_int);
static eventhandler_tag run_etag;
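/*
* Rate table notes: "rate" is in IEEE 802.11 units of 500 kb/s
* (2 = 1 Mb/s DS, 108 = 54 Mb/s OFDM), "mcs" is the corresponding
* hardware rate code, "ctl_ridx" indexes the control rate used for
* protection/ACK, and sp_ack_dur/lp_ack_dur appear to be ACK durations
* in microseconds for short and long preamble, respectively.
*/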
static const struct rt2860_rate {
uint8_t rate;
uint8_t mcs;
enum ieee80211_phytype phy;
uint8_t ctl_ridx;
uint16_t sp_ack_dur;
uint16_t lp_ack_dur;
} rt2860_rates[] = {
{ 2, 0, IEEE80211_T_DS, 0, 314, 314 },
{ 4, 1, IEEE80211_T_DS, 1, 258, 162 },
{ 11, 2, IEEE80211_T_DS, 2, 223, 127 },
{ 22, 3, IEEE80211_T_DS, 3, 213, 117 },
{ 12, 0, IEEE80211_T_OFDM, 4, 60, 60 },
{ 18, 1, IEEE80211_T_OFDM, 4, 52, 52 },
{ 24, 2, IEEE80211_T_OFDM, 6, 48, 48 },
{ 36, 3, IEEE80211_T_OFDM, 6, 44, 44 },
{ 48, 4, IEEE80211_T_OFDM, 8, 44, 44 },
{ 72, 5, IEEE80211_T_OFDM, 8, 40, 40 },
{ 96, 6, IEEE80211_T_OFDM, 8, 40, 40 },
{ 108, 7, IEEE80211_T_OFDM, 8, 40, 40 }
};
static const struct {
uint16_t reg;
uint32_t val;
} rt2870_def_mac[] = {
RT2870_DEF_MAC
};
static const struct {
uint8_t reg;
uint8_t val;
} rt2860_def_bbp[] = {
RT2860_DEF_BBP
},rt5390_def_bbp[] = {
RT5390_DEF_BBP
},rt5592_def_bbp[] = {
RT5592_DEF_BBP
};
/*
* Default values for BBP register R196 for RT5592.
*/
static const uint8_t rt5592_bbp_r196[] = {
0xe0, 0x1f, 0x38, 0x32, 0x08, 0x28, 0x19, 0x0a, 0xff, 0x00,
0x16, 0x10, 0x10, 0x0b, 0x36, 0x2c, 0x26, 0x24, 0x42, 0x36,
0x30, 0x2d, 0x4c, 0x46, 0x3d, 0x40, 0x3e, 0x42, 0x3d, 0x40,
0x3c, 0x34, 0x2c, 0x2f, 0x3c, 0x35, 0x2e, 0x2a, 0x49, 0x41,
0x36, 0x31, 0x30, 0x30, 0x0e, 0x0d, 0x28, 0x21, 0x1c, 0x16,
0x50, 0x4a, 0x43, 0x40, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7d, 0x14, 0x32, 0x2c, 0x36, 0x4c, 0x43, 0x2c,
0x2e, 0x36, 0x30, 0x6e
};
static const struct rfprog {
uint8_t chan;
uint32_t r1, r2, r3, r4;
} rt2860_rf2850[] = {
RT2860_RF2850
};
struct {
uint8_t n, r, k;
} rt3070_freqs[] = {
RT3070_RF3052
};
static const struct rt5592_freqs {
uint16_t n;
uint8_t k, m, r;
} rt5592_freqs_20mhz[] = {
RT5592_RF5592_20MHZ
},rt5592_freqs_40mhz[] = {
RT5592_RF5592_40MHZ
};
static const struct {
uint8_t reg;
uint8_t val;
} rt3070_def_rf[] = {
RT3070_DEF_RF
},rt3572_def_rf[] = {
RT3572_DEF_RF
},rt3593_def_rf[] = {
RT3593_DEF_RF
},rt5390_def_rf[] = {
RT5390_DEF_RF
},rt5392_def_rf[] = {
RT5392_DEF_RF
},rt5592_def_rf[] = {
RT5592_DEF_RF
},rt5592_2ghz_def_rf[] = {
RT5592_2GHZ_DEF_RF
},rt5592_5ghz_def_rf[] = {
RT5592_5GHZ_DEF_RF
};
static const struct {
u_int firstchan;
u_int lastchan;
uint8_t reg;
uint8_t val;
} rt5592_chan_5ghz[] = {
RT5592_CHAN_5GHZ
};
static const struct usb_config run_config[RUN_N_XFER] = {
[RUN_BULK_TX_BE] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.ep_index = 0,
.direction = UE_DIR_OUT,
.bufsize = RUN_MAX_TXSZ,
.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
.callback = run_bulk_tx_callback0,
.timeout = 5000, /* ms */
},
[RUN_BULK_TX_BK] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.ep_index = 1,
.bufsize = RUN_MAX_TXSZ,
.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
.callback = run_bulk_tx_callback1,
.timeout = 5000, /* ms */
},
[RUN_BULK_TX_VI] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.ep_index = 2,
.bufsize = RUN_MAX_TXSZ,
.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
.callback = run_bulk_tx_callback2,
.timeout = 5000, /* ms */
},
[RUN_BULK_TX_VO] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.ep_index = 3,
.bufsize = RUN_MAX_TXSZ,
.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
.callback = run_bulk_tx_callback3,
.timeout = 5000, /* ms */
},
[RUN_BULK_TX_HCCA] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.ep_index = 4,
.bufsize = RUN_MAX_TXSZ,
.flags = {.pipe_bof = 1,.force_short_xfer = 1,.no_pipe_ok = 1,},
.callback = run_bulk_tx_callback4,
.timeout = 5000, /* ms */
},
[RUN_BULK_TX_PRIO] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.ep_index = 5,
.bufsize = RUN_MAX_TXSZ,
.flags = {.pipe_bof = 1,.force_short_xfer = 1,.no_pipe_ok = 1,},
.callback = run_bulk_tx_callback5,
.timeout = 5000, /* ms */
},
[RUN_BULK_RX] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_IN,
.bufsize = RUN_MAX_RXSZ,
.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
.callback = run_bulk_rx_callback,
}
};
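/*
* The six bulk-OUT transfers above provide one endpoint per Tx queue:
* the four WME access categories (BE/BK/VI/VO) plus the HCCA and PRIO
* queues, each with a 5 second timeout.  HCCA and PRIO set .no_pipe_ok,
* which appears to let usbd_transfer_setup() succeed on devices that do
* not expose those extra endpoints; the single bulk-IN transfer handles
* all receive traffic.
*/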
static void
run_autoinst(void *arg, struct usb_device *udev,
struct usb_attach_arg *uaa)
{
struct usb_interface *iface;
struct usb_interface_descriptor *id;
if (uaa->dev_state != UAA_DEV_READY)
return;
iface = usbd_get_iface(udev, 0);
if (iface == NULL)
return;
id = iface->idesc;
if (id == NULL || id->bInterfaceClass != UICLASS_MASS)
return;
if (usbd_lookup_id_by_uaa(run_devs, sizeof(run_devs), uaa))
return;
if (usb_msc_eject(udev, 0, MSC_EJECT_STOPUNIT) == 0)
uaa->dev_state = UAA_DEV_EJECTING;
}
static int
run_driver_loaded(struct module *mod, int what, void *arg)
{
switch (what) {
case MOD_LOAD:
run_etag = EVENTHANDLER_REGISTER(usb_dev_configured,
run_autoinst, NULL, EVENTHANDLER_PRI_ANY);
break;
case MOD_UNLOAD:
EVENTHANDLER_DEREGISTER(usb_dev_configured, run_etag);
break;
default:
return (EOPNOTSUPP);
}
return (0);
}
static int
run_match(device_t self)
{
struct usb_attach_arg *uaa = device_get_ivars(self);
if (uaa->usb_mode != USB_MODE_HOST)
return (ENXIO);
if (uaa->info.bConfigIndex != 0)
return (ENXIO);
if (uaa->info.bIfaceIndex != RT2860_IFACE_INDEX)
return (ENXIO);
return (usbd_lookup_id_by_uaa(run_devs, sizeof(run_devs), uaa));
}
static int
run_attach(device_t self)
{
struct run_softc *sc = device_get_softc(self);
struct usb_attach_arg *uaa = device_get_ivars(self);
- struct ieee80211com *ic;
- struct ifnet *ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t ver;
int ntries, error;
uint8_t iface_index, bands;
device_set_usb_desc(self);
sc->sc_udev = uaa->device;
sc->sc_dev = self;
if (USB_GET_DRIVER_INFO(uaa) != RUN_EJECT)
sc->sc_flags |= RUN_FLAG_FWLOAD_NEEDED;
mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev),
MTX_NETWORK_LOCK, MTX_DEF);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
iface_index = RT2860_IFACE_INDEX;
error = usbd_transfer_setup(uaa->device, &iface_index,
sc->sc_xfer, run_config, RUN_N_XFER, sc, &sc->sc_mtx);
if (error) {
device_printf(self, "could not allocate USB transfers, "
"err=%s\n", usbd_errstr(error));
goto detach;
}
RUN_LOCK(sc);
/* wait for the chip to settle */
for (ntries = 0; ntries < 100; ntries++) {
if (run_read(sc, RT2860_ASIC_VER_ID, &ver) != 0) {
RUN_UNLOCK(sc);
goto detach;
}
if (ver != 0 && ver != 0xffffffff)
break;
run_delay(sc, 10);
}
if (ntries == 100) {
device_printf(sc->sc_dev,
"timeout waiting for NIC to initialize\n");
RUN_UNLOCK(sc);
goto detach;
}
sc->mac_ver = ver >> 16;
sc->mac_rev = ver & 0xffff;
/* retrieve RF rev. no and various other things from EEPROM */
run_read_eeprom(sc);
device_printf(sc->sc_dev,
"MAC/BBP RT%04X (rev 0x%04X), RF %s (MIMO %dT%dR), address %s\n",
sc->mac_ver, sc->mac_rev, run_get_rf(sc->rf_rev),
- sc->ntxchains, sc->nrxchains, ether_sprintf(sc->sc_bssid));
+ sc->ntxchains, sc->nrxchains, ether_sprintf(ic->ic_macaddr));
RUN_UNLOCK(sc);
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "can not if_alloc()\n");
- goto detach;
- }
- ic = ifp->if_l2com;
-
- ifp->if_softc = sc;
- if_initname(ifp, "run", device_get_unit(sc->sc_dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = run_init;
- ifp->if_ioctl = run_ioctl;
- ifp->if_start = run_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(self);
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA | /* station mode supported */
IEEE80211_C_MONITOR | /* monitor mode supported */
IEEE80211_C_IBSS |
IEEE80211_C_HOSTAP |
IEEE80211_C_WDS | /* 4-address traffic works */
IEEE80211_C_MBSS |
IEEE80211_C_SHPREAMBLE | /* short preamble supported */
IEEE80211_C_SHSLOT | /* short slot time supported */
IEEE80211_C_WME | /* WME */
IEEE80211_C_WPA; /* WPA1|WPA2(RSN) */
ic->ic_cryptocaps =
IEEE80211_CRYPTO_WEP |
IEEE80211_CRYPTO_AES_CCM |
IEEE80211_CRYPTO_TKIPMIC |
IEEE80211_CRYPTO_TKIP;
ic->ic_flags |= IEEE80211_F_DATAPAD;
ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
if (sc->rf_rev == RT2860_RF_2750 || sc->rf_rev == RT2860_RF_2850 ||
sc->rf_rev == RT3070_RF_3052 || sc->rf_rev == RT3593_RF_3053 ||
sc->rf_rev == RT5592_RF_5592)
setbit(&bands, IEEE80211_MODE_11A);
ieee80211_init_channels(ic, NULL, &bands);
- ieee80211_ifattach(ic, sc->sc_bssid);
+ ieee80211_ifattach(ic);
ic->ic_scan_start = run_scan_start;
ic->ic_scan_end = run_scan_end;
ic->ic_set_channel = run_set_channel;
ic->ic_node_alloc = run_node_alloc;
ic->ic_newassoc = run_newassoc;
ic->ic_updateslot = run_updateslot;
ic->ic_update_mcast = run_update_mcast;
ic->ic_wme.wme_update = run_wme_update;
ic->ic_raw_xmit = run_raw_xmit;
ic->ic_update_promisc = run_update_promisc;
-
ic->ic_vap_create = run_vap_create;
ic->ic_vap_delete = run_vap_delete;
+ ic->ic_transmit = run_transmit;
+ ic->ic_parent = run_parent;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
RUN_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
RUN_RX_RADIOTAP_PRESENT);
TASK_INIT(&sc->cmdq_task, 0, run_cmdq_cb, sc);
TASK_INIT(&sc->ratectl_task, 0, run_ratectl_cb, sc);
usb_callout_init_mtx(&sc->ratectl_ch, &sc->sc_mtx, 0);
if (bootverbose)
ieee80211_announce(ic);
return (0);
detach:
run_detach(self);
return (ENXIO);
}
static int
run_detach(device_t self)
{
struct run_softc *sc = device_get_softc(self);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
int i;
RUN_LOCK(sc);
sc->sc_detached = 1;
RUN_UNLOCK(sc);
/* stop all USB transfers */
usbd_transfer_unsetup(sc->sc_xfer, RUN_N_XFER);
RUN_LOCK(sc);
sc->ratectl_run = RUN_RATECTL_OFF;
sc->cmdq_run = sc->cmdq_key_set = RUN_CMDQ_ABORT;
/* free TX list, if any */
for (i = 0; i != RUN_EP_QUEUES; i++)
run_unsetup_tx_list(sc, &sc->sc_epq[i]);
RUN_UNLOCK(sc);
- if (ifp) {
- ic = ifp->if_l2com;
+ if (sc->sc_ic.ic_softc == sc) {
/* drain tasks */
usb_callout_drain(&sc->ratectl_ch);
ieee80211_draintask(ic, &sc->cmdq_task);
ieee80211_draintask(ic, &sc->ratectl_task);
ieee80211_ifdetach(ic);
- if_free(ifp);
}
+ mbufq_drain(&sc->sc_snd);
mtx_destroy(&sc->sc_mtx);
return (0);
}
static struct ieee80211vap *
run_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
- struct ifnet *ifp = ic->ic_ifp;
struct run_softc *sc = ic->ic_softc;
struct run_vap *rvp;
struct ieee80211vap *vap;
int i;
if (sc->rvp_cnt >= RUN_VAP_MAX) {
- if_printf(ifp, "number of VAPs maxed out\n");
+ device_printf(sc->sc_dev, "number of VAPs maxed out\n");
return (NULL);
}
switch (opmode) {
case IEEE80211_M_STA:
/* enable s/w bmiss handling for sta mode */
flags |= IEEE80211_CLONE_NOBEACONS;
/* fall through */
case IEEE80211_M_IBSS:
case IEEE80211_M_MONITOR:
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
/* other than WDS vaps, only one at a time */
if (!TAILQ_EMPTY(&ic->ic_vaps))
return (NULL);
break;
case IEEE80211_M_WDS:
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next){
if(vap->iv_opmode != IEEE80211_M_HOSTAP)
continue;
/* WDS vaps always share the local MAC address. */
flags &= ~IEEE80211_CLONE_BSSID;
break;
}
if (vap == NULL) {
- if_printf(ifp, "wds only supported in ap mode\n");
+ device_printf(sc->sc_dev,
+ "wds only supported in ap mode\n");
return (NULL);
}
break;
default:
- if_printf(ifp, "unknown opmode %d\n", opmode);
+ device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
return (NULL);
}
- rvp = (struct run_vap *) malloc(sizeof(struct run_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (rvp == NULL)
- return (NULL);
+ rvp = malloc(sizeof(struct run_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &rvp->vap;
- if (ieee80211_vap_setup(ic, vap, name, unit,
- opmode, flags, bssid, mac) != 0) {
+ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
+ bssid) != 0) {
/* out of memory */
free(rvp, M_80211_VAP);
return (NULL);
}
vap->iv_key_update_begin = run_key_update_begin;
vap->iv_key_update_end = run_key_update_end;
vap->iv_update_beacon = run_update_beacon;
vap->iv_max_aid = RT2870_WCID_MAX;
/*
* To delete the right key from h/w, we need the wcid.
* Luckily, there is unused space in ieee80211_key{} (wk_pad), and
* the matching wcid will be written into it.  So, cast some spells
* to remove 'const' from ieee80211_key{}.
*/
vap->iv_key_delete = (void *)run_key_delete;
vap->iv_key_set = (void *)run_key_set;
/* override state transition machine */
rvp->newstate = vap->iv_newstate;
vap->iv_newstate = run_newstate;
ieee80211_ratectl_init(vap);
ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */);
/* complete setup */
- ieee80211_vap_attach(vap, run_media_change, ieee80211_media_status);
+ ieee80211_vap_attach(vap, run_media_change, ieee80211_media_status,
+ mac);
/* make sure id is always unique */
for (i = 0; i < RUN_VAP_MAX; i++) {
if((sc->rvp_bmap & 1 << i) == 0){
sc->rvp_bmap |= 1 << i;
rvp->rvp_id = i;
break;
}
}
if (sc->rvp_cnt++ == 0)
ic->ic_opmode = opmode;
if (opmode == IEEE80211_M_HOSTAP)
sc->cmdq_run = RUN_CMDQ_GO;
DPRINTF("rvp_id=%d bmap=%x rvp_cnt=%d\n",
rvp->rvp_id, sc->rvp_bmap, sc->rvp_cnt);
return (vap);
}
static void
run_vap_delete(struct ieee80211vap *vap)
{
struct run_vap *rvp = RUN_VAP(vap);
- struct ifnet *ifp;
struct ieee80211com *ic;
struct run_softc *sc;
uint8_t rvp_id;
if (vap == NULL)
return;
ic = vap->iv_ic;
- ifp = ic->ic_ifp;
sc = ic->ic_softc;
RUN_LOCK(sc);
m_freem(rvp->beacon_mbuf);
rvp->beacon_mbuf = NULL;
rvp_id = rvp->rvp_id;
sc->ratectl_run &= ~(1 << rvp_id);
sc->rvp_bmap &= ~(1 << rvp_id);
run_set_region_4(sc, RT2860_SKEY(rvp_id, 0), 0, 128);
run_set_region_4(sc, RT2860_BCN_BASE(rvp_id), 0, 512);
--sc->rvp_cnt;
DPRINTF("vap=%p rvp_id=%d bmap=%x rvp_cnt=%d\n",
vap, rvp_id, sc->rvp_bmap, sc->rvp_cnt);
RUN_UNLOCK(sc);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(rvp, M_80211_VAP);
}
/*
* A number of functions need to be called from task (process) context.
* Rather than creating a taskqueue event for each of them, this single
* all-for-one taskqueue callback handles them all.  It guarantees that
* the deferred functions are executed in the same order they were
* enqueued.
* The '& RUN_CMDQ_MASQ' wraps the index around cmdq[].
*/
static void
run_cmdq_cb(void *arg, int pending)
{
struct run_softc *sc = arg;
uint8_t i;
/* call cmdq[].func locked */
RUN_LOCK(sc);
for (i = sc->cmdq_exec; sc->cmdq[i].func && pending;
i = sc->cmdq_exec, pending--) {
DPRINTFN(6, "cmdq_exec=%d pending=%d\n", i, pending);
if (sc->cmdq_run == RUN_CMDQ_GO) {
/*
* If arg0 is NULL, the callback needs more than one
* argument, so pass a pointer to the whole cmdq entry.
*/
if (sc->cmdq[i].arg0)
sc->cmdq[i].func(sc->cmdq[i].arg0);
else
sc->cmdq[i].func(&sc->cmdq[i]);
}
sc->cmdq[i].arg0 = NULL;
sc->cmdq[i].func = NULL;
sc->cmdq_exec++;
sc->cmdq_exec &= RUN_CMDQ_MASQ;
}
RUN_UNLOCK(sc);
}
static void
run_setup_tx_list(struct run_softc *sc, struct run_endpoint_queue *pq)
{
struct run_tx_data *data;
memset(pq, 0, sizeof(*pq));
STAILQ_INIT(&pq->tx_qh);
STAILQ_INIT(&pq->tx_fh);
for (data = &pq->tx_data[0];
data < &pq->tx_data[RUN_TX_RING_COUNT]; data++) {
data->sc = sc;
STAILQ_INSERT_TAIL(&pq->tx_fh, data, next);
}
pq->tx_nfree = RUN_TX_RING_COUNT;
}
static void
run_unsetup_tx_list(struct run_softc *sc, struct run_endpoint_queue *pq)
{
struct run_tx_data *data;
/* make sure any subsequent use of the queues will fail */
pq->tx_nfree = 0;
STAILQ_INIT(&pq->tx_fh);
STAILQ_INIT(&pq->tx_qh);
/* free up all node references and mbufs */
for (data = &pq->tx_data[0];
data < &pq->tx_data[RUN_TX_RING_COUNT]; data++) {
if (data->m != NULL) {
m_freem(data->m);
data->m = NULL;
}
if (data->ni != NULL) {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
}
}
static int
run_load_microcode(struct run_softc *sc)
{
usb_device_request_t req;
const struct firmware *fw;
const u_char *base;
uint32_t tmp;
int ntries, error;
const uint64_t *temp;
uint64_t bytes;
RUN_UNLOCK(sc);
fw = firmware_get("runfw");
RUN_LOCK(sc);
if (fw == NULL) {
device_printf(sc->sc_dev,
"failed loadfirmware of file %s\n", "runfw");
return ENOENT;
}
if (fw->datasize != 8192) {
device_printf(sc->sc_dev,
"invalid firmware size (should be 8KB)\n");
error = EINVAL;
goto fail;
}
/*
* RT3071/RT3072 use a different firmware:
* run-rt2870 (8KB) contains both images;
* the first half (4KB) is for the rt2870,
* the last half is for the rt3071.
*/
base = fw->data;
if ((sc->mac_ver) != 0x2860 &&
(sc->mac_ver) != 0x2872 &&
(sc->mac_ver) != 0x3070) {
base += 4096;
}
/* cheap sanity check */
temp = fw->data;
bytes = *temp;
if (bytes != be64toh(0xffffff0210280210ULL)) {
device_printf(sc->sc_dev, "firmware checksum failed\n");
error = EINVAL;
goto fail;
}
/* write microcode image */
if (sc->sc_flags & RUN_FLAG_FWLOAD_NEEDED) {
run_write_region_1(sc, RT2870_FW_BASE, base, 4096);
run_write(sc, RT2860_H2M_MAILBOX_CID, 0xffffffff);
run_write(sc, RT2860_H2M_MAILBOX_STATUS, 0xffffffff);
}
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = RT2870_RESET;
USETW(req.wValue, 8);
USETW(req.wIndex, 0);
USETW(req.wLength, 0);
if ((error = usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, NULL))
!= 0) {
device_printf(sc->sc_dev, "firmware reset failed\n");
goto fail;
}
run_delay(sc, 10);
run_write(sc, RT2860_H2M_BBPAGENT, 0);
run_write(sc, RT2860_H2M_MAILBOX, 0);
run_write(sc, RT2860_H2M_INTSRC, 0);
if ((error = run_mcu_cmd(sc, RT2860_MCU_CMD_RFRESET, 0)) != 0)
goto fail;
/* wait until microcontroller is ready */
for (ntries = 0; ntries < 1000; ntries++) {
if ((error = run_read(sc, RT2860_SYS_CTRL, &tmp)) != 0)
goto fail;
if (tmp & RT2860_MCU_READY)
break;
run_delay(sc, 10);
}
if (ntries == 1000) {
device_printf(sc->sc_dev,
"timeout waiting for MCU to initialize\n");
error = ETIMEDOUT;
goto fail;
}
device_printf(sc->sc_dev, "firmware %s ver. %u.%u loaded\n",
(base == fw->data) ? "RT2870" : "RT3071",
*(base + 4092), *(base + 4093));
fail:
firmware_put(fw, FIRMWARE_UNLOAD);
return (error);
}
static int
run_reset(struct run_softc *sc)
{
usb_device_request_t req;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = RT2870_RESET;
USETW(req.wValue, 1);
USETW(req.wIndex, 0);
USETW(req.wLength, 0);
return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, NULL));
}
static usb_error_t
run_do_request(struct run_softc *sc,
struct usb_device_request *req, void *data)
{
usb_error_t err;
int ntries = 10;
RUN_LOCK_ASSERT(sc, MA_OWNED);
while (ntries--) {
err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx,
req, data, 0, NULL, 250 /* ms */);
if (err == 0)
break;
DPRINTFN(1, "Control request failed, %s (retrying)\n",
usbd_errstr(err));
run_delay(sc, 10);
}
return (err);
}
static int
run_read(struct run_softc *sc, uint16_t reg, uint32_t *val)
{
uint32_t tmp;
int error;
error = run_read_region_1(sc, reg, (uint8_t *)&tmp, sizeof tmp);
if (error == 0)
*val = le32toh(tmp);
else
*val = 0xffffffff;
return (error);
}
static int
run_read_region_1(struct run_softc *sc, uint16_t reg, uint8_t *buf, int len)
{
usb_device_request_t req;
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = RT2870_READ_REGION_1;
USETW(req.wValue, 0);
USETW(req.wIndex, reg);
USETW(req.wLength, len);
return (run_do_request(sc, &req, buf));
}
static int
run_write_2(struct run_softc *sc, uint16_t reg, uint16_t val)
{
usb_device_request_t req;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = RT2870_WRITE_2;
USETW(req.wValue, val);
USETW(req.wIndex, reg);
USETW(req.wLength, 0);
return (run_do_request(sc, &req, NULL));
}
static int
run_write(struct run_softc *sc, uint16_t reg, uint32_t val)
{
int error;
if ((error = run_write_2(sc, reg, val & 0xffff)) == 0)
error = run_write_2(sc, reg + 2, val >> 16);
return (error);
}
static int
run_write_region_1(struct run_softc *sc, uint16_t reg, const uint8_t *buf,
int len)
{
#if 1
int i, error = 0;
/*
* NB: the WRITE_REGION_1 command is not stable on RT2860.
* We thus issue multiple WRITE_2 commands instead.
*/
KASSERT((len & 1) == 0, ("run_write_region_1: Data too long.\n"));
for (i = 0; i < len && error == 0; i += 2)
error = run_write_2(sc, reg + i, buf[i] | buf[i + 1] << 8);
return (error);
#else
usb_device_request_t req;
int error = 0;
/*
* NOTE: It appears the WRITE_REGION_1 command cannot be
* passed a huge amount of data, which will crash the
* firmware. Limit amount of data passed to 64-bytes at a
* time.
*/
while (len > 0) {
int delta = 64;
if (delta > len)
delta = len;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = RT2870_WRITE_REGION_1;
USETW(req.wValue, 0);
USETW(req.wIndex, reg);
USETW(req.wLength, delta);
error = run_do_request(sc, &req, __DECONST(uint8_t *, buf));
if (error != 0)
break;
reg += delta;
buf += delta;
len -= delta;
}
return (error);
#endif
}
static int
run_set_region_4(struct run_softc *sc, uint16_t reg, uint32_t val, int len)
{
int i, error = 0;
KASSERT((len & 3) == 0, ("run_set_region_4: Invalid data length.\n"));
for (i = 0; i < len && error == 0; i += 4)
error = run_write(sc, reg + i, val);
return (error);
}
static int
run_efuse_read(struct run_softc *sc, uint16_t addr, uint16_t *val, int count)
{
uint32_t tmp;
uint16_t reg;
int error, ntries;
if ((error = run_read(sc, RT3070_EFUSE_CTRL, &tmp)) != 0)
return (error);
if (count == 2)
addr *= 2;
/*-
* Read one 16-byte block into registers EFUSE_DATA[0-3]:
* DATA0: F E D C
* DATA1: B A 9 8
* DATA2: 7 6 5 4
* DATA3: 3 2 1 0
*/
tmp &= ~(RT3070_EFSROM_MODE_MASK | RT3070_EFSROM_AIN_MASK);
tmp |= (addr & ~0xf) << RT3070_EFSROM_AIN_SHIFT | RT3070_EFSROM_KICK;
run_write(sc, RT3070_EFUSE_CTRL, tmp);
for (ntries = 0; ntries < 100; ntries++) {
if ((error = run_read(sc, RT3070_EFUSE_CTRL, &tmp)) != 0)
return (error);
if (!(tmp & RT3070_EFSROM_KICK))
break;
run_delay(sc, 2);
}
if (ntries == 100)
return (ETIMEDOUT);
if ((tmp & RT3070_EFUSE_AOUT_MASK) == RT3070_EFUSE_AOUT_MASK) {
*val = 0xffff; /* address not found */
return (0);
}
/* determine to which 32-bit register our 16-bit word belongs */
reg = RT3070_EFUSE_DATA3 - (addr & 0xc);
if ((error = run_read(sc, reg, &tmp)) != 0)
return (error);
tmp >>= (8 * (addr & 0x3));
*val = (addr & 1) ? tmp >> 16 : tmp & 0xffff;
return (0);
}
/* Read 16-bit from eFUSE ROM for RT3xxx. */
static int
run_efuse_read_2(struct run_softc *sc, uint16_t addr, uint16_t *val)
{
return (run_efuse_read(sc, addr, val, 2));
}
static int
run_eeprom_read_2(struct run_softc *sc, uint16_t addr, uint16_t *val)
{
usb_device_request_t req;
uint16_t tmp;
int error;
addr *= 2;
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = RT2870_EEPROM_READ;
USETW(req.wValue, 0);
USETW(req.wIndex, addr);
USETW(req.wLength, sizeof(tmp));
error = usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, &tmp);
if (error == 0)
*val = le16toh(tmp);
else
*val = 0xffff;
return (error);
}
static __inline int
run_srom_read(struct run_softc *sc, uint16_t addr, uint16_t *val)
{
/* either eFUSE ROM or EEPROM */
return sc->sc_srom_read(sc, addr, val);
}
static int
run_rt2870_rf_write(struct run_softc *sc, uint32_t val)
{
uint32_t tmp;
int error, ntries;
for (ntries = 0; ntries < 10; ntries++) {
if ((error = run_read(sc, RT2860_RF_CSR_CFG0, &tmp)) != 0)
return (error);
if (!(tmp & RT2860_RF_REG_CTRL))
break;
}
if (ntries == 10)
return (ETIMEDOUT);
return (run_write(sc, RT2860_RF_CSR_CFG0, val));
}
static int
run_rt3070_rf_read(struct run_softc *sc, uint8_t reg, uint8_t *val)
{
uint32_t tmp;
int error, ntries;
for (ntries = 0; ntries < 100; ntries++) {
if ((error = run_read(sc, RT3070_RF_CSR_CFG, &tmp)) != 0)
return (error);
if (!(tmp & RT3070_RF_KICK))
break;
}
if (ntries == 100)
return (ETIMEDOUT);
tmp = RT3070_RF_KICK | reg << 8;
if ((error = run_write(sc, RT3070_RF_CSR_CFG, tmp)) != 0)
return (error);
for (ntries = 0; ntries < 100; ntries++) {
if ((error = run_read(sc, RT3070_RF_CSR_CFG, &tmp)) != 0)
return (error);
if (!(tmp & RT3070_RF_KICK))
break;
}
if (ntries == 100)
return (ETIMEDOUT);
*val = tmp & 0xff;
return (0);
}
static int
run_rt3070_rf_write(struct run_softc *sc, uint8_t reg, uint8_t val)
{
uint32_t tmp;
int error, ntries;
for (ntries = 0; ntries < 10; ntries++) {
if ((error = run_read(sc, RT3070_RF_CSR_CFG, &tmp)) != 0)
return (error);
if (!(tmp & RT3070_RF_KICK))
break;
}
if (ntries == 10)
return (ETIMEDOUT);
tmp = RT3070_RF_WRITE | RT3070_RF_KICK | reg << 8 | val;
return (run_write(sc, RT3070_RF_CSR_CFG, tmp));
}
static int
run_bbp_read(struct run_softc *sc, uint8_t reg, uint8_t *val)
{
uint32_t tmp;
int ntries, error;
for (ntries = 0; ntries < 10; ntries++) {
if ((error = run_read(sc, RT2860_BBP_CSR_CFG, &tmp)) != 0)
return (error);
if (!(tmp & RT2860_BBP_CSR_KICK))
break;
}
if (ntries == 10)
return (ETIMEDOUT);
tmp = RT2860_BBP_CSR_READ | RT2860_BBP_CSR_KICK | reg << 8;
if ((error = run_write(sc, RT2860_BBP_CSR_CFG, tmp)) != 0)
return (error);
for (ntries = 0; ntries < 10; ntries++) {
if ((error = run_read(sc, RT2860_BBP_CSR_CFG, &tmp)) != 0)
return (error);
if (!(tmp & RT2860_BBP_CSR_KICK))
break;
}
if (ntries == 10)
return (ETIMEDOUT);
*val = tmp & 0xff;
return (0);
}
static int
run_bbp_write(struct run_softc *sc, uint8_t reg, uint8_t val)
{
uint32_t tmp;
int ntries, error;
for (ntries = 0; ntries < 10; ntries++) {
if ((error = run_read(sc, RT2860_BBP_CSR_CFG, &tmp)) != 0)
return (error);
if (!(tmp & RT2860_BBP_CSR_KICK))
break;
}
if (ntries == 10)
return (ETIMEDOUT);
tmp = RT2860_BBP_CSR_KICK | reg << 8 | val;
return (run_write(sc, RT2860_BBP_CSR_CFG, tmp));
}
/*
* Send a command to the 8051 microcontroller unit.
*/
static int
run_mcu_cmd(struct run_softc *sc, uint8_t cmd, uint16_t arg)
{
uint32_t tmp;
int error, ntries;
for (ntries = 0; ntries < 100; ntries++) {
if ((error = run_read(sc, RT2860_H2M_MAILBOX, &tmp)) != 0)
return error;
if (!(tmp & RT2860_H2M_BUSY))
break;
}
if (ntries == 100)
return ETIMEDOUT;
tmp = RT2860_H2M_BUSY | RT2860_TOKEN_NO_INTR << 16 | arg;
if ((error = run_write(sc, RT2860_H2M_MAILBOX, tmp)) == 0)
error = run_write(sc, RT2860_HOST_CMD, cmd);
return (error);
}
/*
* Add `delta' (signed) to each 4-bit sub-word of a 32-bit word.
* Used to adjust per-rate Tx power registers.
*/
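/*
* Worked examples (values follow from the saturating per-nibble add
* below): b4inc(0x12345678, 1) == 0x23456789, and with saturation
* b4inc(0xff000000, 1) == 0xff111111 (nibbles already at 0xf stay 0xf).
*/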
static __inline uint32_t
b4inc(uint32_t b32, int8_t delta)
{
int8_t i, b4;
for (i = 0; i < 8; i++) {
b4 = b32 & 0xf;
b4 += delta;
if (b4 < 0)
b4 = 0;
else if (b4 > 0xf)
b4 = 0xf;
b32 = b32 >> 4 | b4 << 28;
}
return (b32);
}
static const char *
run_get_rf(uint16_t rev)
{
switch (rev) {
case RT2860_RF_2820: return "RT2820";
case RT2860_RF_2850: return "RT2850";
case RT2860_RF_2720: return "RT2720";
case RT2860_RF_2750: return "RT2750";
case RT3070_RF_3020: return "RT3020";
case RT3070_RF_2020: return "RT2020";
case RT3070_RF_3021: return "RT3021";
case RT3070_RF_3022: return "RT3022";
case RT3070_RF_3052: return "RT3052";
case RT3593_RF_3053: return "RT3053";
case RT5592_RF_5592: return "RT5592";
case RT5390_RF_5370: return "RT5370";
case RT5390_RF_5372: return "RT5372";
}
return ("unknown");
}
static void
run_rt3593_get_txpower(struct run_softc *sc)
{
uint16_t addr, val;
int i;
/* Read power settings for 2GHz channels. */
for (i = 0; i < 14; i += 2) {
addr = (sc->ntxchains == 3) ? RT3593_EEPROM_PWR2GHZ_BASE1 :
RT2860_EEPROM_PWR2GHZ_BASE1;
run_srom_read(sc, addr + i / 2, &val);
sc->txpow1[i + 0] = (int8_t)(val & 0xff);
sc->txpow1[i + 1] = (int8_t)(val >> 8);
addr = (sc->ntxchains == 3) ? RT3593_EEPROM_PWR2GHZ_BASE2 :
RT2860_EEPROM_PWR2GHZ_BASE2;
run_srom_read(sc, addr + i / 2, &val);
sc->txpow2[i + 0] = (int8_t)(val & 0xff);
sc->txpow2[i + 1] = (int8_t)(val >> 8);
if (sc->ntxchains == 3) {
run_srom_read(sc, RT3593_EEPROM_PWR2GHZ_BASE3 + i / 2,
&val);
sc->txpow3[i + 0] = (int8_t)(val & 0xff);
sc->txpow3[i + 1] = (int8_t)(val >> 8);
}
}
/* Fix broken Tx power entries. */
for (i = 0; i < 14; i++) {
if (sc->txpow1[i] > 31)
sc->txpow1[i] = 5;
if (sc->txpow2[i] > 31)
sc->txpow2[i] = 5;
if (sc->ntxchains == 3) {
if (sc->txpow3[i] > 31)
sc->txpow3[i] = 5;
}
}
/* Read power settings for 5GHz channels. */
for (i = 0; i < 40; i += 2) {
run_srom_read(sc, RT3593_EEPROM_PWR5GHZ_BASE1 + i / 2, &val);
sc->txpow1[i + 14] = (int8_t)(val & 0xff);
sc->txpow1[i + 15] = (int8_t)(val >> 8);
run_srom_read(sc, RT3593_EEPROM_PWR5GHZ_BASE2 + i / 2, &val);
sc->txpow2[i + 14] = (int8_t)(val & 0xff);
sc->txpow2[i + 15] = (int8_t)(val >> 8);
if (sc->ntxchains == 3) {
run_srom_read(sc, RT3593_EEPROM_PWR5GHZ_BASE3 + i / 2,
&val);
sc->txpow3[i + 14] = (int8_t)(val & 0xff);
sc->txpow3[i + 15] = (int8_t)(val >> 8);
}
}
}
static void
run_get_txpower(struct run_softc *sc)
{
uint16_t val;
int i;
/* Read power settings for 2GHz channels. */
for (i = 0; i < 14; i += 2) {
run_srom_read(sc, RT2860_EEPROM_PWR2GHZ_BASE1 + i / 2, &val);
sc->txpow1[i + 0] = (int8_t)(val & 0xff);
sc->txpow1[i + 1] = (int8_t)(val >> 8);
if (sc->mac_ver != 0x5390) {
run_srom_read(sc,
RT2860_EEPROM_PWR2GHZ_BASE2 + i / 2, &val);
sc->txpow2[i + 0] = (int8_t)(val & 0xff);
sc->txpow2[i + 1] = (int8_t)(val >> 8);
}
}
/* Fix broken Tx power entries. */
for (i = 0; i < 14; i++) {
if (sc->mac_ver >= 0x5390) {
if (sc->txpow1[i] < 0 || sc->txpow1[i] > 27)
sc->txpow1[i] = 5;
} else {
if (sc->txpow1[i] < 0 || sc->txpow1[i] > 31)
sc->txpow1[i] = 5;
}
if (sc->mac_ver > 0x5390) {
if (sc->txpow2[i] < 0 || sc->txpow2[i] > 27)
sc->txpow2[i] = 5;
} else if (sc->mac_ver < 0x5390) {
if (sc->txpow2[i] < 0 || sc->txpow2[i] > 31)
sc->txpow2[i] = 5;
}
DPRINTF("chan %d: power1=%d, power2=%d\n",
rt2860_rf2850[i].chan, sc->txpow1[i], sc->txpow2[i]);
}
/* Read power settings for 5GHz channels. */
for (i = 0; i < 40; i += 2) {
run_srom_read(sc, RT2860_EEPROM_PWR5GHZ_BASE1 + i / 2, &val);
sc->txpow1[i + 14] = (int8_t)(val & 0xff);
sc->txpow1[i + 15] = (int8_t)(val >> 8);
run_srom_read(sc, RT2860_EEPROM_PWR5GHZ_BASE2 + i / 2, &val);
sc->txpow2[i + 14] = (int8_t)(val & 0xff);
sc->txpow2[i + 15] = (int8_t)(val >> 8);
}
/* Fix broken Tx power entries. */
for (i = 0; i < 40; i++ ) {
if (sc->mac_ver != 0x5592) {
if (sc->txpow1[14 + i] < -7 || sc->txpow1[14 + i] > 15)
sc->txpow1[14 + i] = 5;
if (sc->txpow2[14 + i] < -7 || sc->txpow2[14 + i] > 15)
sc->txpow2[14 + i] = 5;
}
DPRINTF("chan %d: power1=%d, power2=%d\n",
rt2860_rf2850[14 + i].chan, sc->txpow1[14 + i],
sc->txpow2[14 + i]);
}
}
static int
run_read_eeprom(struct run_softc *sc)
{
+ struct ieee80211com *ic = &sc->sc_ic;
int8_t delta_2ghz, delta_5ghz;
uint32_t tmp;
uint16_t val;
int ridx, ant, i;
/* check whether the ROM is eFUSE ROM or EEPROM */
sc->sc_srom_read = run_eeprom_read_2;
if (sc->mac_ver >= 0x3070) {
run_read(sc, RT3070_EFUSE_CTRL, &tmp);
DPRINTF("EFUSE_CTRL=0x%08x\n", tmp);
if ((tmp & RT3070_SEL_EFUSE) || sc->mac_ver == 0x3593)
sc->sc_srom_read = run_efuse_read_2;
}
/* read ROM version */
run_srom_read(sc, RT2860_EEPROM_VERSION, &val);
DPRINTF("EEPROM rev=%d, FAE=%d\n", val & 0xff, val >> 8);
/* read MAC address */
run_srom_read(sc, RT2860_EEPROM_MAC01, &val);
- sc->sc_bssid[0] = val & 0xff;
- sc->sc_bssid[1] = val >> 8;
+ ic->ic_macaddr[0] = val & 0xff;
+ ic->ic_macaddr[1] = val >> 8;
run_srom_read(sc, RT2860_EEPROM_MAC23, &val);
- sc->sc_bssid[2] = val & 0xff;
- sc->sc_bssid[3] = val >> 8;
+ ic->ic_macaddr[2] = val & 0xff;
+ ic->ic_macaddr[3] = val >> 8;
run_srom_read(sc, RT2860_EEPROM_MAC45, &val);
- sc->sc_bssid[4] = val & 0xff;
- sc->sc_bssid[5] = val >> 8;
+ ic->ic_macaddr[4] = val & 0xff;
+ ic->ic_macaddr[5] = val >> 8;
if (sc->mac_ver < 0x3593) {
/* read vendor BBP settings */
for (i = 0; i < 10; i++) {
run_srom_read(sc, RT2860_EEPROM_BBP_BASE + i, &val);
sc->bbp[i].val = val & 0xff;
sc->bbp[i].reg = val >> 8;
DPRINTF("BBP%d=0x%02x\n", sc->bbp[i].reg,
sc->bbp[i].val);
}
if (sc->mac_ver >= 0x3071) {
/* read vendor RF settings */
for (i = 0; i < 10; i++) {
run_srom_read(sc, RT3071_EEPROM_RF_BASE + i,
&val);
sc->rf[i].val = val & 0xff;
sc->rf[i].reg = val >> 8;
DPRINTF("RF%d=0x%02x\n", sc->rf[i].reg,
sc->rf[i].val);
}
}
}
/* read RF frequency offset from EEPROM */
run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_FREQ_LEDS :
RT3593_EEPROM_FREQ, &val);
sc->freq = ((val & 0xff) != 0xff) ? val & 0xff : 0;
DPRINTF("EEPROM freq offset %d\n", sc->freq & 0xff);
run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_FREQ_LEDS :
RT3593_EEPROM_FREQ_LEDS, &val);
if (val >> 8 != 0xff) {
/* read LEDs operating mode */
sc->leds = val >> 8;
run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_LED1 :
RT3593_EEPROM_LED1, &sc->led[0]);
run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_LED2 :
RT3593_EEPROM_LED2, &sc->led[1]);
run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_LED3 :
RT3593_EEPROM_LED3, &sc->led[2]);
} else {
/* broken EEPROM, use default settings */
sc->leds = 0x01;
sc->led[0] = 0x5555;
sc->led[1] = 0x2221;
sc->led[2] = 0x5627; /* differs from RT2860 */
}
DPRINTF("EEPROM LED mode=0x%02x, LEDs=0x%04x/0x%04x/0x%04x\n",
sc->leds, sc->led[0], sc->led[1], sc->led[2]);
/* read RF information */
if (sc->mac_ver == 0x5390 || sc->mac_ver == 0x5392)
run_srom_read(sc, 0x00, &val);
else
run_srom_read(sc, RT2860_EEPROM_ANTENNA, &val);
if (val == 0xffff) {
device_printf(sc->sc_dev,
"invalid EEPROM antenna info, using default\n");
DPRINTF("invalid EEPROM antenna info, using default\n");
if (sc->mac_ver == 0x3572) {
/* default to RF3052 2T2R */
sc->rf_rev = RT3070_RF_3052;
sc->ntxchains = 2;
sc->nrxchains = 2;
} else if (sc->mac_ver >= 0x3070) {
/* default to RF3020 1T1R */
sc->rf_rev = RT3070_RF_3020;
sc->ntxchains = 1;
sc->nrxchains = 1;
} else {
/* default to RF2820 1T2R */
sc->rf_rev = RT2860_RF_2820;
sc->ntxchains = 1;
sc->nrxchains = 2;
}
} else {
if (sc->mac_ver == 0x5390 || sc->mac_ver == 0x5392) {
sc->rf_rev = val;
run_srom_read(sc, RT2860_EEPROM_ANTENNA, &val);
} else
sc->rf_rev = (val >> 8) & 0xf;
sc->ntxchains = (val >> 4) & 0xf;
sc->nrxchains = val & 0xf;
}
DPRINTF("EEPROM RF rev=0x%04x chains=%dT%dR\n",
sc->rf_rev, sc->ntxchains, sc->nrxchains);
/* check if RF supports automatic Tx access gain control */
run_srom_read(sc, RT2860_EEPROM_CONFIG, &val);
DPRINTF("EEPROM CFG 0x%04x\n", val);
/* check if driver should patch the DAC issue */
if ((val >> 8) != 0xff)
sc->patch_dac = (val >> 15) & 1;
if ((val & 0xff) != 0xff) {
sc->ext_5ghz_lna = (val >> 3) & 1;
sc->ext_2ghz_lna = (val >> 2) & 1;
/* check if RF supports automatic Tx access gain control */
sc->calib_2ghz = sc->calib_5ghz = (val >> 1) & 1;
/* check if we have a hardware radio switch */
sc->rfswitch = val & 1;
}
/* Read Tx power settings. */
if (sc->mac_ver == 0x3593)
run_rt3593_get_txpower(sc);
else
run_get_txpower(sc);
/* read Tx power compensation for each Tx rate */
run_srom_read(sc, RT2860_EEPROM_DELTAPWR, &val);
delta_2ghz = delta_5ghz = 0;
if ((val & 0xff) != 0xff && (val & 0x80)) {
delta_2ghz = val & 0xf;
if (!(val & 0x40)) /* negative number */
delta_2ghz = -delta_2ghz;
}
val >>= 8;
if ((val & 0xff) != 0xff && (val & 0x80)) {
delta_5ghz = val & 0xf;
if (!(val & 0x40)) /* negative number */
delta_5ghz = -delta_5ghz;
}
DPRINTF("power compensation=%d (2GHz), %d (5GHz)\n",
delta_2ghz, delta_5ghz);
for (ridx = 0; ridx < 5; ridx++) {
uint32_t reg;
run_srom_read(sc, RT2860_EEPROM_RPWR + ridx * 2, &val);
reg = val;
run_srom_read(sc, RT2860_EEPROM_RPWR + ridx * 2 + 1, &val);
reg |= (uint32_t)val << 16;
sc->txpow20mhz[ridx] = reg;
sc->txpow40mhz_2ghz[ridx] = b4inc(reg, delta_2ghz);
sc->txpow40mhz_5ghz[ridx] = b4inc(reg, delta_5ghz);
DPRINTF("ridx %d: power 20MHz=0x%08x, 40MHz/2GHz=0x%08x, "
"40MHz/5GHz=0x%08x\n", ridx, sc->txpow20mhz[ridx],
sc->txpow40mhz_2ghz[ridx], sc->txpow40mhz_5ghz[ridx]);
}
/* Read RSSI offsets and LNA gains from EEPROM. */
run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_RSSI1_2GHZ :
RT3593_EEPROM_RSSI1_2GHZ, &val);
sc->rssi_2ghz[0] = val & 0xff; /* Ant A */
sc->rssi_2ghz[1] = val >> 8; /* Ant B */
run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_RSSI2_2GHZ :
RT3593_EEPROM_RSSI2_2GHZ, &val);
if (sc->mac_ver >= 0x3070) {
if (sc->mac_ver == 0x3593) {
sc->txmixgain_2ghz = 0;
sc->rssi_2ghz[2] = val & 0xff; /* Ant C */
} else {
/*
* On RT3070 chips (limited to 2 Rx chains), this ROM
* field contains the Tx mixer gain for the 2GHz band.
*/
if ((val & 0xff) != 0xff)
sc->txmixgain_2ghz = val & 0x7;
}
DPRINTF("tx mixer gain=%u (2GHz)\n", sc->txmixgain_2ghz);
} else
sc->rssi_2ghz[2] = val & 0xff; /* Ant C */
if (sc->mac_ver == 0x3593)
run_srom_read(sc, RT3593_EEPROM_LNA_5GHZ, &val);
sc->lna[2] = val >> 8; /* channel group 2 */
run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_RSSI1_5GHZ :
RT3593_EEPROM_RSSI1_5GHZ, &val);
sc->rssi_5ghz[0] = val & 0xff; /* Ant A */
sc->rssi_5ghz[1] = val >> 8; /* Ant B */
run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_RSSI2_5GHZ :
RT3593_EEPROM_RSSI2_5GHZ, &val);
if (sc->mac_ver == 0x3572) {
/*
* On RT3572 chips (limited to 2 Rx chains), this ROM
* field contains the Tx mixer gain for the 5GHz band.
*/
if ((val & 0xff) != 0xff)
sc->txmixgain_5ghz = val & 0x7;
DPRINTF("tx mixer gain=%u (5GHz)\n", sc->txmixgain_5ghz);
} else
sc->rssi_5ghz[2] = val & 0xff; /* Ant C */
if (sc->mac_ver == 0x3593) {
sc->txmixgain_5ghz = 0;
run_srom_read(sc, RT3593_EEPROM_LNA_5GHZ, &val);
}
sc->lna[3] = val >> 8; /* channel group 3 */
run_srom_read(sc, (sc->mac_ver != 0x3593) ? RT2860_EEPROM_LNA :
RT3593_EEPROM_LNA, &val);
sc->lna[0] = val & 0xff; /* channel group 0 */
sc->lna[1] = val >> 8; /* channel group 1 */
/* fix broken 5GHz LNA entries */
if (sc->lna[2] == 0 || sc->lna[2] == 0xff) {
DPRINTF("invalid LNA for channel group %d\n", 2);
sc->lna[2] = sc->lna[1];
}
if (sc->lna[3] == 0 || sc->lna[3] == 0xff) {
DPRINTF("invalid LNA for channel group %d\n", 3);
sc->lna[3] = sc->lna[1];
}
/* fix broken RSSI offset entries */
for (ant = 0; ant < 3; ant++) {
if (sc->rssi_2ghz[ant] < -10 || sc->rssi_2ghz[ant] > 10) {
DPRINTF("invalid RSSI%d offset: %d (2GHz)\n",
ant + 1, sc->rssi_2ghz[ant]);
sc->rssi_2ghz[ant] = 0;
}
if (sc->rssi_5ghz[ant] < -10 || sc->rssi_5ghz[ant] > 10) {
DPRINTF("invalid RSSI%d offset: %d (5GHz)\n",
ant + 1, sc->rssi_5ghz[ant]);
sc->rssi_5ghz[ant] = 0;
}
}
return (0);
}
static struct ieee80211_node *
run_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
return malloc(sizeof (struct run_node), M_DEVBUF, M_NOWAIT | M_ZERO);
}
static int
run_media_change(struct ifnet *ifp)
{
struct ieee80211vap *vap = ifp->if_softc;
struct ieee80211com *ic = vap->iv_ic;
const struct ieee80211_txparam *tp;
struct run_softc *sc = ic->ic_softc;
uint8_t rate, ridx;
int error;
RUN_LOCK(sc);
error = ieee80211_media_change(ifp);
if (error != ENETRESET) {
RUN_UNLOCK(sc);
return (error);
}
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
struct ieee80211_node *ni;
struct run_node *rn;
rate = ic->ic_sup_rates[ic->ic_curmode].
rs_rates[tp->ucastrate] & IEEE80211_RATE_VAL;
for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++)
if (rt2860_rates[ridx].rate == rate)
break;
ni = ieee80211_ref_node(vap->iv_bss);
rn = (struct run_node *)ni;
rn->fix_ridx = ridx;
DPRINTF("rate=%d, fix_ridx=%d\n", rate, rn->fix_ridx);
ieee80211_free_node(ni);
}
#if 0
if ((ifp->if_flags & IFF_UP) &&
- (ifp->if_drv_flags & IFF_DRV_RUNNING)){
+ (ifp->if_drv_flags & RUN_RUNNING)){
run_init_locked(sc);
}
#endif
RUN_UNLOCK(sc);
return (0);
}
static int
run_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
const struct ieee80211_txparam *tp;
struct ieee80211com *ic = vap->iv_ic;
struct run_softc *sc = ic->ic_softc;
struct run_vap *rvp = RUN_VAP(vap);
enum ieee80211_state ostate;
uint32_t sta[3];
uint32_t tmp;
uint8_t ratectl;
uint8_t restart_ratectl = 0;
uint8_t bid = 1 << rvp->rvp_id;
ostate = vap->iv_state;
DPRINTF("%s -> %s\n",
ieee80211_state_name[ostate],
ieee80211_state_name[nstate]);
IEEE80211_UNLOCK(ic);
RUN_LOCK(sc);
ratectl = sc->ratectl_run; /* remember current state */
sc->ratectl_run = RUN_RATECTL_OFF;
usb_callout_stop(&sc->ratectl_ch);
if (ostate == IEEE80211_S_RUN) {
/* turn link LED off */
run_set_leds(sc, RT2860_LED_RADIO);
}
switch (nstate) {
case IEEE80211_S_INIT:
restart_ratectl = 1;
if (ostate != IEEE80211_S_RUN)
break;
ratectl &= ~bid;
sc->runbmap &= ~bid;
/* abort TSF synchronization if there is no vap running */
if (--sc->running == 0) {
run_read(sc, RT2860_BCN_TIME_CFG, &tmp);
run_write(sc, RT2860_BCN_TIME_CFG,
tmp & ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN |
RT2860_TBTT_TIMER_EN));
}
break;
case IEEE80211_S_RUN:
if (!(sc->runbmap & bid)) {
if(sc->running++)
restart_ratectl = 1;
sc->runbmap |= bid;
}
m_freem(rvp->beacon_mbuf);
rvp->beacon_mbuf = NULL;
switch (vap->iv_opmode) {
case IEEE80211_M_HOSTAP:
case IEEE80211_M_MBSS:
sc->ap_running |= bid;
ic->ic_opmode = vap->iv_opmode;
run_update_beacon_cb(vap);
break;
case IEEE80211_M_IBSS:
sc->adhoc_running |= bid;
if (!sc->ap_running)
ic->ic_opmode = vap->iv_opmode;
run_update_beacon_cb(vap);
break;
case IEEE80211_M_STA:
sc->sta_running |= bid;
if (!sc->ap_running && !sc->adhoc_running)
ic->ic_opmode = vap->iv_opmode;
/* read statistics counters (clear on read) */
run_read_region_1(sc, RT2860_TX_STA_CNT0,
(uint8_t *)sta, sizeof sta);
break;
default:
ic->ic_opmode = vap->iv_opmode;
break;
}
if (vap->iv_opmode != IEEE80211_M_MONITOR) {
struct ieee80211_node *ni;
if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) {
RUN_UNLOCK(sc);
IEEE80211_LOCK(ic);
return (-1);
}
run_updateslot(ic);
run_enable_mrr(sc);
run_set_txpreamble(sc);
run_set_basicrates(sc);
ni = ieee80211_ref_node(vap->iv_bss);
- IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid);
+ IEEE80211_ADDR_COPY(ic->ic_macaddr, ni->ni_bssid);
run_set_bssid(sc, ni->ni_bssid);
ieee80211_free_node(ni);
run_enable_tsf_sync(sc);
/* enable automatic rate adaptation */
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE)
ratectl |= bid;
}
/* turn link LED on */
run_set_leds(sc, RT2860_LED_RADIO |
(IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ?
RT2860_LED_LINK_2GHZ : RT2860_LED_LINK_5GHZ));
break;
default:
DPRINTFN(6, "undefined case\n");
break;
}
/* restart amrr for running VAPs */
if ((sc->ratectl_run = ratectl) && restart_ratectl)
usb_callout_reset(&sc->ratectl_ch, hz, run_ratectl_to, sc);
RUN_UNLOCK(sc);
IEEE80211_LOCK(ic);
return(rvp->newstate(vap, nstate, arg));
}
/* ARGSUSED */
static void
run_wme_update_cb(void *arg)
{
struct ieee80211com *ic = arg;
struct run_softc *sc = ic->ic_softc;
struct ieee80211_wme_state *wmesp = &ic->ic_wme;
int aci, error = 0;
RUN_LOCK_ASSERT(sc, MA_OWNED);
/* update MAC TX configuration registers */
for (aci = 0; aci < WME_NUM_AC; aci++) {
error = run_write(sc, RT2860_EDCA_AC_CFG(aci),
wmesp->wme_params[aci].wmep_logcwmax << 16 |
wmesp->wme_params[aci].wmep_logcwmin << 12 |
wmesp->wme_params[aci].wmep_aifsn << 8 |
wmesp->wme_params[aci].wmep_txopLimit);
if (error) goto err;
}
/* update SCH/DMA registers too */
error = run_write(sc, RT2860_WMM_AIFSN_CFG,
wmesp->wme_params[WME_AC_VO].wmep_aifsn << 12 |
wmesp->wme_params[WME_AC_VI].wmep_aifsn << 8 |
wmesp->wme_params[WME_AC_BK].wmep_aifsn << 4 |
wmesp->wme_params[WME_AC_BE].wmep_aifsn);
if (error) goto err;
error = run_write(sc, RT2860_WMM_CWMIN_CFG,
wmesp->wme_params[WME_AC_VO].wmep_logcwmin << 12 |
wmesp->wme_params[WME_AC_VI].wmep_logcwmin << 8 |
wmesp->wme_params[WME_AC_BK].wmep_logcwmin << 4 |
wmesp->wme_params[WME_AC_BE].wmep_logcwmin);
if (error) goto err;
error = run_write(sc, RT2860_WMM_CWMAX_CFG,
wmesp->wme_params[WME_AC_VO].wmep_logcwmax << 12 |
wmesp->wme_params[WME_AC_VI].wmep_logcwmax << 8 |
wmesp->wme_params[WME_AC_BK].wmep_logcwmax << 4 |
wmesp->wme_params[WME_AC_BE].wmep_logcwmax);
if (error) goto err;
error = run_write(sc, RT2860_WMM_TXOP0_CFG,
wmesp->wme_params[WME_AC_BK].wmep_txopLimit << 16 |
wmesp->wme_params[WME_AC_BE].wmep_txopLimit);
if (error) goto err;
error = run_write(sc, RT2860_WMM_TXOP1_CFG,
wmesp->wme_params[WME_AC_VO].wmep_txopLimit << 16 |
wmesp->wme_params[WME_AC_VI].wmep_txopLimit);
err:
if (error)
DPRINTF("WME update failed\n");
return;
}
static int
run_wme_update(struct ieee80211com *ic)
{
struct run_softc *sc = ic->ic_softc;
/* sometimes called without the lock */
if (mtx_owned(&ic->ic_comlock.mtx)) {
uint32_t i = RUN_CMDQ_GET(&sc->cmdq_store);
DPRINTF("cmdq_store=%d\n", i);
sc->cmdq[i].func = run_wme_update_cb;
sc->cmdq[i].arg0 = ic;
ieee80211_runtask(ic, &sc->cmdq_task);
return (0);
}
RUN_LOCK(sc);
run_wme_update_cb(ic);
RUN_UNLOCK(sc);
/* return whatever, the upper layer doesn't care anyway */
return (0);
}
static void
run_key_update_begin(struct ieee80211vap *vap)
{
/*
* To avoid out-of-order events, both run_key_set() and
* _delete() are deferred and handled by run_cmdq_cb().
* So, there is nothing we need to do here.
*/
}
static void
run_key_update_end(struct ieee80211vap *vap)
{
/* null */
}
static void
run_key_set_cb(void *arg)
{
struct run_cmdq *cmdq = arg;
struct ieee80211vap *vap = cmdq->arg1;
struct ieee80211_key *k = cmdq->k;
struct ieee80211com *ic = vap->iv_ic;
struct run_softc *sc = ic->ic_softc;
struct ieee80211_node *ni;
uint32_t attr;
uint16_t base, associd;
uint8_t mode, wcid, iv[8];
RUN_LOCK_ASSERT(sc, MA_OWNED);
if (vap->iv_opmode == IEEE80211_M_HOSTAP)
ni = ieee80211_find_vap_node(&ic->ic_sta, vap, cmdq->mac);
else
ni = vap->iv_bss;
associd = (ni != NULL) ? ni->ni_associd : 0;
/* map net80211 cipher to RT2860 security mode */
switch (k->wk_cipher->ic_cipher) {
case IEEE80211_CIPHER_WEP:
if(k->wk_keylen < 8)
mode = RT2860_MODE_WEP40;
else
mode = RT2860_MODE_WEP104;
break;
case IEEE80211_CIPHER_TKIP:
mode = RT2860_MODE_TKIP;
break;
case IEEE80211_CIPHER_AES_CCM:
mode = RT2860_MODE_AES_CCMP;
break;
default:
DPRINTF("undefined case\n");
return;
}
DPRINTFN(1, "associd=%x, keyix=%d, mode=%x, type=%s, tx=%s, rx=%s\n",
associd, k->wk_keyix, mode,
(k->wk_flags & IEEE80211_KEY_GROUP) ? "group" : "pairwise",
(k->wk_flags & IEEE80211_KEY_XMIT) ? "on" : "off",
(k->wk_flags & IEEE80211_KEY_RECV) ? "on" : "off");
if (k->wk_flags & IEEE80211_KEY_GROUP) {
wcid = 0; /* NB: update WCID0 for group keys */
base = RT2860_SKEY(RUN_VAP(vap)->rvp_id, k->wk_keyix);
} else {
wcid = (vap->iv_opmode == IEEE80211_M_STA) ?
1 : RUN_AID2WCID(associd);
base = RT2860_PKEY(wcid);
}
if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP) {
if(run_write_region_1(sc, base, k->wk_key, 16))
return;
if(run_write_region_1(sc, base + 16, &k->wk_key[16], 8)) /* wk_txmic */
return;
if(run_write_region_1(sc, base + 24, &k->wk_key[24], 8)) /* wk_rxmic */
return;
} else {
/* roundup len to 16-bit: XXX fix write_region_1() instead */
if(run_write_region_1(sc, base, k->wk_key, (k->wk_keylen + 1) & ~1))
return;
}
if (!(k->wk_flags & IEEE80211_KEY_GROUP) ||
(k->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))) {
/* set initial packet number in IV+EIV */
if (k->wk_cipher == IEEE80211_CIPHER_WEP) {
memset(iv, 0, sizeof iv);
iv[3] = vap->iv_def_txkey << 6;
} else {
if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP) {
iv[0] = k->wk_keytsc >> 8;
iv[1] = (iv[0] | 0x20) & 0x7f;
iv[2] = k->wk_keytsc;
} else /* CCMP */ {
iv[0] = k->wk_keytsc;
iv[1] = k->wk_keytsc >> 8;
iv[2] = 0;
}
iv[3] = k->wk_keyix << 6 | IEEE80211_WEP_EXTIV;
iv[4] = k->wk_keytsc >> 16;
iv[5] = k->wk_keytsc >> 24;
iv[6] = k->wk_keytsc >> 32;
iv[7] = k->wk_keytsc >> 40;
}
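/*
 * The 8 bytes written below are the per-WCID IV/EIV kept by the
 * hardware: for WEP the 24-bit IV starts at zero and iv[3] carries the
 * default key id, while for TKIP/CCMP iv[0..2] and iv[4..7] carry the
 * initial packet number from wk_keytsc and iv[3] holds the key index
 * plus the ExtIV flag.
 */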
if (run_write_region_1(sc, RT2860_IVEIV(wcid), iv, 8))
return;
}
if (k->wk_flags & IEEE80211_KEY_GROUP) {
/* install group key */
if (run_read(sc, RT2860_SKEY_MODE_0_7, &attr))
return;
attr &= ~(0xf << (k->wk_keyix * 4));
attr |= mode << (k->wk_keyix * 4);
if (run_write(sc, RT2860_SKEY_MODE_0_7, attr))
return;
} else {
/* install pairwise key */
if (run_read(sc, RT2860_WCID_ATTR(wcid), &attr))
return;
attr = (attr & ~0xf) | (mode << 1) | RT2860_RX_PKEY_EN;
if (run_write(sc, RT2860_WCID_ATTR(wcid), attr))
return;
}
/* TODO create a pass-thru key entry? */
/* need wcid to delete the right key later */
k->wk_pad = wcid;
}
/*
* This does not strictly have to be deferred, but defer it anyway so
* that run_cmdq_cb() preserves the ordering with run_key_delete().
*
* return 0 on error
*/
static int
run_key_set(struct ieee80211vap *vap, struct ieee80211_key *k,
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct ieee80211com *ic = vap->iv_ic;
struct run_softc *sc = ic->ic_softc;
uint32_t i;
i = RUN_CMDQ_GET(&sc->cmdq_store);
DPRINTF("cmdq_store=%d\n", i);
sc->cmdq[i].func = run_key_set_cb;
sc->cmdq[i].arg0 = NULL;
sc->cmdq[i].arg1 = vap;
sc->cmdq[i].k = k;
IEEE80211_ADDR_COPY(sc->cmdq[i].mac, mac);
ieee80211_runtask(ic, &sc->cmdq_task);
/*
* To make sure key will be set when hostapd
* calls iv_key_set() before if_init().
*/
if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
RUN_LOCK(sc);
sc->cmdq_key_set = RUN_CMDQ_GO;
RUN_UNLOCK(sc);
}
return (1);
}
/*
* If the wlan is destroyed without being brought down first, i.e.
* without "wlan down" or "wpa_cli terminate", this function is
* called after the vap is gone. Do not dereference it.
*/
static void
run_key_delete_cb(void *arg)
{
struct run_cmdq *cmdq = arg;
struct run_softc *sc = cmdq->arg1;
struct ieee80211_key *k = &cmdq->key;
uint32_t attr;
uint8_t wcid;
RUN_LOCK_ASSERT(sc, MA_OWNED);
if (k->wk_flags & IEEE80211_KEY_GROUP) {
/* remove group key */
DPRINTF("removing group key\n");
run_read(sc, RT2860_SKEY_MODE_0_7, &attr);
attr &= ~(0xf << (k->wk_keyix * 4));
run_write(sc, RT2860_SKEY_MODE_0_7, attr);
} else {
/* remove pairwise key */
DPRINTF("removing key for wcid %x\n", k->wk_pad);
/* matching wcid was written to wk_pad in run_key_set() */
wcid = k->wk_pad;
run_read(sc, RT2860_WCID_ATTR(wcid), &attr);
attr &= ~0xf;
run_write(sc, RT2860_WCID_ATTR(wcid), attr);
run_set_region_4(sc, RT2860_WCID_ENTRY(wcid), 0, 8);
}
k->wk_pad = 0;
}
/*
* return 0 on error
*/
static int
run_key_delete(struct ieee80211vap *vap, struct ieee80211_key *k)
{
struct ieee80211com *ic = vap->iv_ic;
struct run_softc *sc = ic->ic_softc;
struct ieee80211_key *k0;
uint32_t i;
/*
* By the time the callback runs, the key may already be gone, so
* copy the values needed to delete it before deferring.  Because
* of a LOR with the node lock, the driver lock cannot be taken
* here; an atomic operation is used instead.
*/
i = RUN_CMDQ_GET(&sc->cmdq_store);
DPRINTF("cmdq_store=%d\n", i);
sc->cmdq[i].func = run_key_delete_cb;
sc->cmdq[i].arg0 = NULL;
sc->cmdq[i].arg1 = sc;
k0 = &sc->cmdq[i].key;
k0->wk_flags = k->wk_flags;
k0->wk_keyix = k->wk_keyix;
/* matching wcid was written to wk_pad in run_key_set() */
k0->wk_pad = k->wk_pad;
ieee80211_runtask(ic, &sc->cmdq_task);
return (1); /* return fake success */
}
static void
run_ratectl_to(void *arg)
{
struct run_softc *sc = arg;
/* do it in process context, so it can sleep */
- ieee80211_runtask(sc->sc_ifp->if_l2com, &sc->ratectl_task);
+ ieee80211_runtask(&sc->sc_ic, &sc->ratectl_task);
/* next timeout will be rescheduled in the callback task */
}
/* ARGSUSED */
static void
run_ratectl_cb(void *arg, int pending)
{
struct run_softc *sc = arg;
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
if (vap == NULL)
return;
if (sc->rvp_cnt > 1 || vap->iv_opmode != IEEE80211_M_STA) {
/*
* run_reset_livelock() doesn't do anything with AMRR,
* but Ralink wants us to call it every 1 sec. So, we
* piggyback here rather than creating another callout.
* Livelock may occur only in HOSTAP or IBSS mode
* (when h/w is sending beacons).
*/
RUN_LOCK(sc);
run_reset_livelock(sc);
/* just in case, there are some stats to drain */
run_drain_fifo(sc);
RUN_UNLOCK(sc);
}
ieee80211_iterate_nodes(&ic->ic_sta, run_iter_func, sc);
RUN_LOCK(sc);
if(sc->ratectl_run != RUN_RATECTL_OFF)
usb_callout_reset(&sc->ratectl_ch, hz, run_ratectl_to, sc);
RUN_UNLOCK(sc);
}
static void
run_drain_fifo(void *arg)
{
struct run_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
uint32_t stat;
uint16_t (*wstat)[3];
uint8_t wcid, mcs, pid;
int8_t retry;
RUN_LOCK_ASSERT(sc, MA_OWNED);
for (;;) {
/* drain Tx status FIFO (maxsize = 16) */
run_read(sc, RT2860_TX_STAT_FIFO, &stat);
DPRINTFN(4, "tx stat 0x%08x\n", stat);
if (!(stat & RT2860_TXQ_VLD))
break;
wcid = (stat >> RT2860_TXQ_WCID_SHIFT) & 0xff;
/* if no ACK was requested, no feedback is available */
if (!(stat & RT2860_TXQ_ACKREQ) || wcid > RT2870_WCID_MAX ||
wcid == 0)
continue;
/*
* Each entry has the format of a Tx-complete status, but the
* device only lets us poll the stats, and there is no guarantee
* that the node they refer to is still around by the time we read
* them.  Calling ieee80211_ratectl_tx_update() directly here could
* therefore dereference an already-freed node.
*
* To avoid that, accumulate the stats in the softc and feed them
* to ieee80211_ratectl_tx_update() later (from run_iter_func()).
*/
wstat = &(sc->wcid_stats[wcid]);
(*wstat)[RUN_TXCNT]++;
if (stat & RT2860_TXQ_OK)
(*wstat)[RUN_SUCCESS]++;
else
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
/*
* Check whether there were retries, i.e. whether the rate actually
* used for the successful Tx differs from the requested rate.  Note
* that this works only because we do not allow rate fallback from
* OFDM to CCK.
*/
mcs = (stat >> RT2860_TXQ_MCS_SHIFT) & 0x7f;
pid = (stat >> RT2860_TXQ_PID_SHIFT) & 0xf;
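/* PacketID was set to MCS + 1 at Tx time (see run_tx()). */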
if ((retry = pid - 1 - mcs) > 0) {
(*wstat)[RUN_TXCNT] += retry;
(*wstat)[RUN_RETRY] += retry;
}
}
DPRINTFN(3, "count=%d\n", sc->fifo_cnt);
sc->fifo_cnt = 0;
}
static void
run_iter_func(void *arg, struct ieee80211_node *ni)
{
struct run_softc *sc = arg;
struct ieee80211vap *vap = ni->ni_vap;
- struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct run_node *rn = (void *)ni;
union run_stats sta[2];
uint16_t (*wstat)[3];
int txcnt, success, retrycnt, error;
RUN_LOCK(sc);
/* Special case: in single-vap STA mode, only the BSS node is of interest. */
if (sc->rvp_cnt <= 1 && vap->iv_opmode == IEEE80211_M_STA &&
ni != vap->iv_bss)
goto fail;
if (sc->rvp_cnt <= 1 && (vap->iv_opmode == IEEE80211_M_IBSS ||
vap->iv_opmode == IEEE80211_M_STA)) {
/* read statistic counters (clear on read) and update AMRR state */
error = run_read_region_1(sc, RT2860_TX_STA_CNT0, (uint8_t *)sta,
sizeof sta);
if (error != 0)
goto fail;
/* count failed TX as errors */
- if_inc_counter(ifp, IFCOUNTER_OERRORS, le16toh(sta[0].error.fail));
+ if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS,
+ le16toh(sta[0].error.fail));
retrycnt = le16toh(sta[1].tx.retry);
success = le16toh(sta[1].tx.success);
txcnt = retrycnt + success + le16toh(sta[0].error.fail);
DPRINTFN(3, "retrycnt=%d success=%d failcnt=%d\n",
retrycnt, success, le16toh(sta[0].error.fail));
} else {
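/*
 * Multi-vap / hostap case: use the per-WCID FIFO stats
 * accumulated by run_drain_fifo().
 */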
wstat = &(sc->wcid_stats[RUN_AID2WCID(ni->ni_associd)]);
if (wstat == &(sc->wcid_stats[0]) ||
wstat > &(sc->wcid_stats[RT2870_WCID_MAX]))
goto fail;
txcnt = (*wstat)[RUN_TXCNT];
success = (*wstat)[RUN_SUCCESS];
retrycnt = (*wstat)[RUN_RETRY];
DPRINTFN(3, "retrycnt=%d txcnt=%d success=%d\n",
retrycnt, txcnt, success);
memset(wstat, 0, sizeof(*wstat));
}
ieee80211_ratectl_tx_update(vap, ni, &txcnt, &success, &retrycnt);
rn->amrr_ridx = ieee80211_ratectl_rate(ni, NULL, 0);
fail:
RUN_UNLOCK(sc);
DPRINTFN(3, "ridx=%d\n", rn->amrr_ridx);
}
static void
run_newassoc_cb(void *arg)
{
struct run_cmdq *cmdq = arg;
struct ieee80211_node *ni = cmdq->arg1;
struct run_softc *sc = ni->ni_vap->iv_ic->ic_softc;
uint8_t wcid = cmdq->wcid;
RUN_LOCK_ASSERT(sc, MA_OWNED);
run_write_region_1(sc, RT2860_WCID_ENTRY(wcid),
ni->ni_macaddr, IEEE80211_ADDR_LEN);
memset(&(sc->wcid_stats[wcid]), 0, sizeof(sc->wcid_stats[wcid]));
}
static void
run_newassoc(struct ieee80211_node *ni, int isnew)
{
struct run_node *rn = (void *)ni;
struct ieee80211_rateset *rs = &ni->ni_rates;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = vap->iv_ic;
struct run_softc *sc = ic->ic_softc;
uint8_t rate;
uint8_t ridx;
uint8_t wcid;
int i, j;
wcid = (vap->iv_opmode == IEEE80211_M_STA) ?
1 : RUN_AID2WCID(ni->ni_associd);
if (wcid > RT2870_WCID_MAX) {
device_printf(sc->sc_dev, "wcid=%d out of range\n", wcid);
return;
}
/* only interested in true associations */
if (isnew && ni->ni_associd != 0) {
/*
* This function can be called from a timeout handler, so
* the work needs to be deferred.
*/
uint32_t cnt = RUN_CMDQ_GET(&sc->cmdq_store);
DPRINTF("cmdq_store=%d\n", cnt);
sc->cmdq[cnt].func = run_newassoc_cb;
sc->cmdq[cnt].arg0 = NULL;
sc->cmdq[cnt].arg1 = ni;
sc->cmdq[cnt].wcid = wcid;
ieee80211_runtask(ic, &sc->cmdq_task);
}
DPRINTF("new assoc isnew=%d associd=%x addr=%s\n",
isnew, ni->ni_associd, ether_sprintf(ni->ni_macaddr));
for (i = 0; i < rs->rs_nrates; i++) {
rate = rs->rs_rates[i] & IEEE80211_RATE_VAL;
/* convert 802.11 rate to hardware rate index */
for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++)
if (rt2860_rates[ridx].rate == rate)
break;
rn->ridx[i] = ridx;
/* determine rate of control response frames */
for (j = i; j >= 0; j--) {
if ((rs->rs_rates[j] & IEEE80211_RATE_BASIC) &&
rt2860_rates[rn->ridx[i]].phy ==
rt2860_rates[rn->ridx[j]].phy)
break;
}
if (j >= 0) {
rn->ctl_ridx[i] = rn->ridx[j];
} else {
/* no basic rate found, use mandatory one */
rn->ctl_ridx[i] = rt2860_rates[ridx].ctl_ridx;
}
DPRINTF("rate=0x%02x ridx=%d ctl_ridx=%d\n",
rs->rs_rates[i], rn->ridx[i], rn->ctl_ridx[i]);
}
rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate;
for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++)
if (rt2860_rates[ridx].rate == rate)
break;
rn->mgt_ridx = ridx;
DPRINTF("rate=%d, mgmt_ridx=%d\n", rate, rn->mgt_ridx);
RUN_LOCK(sc);
if(sc->ratectl_run != RUN_RATECTL_OFF)
usb_callout_reset(&sc->ratectl_ch, hz, run_ratectl_to, sc);
RUN_UNLOCK(sc);
}
/*
* Return the Rx chain with the highest RSSI for a given frame.
*/
static __inline uint8_t
run_maxrssi_chain(struct run_softc *sc, const struct rt2860_rxwi *rxwi)
{
uint8_t rxchain = 0;
if (sc->nrxchains > 1) {
if (rxwi->rssi[1] > rxwi->rssi[rxchain])
rxchain = 1;
if (sc->nrxchains > 2)
if (rxwi->rssi[2] > rxwi->rssi[rxchain])
rxchain = 2;
}
return (rxchain);
}
static void
run_rx_frame(struct run_softc *sc, struct mbuf *m, uint32_t dmalen)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct rt2870_rxd *rxd;
struct rt2860_rxwi *rxwi;
uint32_t flags;
uint16_t len, rxwisize;
uint8_t ant, rssi;
int8_t nf;
rxwi = mtod(m, struct rt2860_rxwi *);
len = le16toh(rxwi->len) & 0xfff;
rxwisize = sizeof(struct rt2860_rxwi);
if (sc->mac_ver == 0x5592)
rxwisize += sizeof(uint64_t);
else if (sc->mac_ver == 0x3593)
rxwisize += sizeof(uint32_t);
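/* RT5592 and RT3593 use an extended RXWI (8 and 4 extra bytes). */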
if (__predict_false(len > dmalen)) {
m_freem(m);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
DPRINTF("bad RXWI length %u > %u\n", len, dmalen);
return;
}
/* Rx descriptor is located at the end */
rxd = (struct rt2870_rxd *)(mtod(m, caddr_t) + dmalen);
flags = le32toh(rxd->flags);
if (__predict_false(flags & (RT2860_RX_CRCERR | RT2860_RX_ICVERR))) {
m_freem(m);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
DPRINTF("%s error.\n", (flags & RT2860_RX_CRCERR)?"CRC":"ICV");
return;
}
m->m_data += rxwisize;
m->m_pkthdr.len = m->m_len -= rxwisize;
wh = mtod(m, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
m->m_flags |= M_WEP;
}
if (flags & RT2860_RX_L2PAD) {
DPRINTFN(8, "received RT2860_RX_L2PAD frame\n");
len += 2;
}
ni = ieee80211_find_rxnode(ic,
mtod(m, struct ieee80211_frame_min *));
if (__predict_false(flags & RT2860_RX_MICERR)) {
/* report MIC failures to net80211 for TKIP */
if (ni != NULL)
ieee80211_notify_michael_failure(ni->ni_vap, wh,
rxwi->keyidx);
m_freem(m);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
DPRINTF("MIC error. Someone is lying.\n");
return;
}
ant = run_maxrssi_chain(sc, rxwi);
rssi = rxwi->rssi[ant];
nf = run_rssi2dbm(sc, rssi, ant);
- m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = len;
if (ni != NULL) {
(void)ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else {
(void)ieee80211_input_all(ic, m, rssi, nf);
}
if (__predict_false(ieee80211_radiotap_active(ic))) {
struct run_rx_radiotap_header *tap = &sc->sc_rxtap;
uint16_t phy;
tap->wr_flags = 0;
tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq);
tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
tap->wr_antsignal = rssi;
tap->wr_antenna = ant;
tap->wr_dbm_antsignal = run_rssi2dbm(sc, rssi, ant);
tap->wr_rate = 2; /* in case it can't be found below */
phy = le16toh(rxwi->phy);
switch (phy & RT2860_PHY_MODE) {
case RT2860_PHY_CCK:
switch ((phy & RT2860_PHY_MCS) & ~RT2860_PHY_SHPRE) {
case 0: tap->wr_rate = 2; break;
case 1: tap->wr_rate = 4; break;
case 2: tap->wr_rate = 11; break;
case 3: tap->wr_rate = 22; break;
}
if (phy & RT2860_PHY_SHPRE)
tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
break;
case RT2860_PHY_OFDM:
switch (phy & RT2860_PHY_MCS) {
case 0: tap->wr_rate = 12; break;
case 1: tap->wr_rate = 18; break;
case 2: tap->wr_rate = 24; break;
case 3: tap->wr_rate = 36; break;
case 4: tap->wr_rate = 48; break;
case 5: tap->wr_rate = 72; break;
case 6: tap->wr_rate = 96; break;
case 7: tap->wr_rate = 108; break;
}
break;
}
}
}
static void
run_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct run_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
struct mbuf *m = NULL;
struct mbuf *m0;
uint32_t dmalen;
uint16_t rxwisize;
int xferlen;
rxwisize = sizeof(struct rt2860_rxwi);
if (sc->mac_ver == 0x5592)
rxwisize += sizeof(uint64_t);
else if (sc->mac_ver == 0x3593)
rxwisize += sizeof(uint32_t);
usbd_xfer_status(xfer, &xferlen, NULL, NULL, NULL);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
DPRINTFN(15, "rx done, actlen=%d\n", xferlen);
if (xferlen < (int)(sizeof(uint32_t) + rxwisize +
sizeof(struct rt2870_rxd))) {
DPRINTF("xfer too short %d\n", xferlen);
goto tr_setup;
}
m = sc->rx_m;
sc->rx_m = NULL;
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
if (sc->rx_m == NULL) {
sc->rx_m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
MJUMPAGESIZE /* xfer can be bigger than MCLBYTES */);
}
if (sc->rx_m == NULL) {
DPRINTF("could not allocate mbuf - idle with stall\n");
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
usbd_xfer_set_stall(xfer);
usbd_xfer_set_frames(xfer, 0);
} else {
/*
* Load the mbuf cluster directly into DMA to avoid
* a data copy.  This works because there is only
* one cluster.
*/
usbd_xfer_set_frame_data(xfer, 0,
mtod(sc->rx_m, caddr_t), RUN_MAX_RXSZ);
usbd_xfer_set_frames(xfer, 1);
}
usbd_transfer_submit(xfer);
break;
default: /* Error */
if (error != USB_ERR_CANCELLED) {
/* try to clear stall first */
usbd_xfer_set_stall(xfer);
-
if (error == USB_ERR_TIMEOUT)
device_printf(sc->sc_dev, "device timeout\n");
-
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
-
+ counter_u64_add(ic->ic_ierrors, 1);
goto tr_setup;
}
if (sc->rx_m != NULL) {
m_freem(sc->rx_m);
sc->rx_m = NULL;
}
break;
}
if (m == NULL)
return;
/* inputting all the frames must be last */
RUN_UNLOCK(sc);
m->m_pkthdr.len = m->m_len = xferlen;
/* HW can aggregate multiple 802.11 frames in a single USB xfer */
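/*
 * Each aggregated frame appears to be laid out as a 32-bit DMA length,
 * followed by the RXWI plus the 802.11 frame (dmalen bytes), followed
 * by the Rx descriptor, so consecutive frames are dmalen + 8 bytes apart.
 */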
for(;;) {
dmalen = le32toh(*mtod(m, uint32_t *)) & 0xffff;
if ((dmalen >= (uint32_t)-8) || (dmalen == 0) ||
((dmalen & 3) != 0)) {
DPRINTF("bad DMA length %u\n", dmalen);
break;
}
if ((dmalen + 8) > (uint32_t)xferlen) {
DPRINTF("bad DMA length %u > %d\n",
dmalen + 8, xferlen);
break;
}
/* If it is the last one or a single frame, we won't copy. */
if ((xferlen -= dmalen + 8) <= 8) {
/* trim 32-bit DMA-len header */
m->m_data += 4;
m->m_pkthdr.len = m->m_len -= 4;
run_rx_frame(sc, m, dmalen);
m = NULL; /* don't free source buffer */
break;
}
/* copy aggregated frames to another mbuf */
m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (__predict_false(m0 == NULL)) {
DPRINTF("could not allocate mbuf\n");
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
break;
}
m_copydata(m, 4 /* skip 32-bit DMA-len header */,
dmalen + sizeof(struct rt2870_rxd), mtod(m0, caddr_t));
m0->m_pkthdr.len = m0->m_len =
dmalen + sizeof(struct rt2870_rxd);
run_rx_frame(sc, m0, dmalen);
/* update data ptr */
m->m_data += dmalen + 8;
m->m_pkthdr.len = m->m_len -= dmalen + 8;
}
/* make sure we free the source buffer, if any */
m_freem(m);
RUN_LOCK(sc);
}
static void
run_tx_free(struct run_endpoint_queue *pq,
struct run_tx_data *data, int txerr)
{
if (data->m != NULL) {
if (data->m->m_flags & M_TXCB)
ieee80211_process_callback(data->ni, data->m,
txerr ? ETIMEDOUT : 0);
m_freem(data->m);
data->m = NULL;
if (data->ni == NULL) {
DPRINTF("no node\n");
} else {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
}
STAILQ_INSERT_TAIL(&pq->tx_fh, data, next);
pq->tx_nfree++;
}
static void
run_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error, u_int index)
{
struct run_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct run_tx_data *data;
struct ieee80211vap *vap = NULL;
struct usb_page_cache *pc;
struct run_endpoint_queue *pq = &sc->sc_epq[index];
struct mbuf *m;
usb_frlength_t size;
int actlen;
int sumlen;
usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
DPRINTFN(11, "transfer complete: %d "
"bytes @ index %d\n", actlen, index);
data = usbd_xfer_get_priv(xfer);
-
run_tx_free(pq, data, 0);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
usbd_xfer_set_priv(xfer, NULL);
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
-
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
data = STAILQ_FIRST(&pq->tx_qh);
if (data == NULL)
break;
STAILQ_REMOVE_HEAD(&pq->tx_qh, next);
m = data->m;
size = (sc->mac_ver == 0x5592) ?
sizeof(data->desc) + sizeof(uint32_t) : sizeof(data->desc);
if ((m->m_pkthdr.len +
size + 3 + 8) > RUN_MAX_TXSZ) {
DPRINTF("data overflow, %u bytes\n",
m->m_pkthdr.len);
-
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
-
run_tx_free(pq, data, 1);
-
goto tr_setup;
}
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_in(pc, 0, &data->desc, size);
usbd_m_copy_in(pc, size, m, 0, m->m_pkthdr.len);
size += m->m_pkthdr.len;
/*
* Align end on a 4-byte boundary, pad 8 bytes (CRC +
* 4-byte padding), and be sure to zero those trailing
* bytes:
*/
usbd_frame_zero(pc, size, ((-size) & 3) + 8);
size += ((-size) & 3) + 8;
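/* e.g. size = 1538: zero 2 alignment bytes + 8 trailing bytes, final size 1548. */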
vap = data->ni->ni_vap;
if (ieee80211_radiotap_active_vap(vap)) {
struct run_tx_radiotap_header *tap = &sc->sc_txtap;
struct rt2860_txwi *txwi =
(struct rt2860_txwi *)(&data->desc + sizeof(struct rt2870_txd));
tap->wt_flags = 0;
tap->wt_rate = rt2860_rates[data->ridx].rate;
tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq);
tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags);
tap->wt_hwqueue = index;
if (le16toh(txwi->phy) & RT2860_PHY_SHPRE)
tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
ieee80211_radiotap_tx(vap, m);
}
DPRINTFN(11, "sending frame len=%u/%u @ index %d\n",
m->m_pkthdr.len, size, index);
usbd_xfer_set_frame_len(xfer, 0, size);
usbd_xfer_set_priv(xfer, data);
-
usbd_transfer_submit(xfer);
+ run_start(sc);
- RUN_UNLOCK(sc);
- run_start(ifp);
- RUN_LOCK(sc);
-
break;
default:
DPRINTF("USB transfer error, %s\n",
usbd_errstr(error));
data = usbd_xfer_get_priv(xfer);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
-
if (data != NULL) {
if(data->ni != NULL)
vap = data->ni->ni_vap;
run_tx_free(pq, data, error);
usbd_xfer_set_priv(xfer, NULL);
}
+
if (vap == NULL)
vap = TAILQ_FIRST(&ic->ic_vaps);
if (error != USB_ERR_CANCELLED) {
if (error == USB_ERR_TIMEOUT) {
device_printf(sc->sc_dev, "device timeout\n");
uint32_t i = RUN_CMDQ_GET(&sc->cmdq_store);
DPRINTF("cmdq_store=%d\n", i);
sc->cmdq[i].func = run_usb_timeout_cb;
sc->cmdq[i].arg0 = vap;
ieee80211_runtask(ic, &sc->cmdq_task);
}
/*
* Try to clear the stall first, also when other
* errors occur; note that clearing the stall
* introduces a 50 ms delay:
*/
usbd_xfer_set_stall(xfer);
goto tr_setup;
}
break;
}
}
static void
run_bulk_tx_callback0(struct usb_xfer *xfer, usb_error_t error)
{
run_bulk_tx_callbackN(xfer, error, 0);
}
static void
run_bulk_tx_callback1(struct usb_xfer *xfer, usb_error_t error)
{
run_bulk_tx_callbackN(xfer, error, 1);
}
static void
run_bulk_tx_callback2(struct usb_xfer *xfer, usb_error_t error)
{
run_bulk_tx_callbackN(xfer, error, 2);
}
static void
run_bulk_tx_callback3(struct usb_xfer *xfer, usb_error_t error)
{
run_bulk_tx_callbackN(xfer, error, 3);
}
static void
run_bulk_tx_callback4(struct usb_xfer *xfer, usb_error_t error)
{
run_bulk_tx_callbackN(xfer, error, 4);
}
static void
run_bulk_tx_callback5(struct usb_xfer *xfer, usb_error_t error)
{
run_bulk_tx_callbackN(xfer, error, 5);
}
static void
run_set_tx_desc(struct run_softc *sc, struct run_tx_data *data)
{
struct mbuf *m = data->m;
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = data->ni->ni_vap;
struct ieee80211_frame *wh;
struct rt2870_txd *txd;
struct rt2860_txwi *txwi;
uint16_t xferlen, txwisize;
uint16_t mcs;
uint8_t ridx = data->ridx;
uint8_t pad;
/* get MCS code from rate index */
mcs = rt2860_rates[ridx].mcs;
txwisize = (sc->mac_ver == 0x5592) ?
sizeof(*txwi) + sizeof(uint32_t) : sizeof(*txwi);
xferlen = txwisize + m->m_pkthdr.len;
/* roundup to 32-bit alignment */
xferlen = (xferlen + 3) & ~3;
txd = (struct rt2870_txd *)&data->desc;
txd->len = htole16(xferlen);
wh = mtod(m, struct ieee80211_frame *);
/*
* If both are true or both are false, the header is
* already 32-bit aligned and no L2 padding is needed.
*/
if(IEEE80211_HAS_ADDR4(wh) == IEEE80211_QOS_HAS_SEQ(wh))
pad = 0;
else
pad = 2;
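/*
 * e.g. a 3-address QoS frame (26-byte header) or a 4-address non-QoS
 * frame (30-byte header) needs 2 bytes of L2 padding; plain 3-address
 * (24 bytes) and 4-address QoS (32 bytes) headers are already aligned.
 */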
/* setup TX Wireless Information */
txwi = (struct rt2860_txwi *)(txd + 1);
txwi->len = htole16(m->m_pkthdr.len - pad);
if (rt2860_rates[ridx].phy == IEEE80211_T_DS) {
mcs |= RT2860_PHY_CCK;
if (ridx != RT2860_RIDX_CCK1 &&
(ic->ic_flags & IEEE80211_F_SHPREAMBLE))
mcs |= RT2860_PHY_SHPRE;
} else
mcs |= RT2860_PHY_OFDM;
txwi->phy = htole16(mcs);
/* check if RTS/CTS or CTS-to-self protection is required */
if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
(m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold ||
((ic->ic_flags & IEEE80211_F_USEPROT) &&
rt2860_rates[ridx].phy == IEEE80211_T_OFDM)))
txwi->txop |= RT2860_TX_TXOP_HT;
else
txwi->txop |= RT2860_TX_TXOP_BACKOFF;
if (vap->iv_opmode != IEEE80211_M_STA && !IEEE80211_QOS_HAS_SEQ(wh))
txwi->xflags |= RT2860_TX_NSEQ;
}
/* This function must be called locked */
static int
run_tx(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211_frame *wh;
struct ieee80211_channel *chan;
const struct ieee80211_txparam *tp;
struct run_node *rn = (void *)ni;
struct run_tx_data *data;
struct rt2870_txd *txd;
struct rt2860_txwi *txwi;
uint16_t qos;
uint16_t dur;
uint16_t qid;
uint8_t type;
uint8_t tid;
uint8_t ridx;
uint8_t ctl_ridx;
uint8_t qflags;
uint8_t xflags = 0;
int hasqos;
RUN_LOCK_ASSERT(sc, MA_OWNED);
wh = mtod(m, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
/*
* There are 7 bulk endpoints: 1 for RX
* and 6 for TX (4 EDCAs + HCCA + Prio).
* Update 03-14-2009: some devices like the Planex GW-US300MiniS
* seem to have only 4 TX bulk endpoints (Fukaumi Naoki).
*/
if ((hasqos = IEEE80211_QOS_HAS_SEQ(wh))) {
uint8_t *frm;
if(IEEE80211_HAS_ADDR4(wh))
frm = ((struct ieee80211_qosframe_addr4 *)wh)->i_qos;
else
frm =((struct ieee80211_qosframe *)wh)->i_qos;
qos = le16toh(*(const uint16_t *)frm);
tid = qos & IEEE80211_QOS_TID;
qid = TID_TO_WME_AC(tid);
} else {
qos = 0;
tid = 0;
qid = WME_AC_BE;
}
qflags = (qid < 4) ? RT2860_TX_QSEL_EDCA : RT2860_TX_QSEL_HCCA;
DPRINTFN(8, "qos %d\tqid %d\ttid %d\tqflags %x\n",
qos, qid, tid, qflags);
chan = (ni->ni_chan != IEEE80211_CHAN_ANYC)?ni->ni_chan:ic->ic_curchan;
tp = &vap->iv_txparms[ieee80211_chan2mode(chan)];
/* pickup a rate index */
if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
type != IEEE80211_FC0_TYPE_DATA || m->m_flags & M_EAPOL) {
ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ?
RT2860_RIDX_OFDM6 : RT2860_RIDX_CCK1;
ctl_ridx = rt2860_rates[ridx].ctl_ridx;
} else {
if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
ridx = rn->fix_ridx;
else
ridx = rn->amrr_ridx;
ctl_ridx = rt2860_rates[ridx].ctl_ridx;
}
if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
(!hasqos || (qos & IEEE80211_QOS_ACKPOLICY) !=
IEEE80211_QOS_ACKPOLICY_NOACK)) {
xflags |= RT2860_TX_ACK;
if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
dur = rt2860_rates[ctl_ridx].sp_ack_dur;
else
dur = rt2860_rates[ctl_ridx].lp_ack_dur;
USETW(wh->i_dur, dur);
}
/* reserve slots for mgmt packets, just in case */
if (sc->sc_epq[qid].tx_nfree < 3) {
DPRINTFN(10, "tx ring %d is full\n", qid);
return (-1);
}
data = STAILQ_FIRST(&sc->sc_epq[qid].tx_fh);
STAILQ_REMOVE_HEAD(&sc->sc_epq[qid].tx_fh, next);
sc->sc_epq[qid].tx_nfree--;
txd = (struct rt2870_txd *)&data->desc;
txd->flags = qflags;
txwi = (struct rt2860_txwi *)(txd + 1);
txwi->xflags = xflags;
if (IEEE80211_IS_MULTICAST(wh->i_addr1))
txwi->wcid = 0;
else
txwi->wcid = (vap->iv_opmode == IEEE80211_M_STA) ?
1 : RUN_AID2WCID(ni->ni_associd);
/* clear leftover garbage bits */
txwi->flags = 0;
txwi->txop = 0;
data->m = m;
data->ni = ni;
data->ridx = ridx;
run_set_tx_desc(sc, data);
/*
* The chip keeps two kinds of Tx stats:
*  * TX_STAT_FIFO, for per-WCID stats, and
*  * TX_STA_CNT0, for aggregate all-TX stats.
*
* To use the FIFO stats, we store the MCS in the driver-private
* PacketID field so we can tell whose stats we are reading later.
* We add 1 to the MCS because a PacketID of 0 means we do not want
* feedback in TX_STAT_FIFO, which is exactly what we want in STA
* mode, where TX_STA_CNT0 does the job.
*
* FIFO stats do not count Tx with WCID 0xff, so we do this in run_tx().
*/
if (sc->rvp_cnt > 1 || vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_MBSS) {
uint16_t pid = (rt2860_rates[ridx].mcs + 1) & 0xf;
txwi->len |= htole16(pid << RT2860_TX_PID_SHIFT);
/*
* Unlike PCI based devices, we get no interrupt from USB
* devices, so we simulate a FIFO-is-full interrupt here.
* Ralink recommends draining the FIFO stats every 100 ms,
* but the 16 slots fill up quickly.  To prevent overflow,
* increment a counter on every FIFO stat request so we know
* how many slots are left.
* We do this only in HOSTAP or multi-vap mode, since FIFO
* stats are used only in those modes.
* We only drain the stats here; AMRR gets updated once a
* second by run_ratectl_cb() via callout.
* Call it early, otherwise the FIFO overflows.
*/
if (sc->fifo_cnt++ == 10) {
/*
* With multiple vaps or if_bridge, if_start() can be called
* with a non-sleepable lock held (tcpinp), so defer the work.
*/
uint32_t i = RUN_CMDQ_GET(&sc->cmdq_store);
DPRINTFN(6, "cmdq_store=%d\n", i);
sc->cmdq[i].func = run_drain_fifo;
sc->cmdq[i].arg0 = sc;
ieee80211_runtask(ic, &sc->cmdq_task);
}
}
STAILQ_INSERT_TAIL(&sc->sc_epq[qid].tx_qh, data, next);
usbd_transfer_start(sc->sc_xfer[qid]);
DPRINTFN(8, "sending data frame len=%d rate=%d qid=%d\n",
m->m_pkthdr.len + (int)(sizeof(struct rt2870_txd) +
sizeof(struct rt2860_txwi)), rt2860_rates[ridx].rate, qid);
return (0);
}
static int
run_tx_mgt(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct run_node *rn = (void *)ni;
struct run_tx_data *data;
struct ieee80211_frame *wh;
struct rt2870_txd *txd;
struct rt2860_txwi *txwi;
uint16_t dur;
uint8_t ridx = rn->mgt_ridx;
uint8_t type;
uint8_t xflags = 0;
uint8_t wflags = 0;
RUN_LOCK_ASSERT(sc, MA_OWNED);
wh = mtod(m, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
/* tell hardware to add timestamp for probe responses */
if ((wh->i_fc[0] &
(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
(IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
wflags |= RT2860_TX_TS;
else if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
xflags |= RT2860_TX_ACK;
dur = ieee80211_ack_duration(ic->ic_rt, rt2860_rates[ridx].rate,
ic->ic_flags & IEEE80211_F_SHPREAMBLE);
USETW(wh->i_dur, dur);
}
- if (sc->sc_epq[0].tx_nfree == 0) {
+ if (sc->sc_epq[0].tx_nfree == 0)
/* let caller free mbuf */
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
return (EIO);
- }
data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
sc->sc_epq[0].tx_nfree--;
txd = (struct rt2870_txd *)&data->desc;
txd->flags = RT2860_TX_QSEL_EDCA;
txwi = (struct rt2860_txwi *)(txd + 1);
txwi->wcid = 0xff;
txwi->flags = wflags;
txwi->xflags = xflags;
txwi->txop = 0; /* clear leftover garbage bits */
data->m = m;
data->ni = ni;
data->ridx = ridx;
run_set_tx_desc(sc, data);
DPRINTFN(10, "sending mgt frame len=%d rate=%d\n", m->m_pkthdr.len +
(int)(sizeof(struct rt2870_txd) + sizeof(struct rt2860_txwi)),
rt2860_rates[ridx].rate);
STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
usbd_transfer_start(sc->sc_xfer[0]);
return (0);
}
static int
run_sendprot(struct run_softc *sc,
const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate)
{
struct ieee80211com *ic = ni->ni_ic;
struct ieee80211_frame *wh;
struct run_tx_data *data;
struct rt2870_txd *txd;
struct rt2860_txwi *txwi;
struct mbuf *mprot;
int ridx;
int protrate;
int ackrate;
int pktlen;
int isshort;
uint16_t dur;
uint8_t type;
uint8_t wflags = 0;
uint8_t xflags = 0;
RUN_LOCK_ASSERT(sc, MA_OWNED);
KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY,
("protection %d", prot));
wh = mtod(m, struct ieee80211_frame *);
pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN;
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
protrate = ieee80211_ctl_rate(ic->ic_rt, rate);
ackrate = ieee80211_ack_rate(ic->ic_rt, rate);
isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0;
dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort)
+ ieee80211_ack_duration(ic->ic_rt, rate, isshort);
wflags = RT2860_TX_FRAG;
/* check that there are free slots before allocating the mbuf */
- if (sc->sc_epq[0].tx_nfree == 0) {
+ if (sc->sc_epq[0].tx_nfree == 0)
/* let caller free mbuf */
- sc->sc_ifp->if_drv_flags |= IFF_DRV_OACTIVE;
return (ENOBUFS);
- }
if (prot == IEEE80211_PROT_RTSCTS) {
/* NB: CTS is the same size as an ACK */
dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort);
xflags |= RT2860_TX_ACK;
mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur);
} else {
mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur);
}
if (mprot == NULL) {
- if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
+ if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
DPRINTF("could not allocate mbuf\n");
return (ENOBUFS);
}
data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
sc->sc_epq[0].tx_nfree--;
txd = (struct rt2870_txd *)&data->desc;
txd->flags = RT2860_TX_QSEL_EDCA;
txwi = (struct rt2860_txwi *)(txd + 1);
txwi->wcid = 0xff;
txwi->flags = wflags;
txwi->xflags = xflags;
txwi->txop = 0; /* clear leftover garbage bits */
data->m = mprot;
data->ni = ieee80211_ref_node(ni);
for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++)
if (rt2860_rates[ridx].rate == protrate)
break;
data->ridx = ridx;
run_set_tx_desc(sc, data);
DPRINTFN(1, "sending prot len=%u rate=%u\n",
m->m_pkthdr.len, rate);
STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
usbd_transfer_start(sc->sc_xfer[0]);
return (0);
}
static int
run_tx_param(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
struct ieee80211_frame *wh;
struct run_tx_data *data;
struct rt2870_txd *txd;
struct rt2860_txwi *txwi;
uint8_t type;
uint8_t ridx;
uint8_t rate;
uint8_t opflags = 0;
uint8_t xflags = 0;
int error;
RUN_LOCK_ASSERT(sc, MA_OWNED);
KASSERT(params != NULL, ("no raw xmit params"));
wh = mtod(m, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
rate = params->ibp_rate0;
if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
/* let caller free mbuf */
return (EINVAL);
}
if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
xflags |= RT2860_TX_ACK;
if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) {
error = run_sendprot(sc, m, ni,
params->ibp_flags & IEEE80211_BPF_RTS ?
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY,
rate);
if (error) {
/* let caller free mbuf */
return error;
}
opflags |= /*XXX RT2573_TX_LONG_RETRY |*/ RT2860_TX_TXOP_SIFS;
}
if (sc->sc_epq[0].tx_nfree == 0) {
/* let caller free mbuf */
- sc->sc_ifp->if_drv_flags |= IFF_DRV_OACTIVE;
DPRINTF("sending raw frame, but tx ring is full\n");
return (EIO);
}
data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
sc->sc_epq[0].tx_nfree--;
txd = (struct rt2870_txd *)&data->desc;
txd->flags = RT2860_TX_QSEL_EDCA;
txwi = (struct rt2860_txwi *)(txd + 1);
txwi->wcid = 0xff;
txwi->xflags = xflags;
txwi->txop = opflags;
txwi->flags = 0; /* clear leftover garbage bits */
data->m = m;
data->ni = ni;
for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++)
if (rt2860_rates[ridx].rate == rate)
break;
data->ridx = ridx;
run_set_tx_desc(sc, data);
DPRINTFN(10, "sending raw frame len=%u rate=%u\n",
m->m_pkthdr.len, rate);
STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
usbd_transfer_start(sc->sc_xfer[0]);
return (0);
}
static int
run_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
- struct ifnet *ifp = ni->ni_ic->ic_ifp;
struct run_softc *sc = ni->ni_ic->ic_softc;
int error = 0;
RUN_LOCK(sc);
/* prevent management frames from being sent if we're not ready */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- error = ENETDOWN;
+ if (!(sc->sc_flags & RUN_RUNNING)) {
+ error = ENETDOWN;
goto done;
}
if (params == NULL) {
/* tx mgt packet */
if ((error = run_tx_mgt(sc, m, ni)) != 0) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
DPRINTF("mgt tx failed\n");
goto done;
}
} else {
/* tx raw packet with param */
if ((error = run_tx_param(sc, m, ni, params)) != 0) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
DPRINTF("tx with param failed\n");
goto done;
}
}
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
-
done:
RUN_UNLOCK(sc);
if (error != 0) {
if(m != NULL)
m_freem(m);
ieee80211_free_node(ni);
}
return (error);
}
+static int
+run_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct run_softc *sc = ic->ic_softc;
+ int error;
+
+ RUN_LOCK(sc);
+ if ((sc->sc_flags & RUN_RUNNING) == 0) {
+ RUN_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ RUN_UNLOCK(sc);
+ return (error);
+ }
+ run_start(sc);
+ RUN_UNLOCK(sc);
+
+ return (0);
+}
+
static void
-run_start(struct ifnet *ifp)
+run_start(struct run_softc *sc)
{
- struct run_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct mbuf *m;
- RUN_LOCK(sc);
+ RUN_LOCK_ASSERT(sc, MA_OWNED);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
- RUN_UNLOCK(sc);
+ if ((sc->sc_flags & RUN_RUNNING) == 0)
return;
- }
- for (;;) {
- /* send data frames */
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
-
+ while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
if (run_tx(sc, m, ni) != 0) {
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+ mbufq_prepend(&sc->sc_snd, m);
break;
}
}
-
- RUN_UNLOCK(sc);
}
-static int
-run_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+run_parent(struct ieee80211com *ic)
{
- struct ieee80211com *ic = ifp->if_l2com;
struct run_softc *sc = ic->ic_softc;
- struct ifreq *ifr = (struct ifreq *) data;
int startall = 0;
- int error;
RUN_LOCK(sc);
- error = sc->sc_detached ? ENXIO : 0;
- RUN_UNLOCK(sc);
- if (error)
- return (error);
-
- switch (cmd) {
- case SIOCSIFFLAGS:
- RUN_LOCK(sc);
- if (ifp->if_flags & IFF_UP) {
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)){
- startall = 1;
- run_init_locked(sc);
- } else
- run_update_promisc_locked(sc);
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
- (ic->ic_nrunning == 0 || sc->rvp_cnt <= 1)) {
- run_stop(sc);
- }
- }
+ if (sc->sc_detached) {
RUN_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
+ return;
}
- return (error);
+ if (ic->ic_nrunning > 0) {
+ if (!(sc->sc_flags & RUN_RUNNING)) {
+ startall = 1;
+ run_init_locked(sc);
+ } else
+ run_update_promisc_locked(sc);
+ } else if ((sc->sc_flags & RUN_RUNNING) && sc->rvp_cnt <= 1)
+ run_stop(sc);
+ RUN_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
}
static void
run_iq_calib(struct run_softc *sc, u_int chan)
{
uint16_t val;
/* Tx0 IQ gain. */
run_bbp_write(sc, 158, 0x2c);
if (chan <= 14)
run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX0_2GHZ, &val, 1);
else if (chan <= 64) {
run_efuse_read(sc,
RT5390_EEPROM_IQ_GAIN_CAL_TX0_CH36_TO_CH64_5GHZ,
&val, 1);
} else if (chan <= 138) {
run_efuse_read(sc,
RT5390_EEPROM_IQ_GAIN_CAL_TX0_CH100_TO_CH138_5GHZ,
&val, 1);
} else if (chan <= 165) {
run_efuse_read(sc,
RT5390_EEPROM_IQ_GAIN_CAL_TX0_CH140_TO_CH165_5GHZ,
&val, 1);
} else
val = 0;
run_bbp_write(sc, 159, val);
/* Tx0 IQ phase. */
run_bbp_write(sc, 158, 0x2d);
if (chan <= 14) {
run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX0_2GHZ,
&val, 1);
} else if (chan <= 64) {
run_efuse_read(sc,
RT5390_EEPROM_IQ_PHASE_CAL_TX0_CH36_TO_CH64_5GHZ,
&val, 1);
} else if (chan <= 138) {
run_efuse_read(sc,
RT5390_EEPROM_IQ_PHASE_CAL_TX0_CH100_TO_CH138_5GHZ,
&val, 1);
} else if (chan <= 165) {
run_efuse_read(sc,
RT5390_EEPROM_IQ_PHASE_CAL_TX0_CH140_TO_CH165_5GHZ,
&val, 1);
} else
val = 0;
run_bbp_write(sc, 159, val);
/* Tx1 IQ gain. */
run_bbp_write(sc, 158, 0x4a);
if (chan <= 14) {
run_efuse_read(sc, RT5390_EEPROM_IQ_GAIN_CAL_TX1_2GHZ,
&val, 1);
} else if (chan <= 64) {
run_efuse_read(sc,
RT5390_EEPROM_IQ_GAIN_CAL_TX1_CH36_TO_CH64_5GHZ,
&val, 1);
} else if (chan <= 138) {
run_efuse_read(sc,
RT5390_EEPROM_IQ_GAIN_CAL_TX1_CH100_TO_CH138_5GHZ,
&val, 1);
} else if (chan <= 165) {
run_efuse_read(sc,
RT5390_EEPROM_IQ_GAIN_CAL_TX1_CH140_TO_CH165_5GHZ,
&val, 1);
} else
val = 0;
run_bbp_write(sc, 159, val);
/* Tx1 IQ phase. */
run_bbp_write(sc, 158, 0x4b);
if (chan <= 14) {
run_efuse_read(sc, RT5390_EEPROM_IQ_PHASE_CAL_TX1_2GHZ,
&val, 1);
} else if (chan <= 64) {
run_efuse_read(sc,
RT5390_EEPROM_IQ_PHASE_CAL_TX1_CH36_TO_CH64_5GHZ,
&val, 1);
} else if (chan <= 138) {
run_efuse_read(sc,
RT5390_EEPROM_IQ_PHASE_CAL_TX1_CH100_TO_CH138_5GHZ,
&val, 1);
} else if (chan <= 165) {
run_efuse_read(sc,
RT5390_EEPROM_IQ_PHASE_CAL_TX1_CH140_TO_CH165_5GHZ,
&val, 1);
} else
val = 0;
run_bbp_write(sc, 159, val);
/* RF IQ compensation control. */
run_bbp_write(sc, 158, 0x04);
run_efuse_read(sc, RT5390_EEPROM_RF_IQ_COMPENSATION_CTL,
&val, 1);
run_bbp_write(sc, 159, val);
/* RF IQ imbalance compensation control. */
run_bbp_write(sc, 158, 0x03);
run_efuse_read(sc,
RT5390_EEPROM_RF_IQ_IMBALANCE_COMPENSATION_CTL, &val, 1);
run_bbp_write(sc, 159, val);
}
static void
run_set_agc(struct run_softc *sc, uint8_t agc)
{
uint8_t bbp;
if (sc->mac_ver == 0x3572) {
run_bbp_read(sc, 27, &bbp);
bbp &= ~(0x3 << 5);
run_bbp_write(sc, 27, bbp | 0 << 5); /* select Rx0 */
run_bbp_write(sc, 66, agc);
run_bbp_write(sc, 27, bbp | 1 << 5); /* select Rx1 */
run_bbp_write(sc, 66, agc);
} else
run_bbp_write(sc, 66, agc);
}
static void
run_select_chan_group(struct run_softc *sc, int group)
{
uint32_t tmp;
uint8_t agc;
run_bbp_write(sc, 62, 0x37 - sc->lna[group]);
run_bbp_write(sc, 63, 0x37 - sc->lna[group]);
run_bbp_write(sc, 64, 0x37 - sc->lna[group]);
if (sc->mac_ver < 0x3572)
run_bbp_write(sc, 86, 0x00);
if (sc->mac_ver == 0x3593) {
run_bbp_write(sc, 77, 0x98);
run_bbp_write(sc, 83, (group == 0) ? 0x8a : 0x9a);
}
if (group == 0) {
if (sc->ext_2ghz_lna) {
if (sc->mac_ver >= 0x5390)
run_bbp_write(sc, 75, 0x52);
else {
run_bbp_write(sc, 82, 0x62);
run_bbp_write(sc, 75, 0x46);
}
} else {
if (sc->mac_ver == 0x5592) {
run_bbp_write(sc, 79, 0x1c);
run_bbp_write(sc, 80, 0x0e);
run_bbp_write(sc, 81, 0x3a);
run_bbp_write(sc, 82, 0x62);
run_bbp_write(sc, 195, 0x80);
run_bbp_write(sc, 196, 0xe0);
run_bbp_write(sc, 195, 0x81);
run_bbp_write(sc, 196, 0x1f);
run_bbp_write(sc, 195, 0x82);
run_bbp_write(sc, 196, 0x38);
run_bbp_write(sc, 195, 0x83);
run_bbp_write(sc, 196, 0x32);
run_bbp_write(sc, 195, 0x85);
run_bbp_write(sc, 196, 0x28);
run_bbp_write(sc, 195, 0x86);
run_bbp_write(sc, 196, 0x19);
} else if (sc->mac_ver >= 0x5390)
run_bbp_write(sc, 75, 0x50);
else {
run_bbp_write(sc, 82,
(sc->mac_ver == 0x3593) ? 0x62 : 0x84);
run_bbp_write(sc, 75, 0x50);
}
}
} else {
if (sc->mac_ver == 0x5592) {
run_bbp_write(sc, 79, 0x18);
run_bbp_write(sc, 80, 0x08);
run_bbp_write(sc, 81, 0x38);
run_bbp_write(sc, 82, 0x92);
run_bbp_write(sc, 195, 0x80);
run_bbp_write(sc, 196, 0xf0);
run_bbp_write(sc, 195, 0x81);
run_bbp_write(sc, 196, 0x1e);
run_bbp_write(sc, 195, 0x82);
run_bbp_write(sc, 196, 0x28);
run_bbp_write(sc, 195, 0x83);
run_bbp_write(sc, 196, 0x20);
run_bbp_write(sc, 195, 0x85);
run_bbp_write(sc, 196, 0x7f);
run_bbp_write(sc, 195, 0x86);
run_bbp_write(sc, 196, 0x7f);
} else if (sc->mac_ver == 0x3572)
run_bbp_write(sc, 82, 0x94);
else
run_bbp_write(sc, 82,
(sc->mac_ver == 0x3593) ? 0x82 : 0xf2);
if (sc->ext_5ghz_lna)
run_bbp_write(sc, 75, 0x46);
else
run_bbp_write(sc, 75, 0x50);
}
run_read(sc, RT2860_TX_BAND_CFG, &tmp);
tmp &= ~(RT2860_5G_BAND_SEL_N | RT2860_5G_BAND_SEL_P);
tmp |= (group == 0) ? RT2860_5G_BAND_SEL_N : RT2860_5G_BAND_SEL_P;
run_write(sc, RT2860_TX_BAND_CFG, tmp);
/* enable appropriate Power Amplifiers and Low Noise Amplifiers */
tmp = RT2860_RFTR_EN | RT2860_TRSW_EN | RT2860_LNA_PE0_EN;
if (sc->mac_ver == 0x3593)
tmp |= 1 << 29 | 1 << 28;
if (sc->nrxchains > 1)
tmp |= RT2860_LNA_PE1_EN;
if (group == 0) { /* 2GHz */
tmp |= RT2860_PA_PE_G0_EN;
if (sc->ntxchains > 1)
tmp |= RT2860_PA_PE_G1_EN;
if (sc->mac_ver == 0x3593) {
if (sc->ntxchains > 2)
tmp |= 1 << 25;
}
} else { /* 5GHz */
tmp |= RT2860_PA_PE_A0_EN;
if (sc->ntxchains > 1)
tmp |= RT2860_PA_PE_A1_EN;
}
if (sc->mac_ver == 0x3572) {
run_rt3070_rf_write(sc, 8, 0x00);
run_write(sc, RT2860_TX_PIN_CFG, tmp);
run_rt3070_rf_write(sc, 8, 0x80);
} else
run_write(sc, RT2860_TX_PIN_CFG, tmp);
if (sc->mac_ver == 0x5592) {
run_bbp_write(sc, 195, 0x8d);
run_bbp_write(sc, 196, 0x1a);
}
if (sc->mac_ver == 0x3593) {
run_read(sc, RT2860_GPIO_CTRL, &tmp);
tmp &= ~0x01010000;
if (group == 0)
tmp |= 0x00010000;
tmp = (tmp & ~0x00009090) | 0x00000090;
run_write(sc, RT2860_GPIO_CTRL, tmp);
}
/* set initial AGC value */
if (group == 0) { /* 2GHz band */
if (sc->mac_ver >= 0x3070)
agc = 0x1c + sc->lna[0] * 2;
else
agc = 0x2e + sc->lna[0];
} else { /* 5GHz band */
if (sc->mac_ver == 0x5592)
agc = 0x24 + sc->lna[group] * 2;
else if (sc->mac_ver == 0x3572 || sc->mac_ver == 0x3593)
agc = 0x22 + (sc->lna[group] * 5) / 3;
else
agc = 0x32 + (sc->lna[group] * 5) / 3;
}
run_set_agc(sc, agc);
}
static void
run_rt2870_set_chan(struct run_softc *sc, u_int chan)
{
const struct rfprog *rfprog = rt2860_rf2850;
uint32_t r2, r3, r4;
int8_t txpow1, txpow2;
int i;
/* find the settings for this channel (we know it exists) */
for (i = 0; rfprog[i].chan != chan; i++);
r2 = rfprog[i].r2;
if (sc->ntxchains == 1)
r2 |= 1 << 14; /* 1T: disable Tx chain 2 */
if (sc->nrxchains == 1)
r2 |= 1 << 17 | 1 << 6; /* 1R: disable Rx chains 2 & 3 */
else if (sc->nrxchains == 2)
r2 |= 1 << 6; /* 2R: disable Rx chain 3 */
/* use Tx power values from EEPROM */
txpow1 = sc->txpow1[i];
txpow2 = sc->txpow2[i];
/* Initialize RF R3 and R4. */
r3 = rfprog[i].r3 & 0xffffc1ff;
r4 = (rfprog[i].r4 & ~(0x001f87c0)) | (sc->freq << 15);
if (chan > 14) {
if (txpow1 >= 0) {
txpow1 = (txpow1 > 0xf) ? (0xf) : (txpow1);
r3 |= (txpow1 << 10) | (1 << 9);
} else {
txpow1 += 7;
/* txpow1 cannot exceed 15 here. */
r3 |= (txpow1 << 10);
}
if (txpow2 >= 0) {
txpow2 = (txpow2 > 0xf) ? (0xf) : (txpow2);
r4 |= (txpow2 << 7) | (1 << 6);
} else {
txpow2 += 7;
r4 |= (txpow2 << 7);
}
} else {
/* Set Tx0 power. */
r3 |= (txpow1 << 9);
/* Set frequency offset and Tx1 power. */
r4 |= (txpow2 << 6);
}
run_rt2870_rf_write(sc, rfprog[i].r1);
run_rt2870_rf_write(sc, r2);
run_rt2870_rf_write(sc, r3 & ~(1 << 2));
run_rt2870_rf_write(sc, r4);
run_delay(sc, 10);
run_rt2870_rf_write(sc, rfprog[i].r1);
run_rt2870_rf_write(sc, r2);
run_rt2870_rf_write(sc, r3 | (1 << 2));
run_rt2870_rf_write(sc, r4);
run_delay(sc, 10);
run_rt2870_rf_write(sc, rfprog[i].r1);
run_rt2870_rf_write(sc, r2);
run_rt2870_rf_write(sc, r3 & ~(1 << 2));
run_rt2870_rf_write(sc, r4);
}
static void
run_rt3070_set_chan(struct run_softc *sc, u_int chan)
{
int8_t txpow1, txpow2;
uint8_t rf;
int i;
/* find the settings for this channel (we know it exists) */
for (i = 0; rt2860_rf2850[i].chan != chan; i++);
/* use Tx power values from EEPROM */
txpow1 = sc->txpow1[i];
txpow2 = sc->txpow2[i];
run_rt3070_rf_write(sc, 2, rt3070_freqs[i].n);
/* RT3370/RT3390: RF R3 bits [7:4] are not reserved. */
run_rt3070_rf_read(sc, 3, &rf);
rf = (rf & ~0x0f) | rt3070_freqs[i].k;
run_rt3070_rf_write(sc, 3, rf);
run_rt3070_rf_read(sc, 6, &rf);
rf = (rf & ~0x03) | rt3070_freqs[i].r;
run_rt3070_rf_write(sc, 6, rf);
/* set Tx0 power */
run_rt3070_rf_read(sc, 12, &rf);
rf = (rf & ~0x1f) | txpow1;
run_rt3070_rf_write(sc, 12, rf);
/* set Tx1 power */
run_rt3070_rf_read(sc, 13, &rf);
rf = (rf & ~0x1f) | txpow2;
run_rt3070_rf_write(sc, 13, rf);
run_rt3070_rf_read(sc, 1, &rf);
rf &= ~0xfc;
if (sc->ntxchains == 1)
rf |= 1 << 7 | 1 << 5; /* 1T: disable Tx chains 2 & 3 */
else if (sc->ntxchains == 2)
rf |= 1 << 7; /* 2T: disable Tx chain 3 */
if (sc->nrxchains == 1)
rf |= 1 << 6 | 1 << 4; /* 1R: disable Rx chains 2 & 3 */
else if (sc->nrxchains == 2)
rf |= 1 << 6; /* 2R: disable Rx chain 3 */
run_rt3070_rf_write(sc, 1, rf);
/* set RF offset */
run_rt3070_rf_read(sc, 23, &rf);
rf = (rf & ~0x7f) | sc->freq;
run_rt3070_rf_write(sc, 23, rf);
/* program RF filter */
run_rt3070_rf_read(sc, 24, &rf); /* Tx */
rf = (rf & ~0x3f) | sc->rf24_20mhz;
run_rt3070_rf_write(sc, 24, rf);
run_rt3070_rf_read(sc, 31, &rf); /* Rx */
rf = (rf & ~0x3f) | sc->rf24_20mhz;
run_rt3070_rf_write(sc, 31, rf);
/* enable RF tuning */
run_rt3070_rf_read(sc, 7, &rf);
run_rt3070_rf_write(sc, 7, rf | 0x01);
}
static void
run_rt3572_set_chan(struct run_softc *sc, u_int chan)
{
int8_t txpow1, txpow2;
uint32_t tmp;
uint8_t rf;
int i;
/* find the settings for this channel (we know it exists) */
for (i = 0; rt2860_rf2850[i].chan != chan; i++);
/* use Tx power values from EEPROM */
txpow1 = sc->txpow1[i];
txpow2 = sc->txpow2[i];
if (chan <= 14) {
run_bbp_write(sc, 25, sc->bbp25);
run_bbp_write(sc, 26, sc->bbp26);
} else {
/* enable IQ phase correction */
run_bbp_write(sc, 25, 0x09);
run_bbp_write(sc, 26, 0xff);
}
run_rt3070_rf_write(sc, 2, rt3070_freqs[i].n);
run_rt3070_rf_write(sc, 3, rt3070_freqs[i].k);
run_rt3070_rf_read(sc, 6, &rf);
rf = (rf & ~0x0f) | rt3070_freqs[i].r;
rf |= (chan <= 14) ? 0x08 : 0x04;
run_rt3070_rf_write(sc, 6, rf);
/* set PLL mode */
run_rt3070_rf_read(sc, 5, &rf);
rf &= ~(0x08 | 0x04);
rf |= (chan <= 14) ? 0x04 : 0x08;
run_rt3070_rf_write(sc, 5, rf);
/* set Tx power for chain 0 */
if (chan <= 14)
rf = 0x60 | txpow1;
else
rf = 0xe0 | (txpow1 & 0xc) << 1 | (txpow1 & 0x3);
run_rt3070_rf_write(sc, 12, rf);
/* set Tx power for chain 1 */
if (chan <= 14)
rf = 0x60 | txpow2;
else
rf = 0xe0 | (txpow2 & 0xc) << 1 | (txpow2 & 0x3);
run_rt3070_rf_write(sc, 13, rf);
/* set Tx/Rx streams */
run_rt3070_rf_read(sc, 1, &rf);
rf &= ~0xfc;
if (sc->ntxchains == 1)
rf |= 1 << 7 | 1 << 5; /* 1T: disable Tx chains 2 & 3 */
else if (sc->ntxchains == 2)
rf |= 1 << 7; /* 2T: disable Tx chain 3 */
if (sc->nrxchains == 1)
rf |= 1 << 6 | 1 << 4; /* 1R: disable Rx chains 2 & 3 */
else if (sc->nrxchains == 2)
rf |= 1 << 6; /* 2R: disable Rx chain 3 */
run_rt3070_rf_write(sc, 1, rf);
/* set RF offset */
run_rt3070_rf_read(sc, 23, &rf);
rf = (rf & ~0x7f) | sc->freq;
run_rt3070_rf_write(sc, 23, rf);
/* program RF filter */
rf = sc->rf24_20mhz;
run_rt3070_rf_write(sc, 24, rf); /* Tx */
run_rt3070_rf_write(sc, 31, rf); /* Rx */
/* enable RF tuning */
run_rt3070_rf_read(sc, 7, &rf);
rf = (chan <= 14) ? 0xd8 : ((rf & ~0xc8) | 0x14);
run_rt3070_rf_write(sc, 7, rf);
/* TSSI */
rf = (chan <= 14) ? 0xc3 : 0xc0;
run_rt3070_rf_write(sc, 9, rf);
/* set loop filter 1 */
run_rt3070_rf_write(sc, 10, 0xf1);
/* set loop filter 2 */
run_rt3070_rf_write(sc, 11, (chan <= 14) ? 0xb9 : 0x00);
/* set tx_mx2_ic */
run_rt3070_rf_write(sc, 15, (chan <= 14) ? 0x53 : 0x43);
/* set tx_mx1_ic */
if (chan <= 14)
rf = 0x48 | sc->txmixgain_2ghz;
else
rf = 0x78 | sc->txmixgain_5ghz;
run_rt3070_rf_write(sc, 16, rf);
/* set tx_lo1 */
run_rt3070_rf_write(sc, 17, 0x23);
/* set tx_lo2 */
if (chan <= 14)
rf = 0x93;
else if (chan <= 64)
rf = 0xb7;
else if (chan <= 128)
rf = 0x74;
else
rf = 0x72;
run_rt3070_rf_write(sc, 19, rf);
/* set rx_lo1 */
if (chan <= 14)
rf = 0xb3;
else if (chan <= 64)
rf = 0xf6;
else if (chan <= 128)
rf = 0xf4;
else
rf = 0xf3;
run_rt3070_rf_write(sc, 20, rf);
/* set pfd_delay */
if (chan <= 14)
rf = 0x15;
else if (chan <= 64)
rf = 0x3d;
else
rf = 0x01;
run_rt3070_rf_write(sc, 25, rf);
/* set rx_lo2 */
run_rt3070_rf_write(sc, 26, (chan <= 14) ? 0x85 : 0x87);
/* set ldo_rf_vc */
run_rt3070_rf_write(sc, 27, (chan <= 14) ? 0x00 : 0x01);
/* set drv_cc */
run_rt3070_rf_write(sc, 29, (chan <= 14) ? 0x9b : 0x9f);
run_read(sc, RT2860_GPIO_CTRL, &tmp);
tmp &= ~0x8080;
if (chan <= 14)
tmp |= 0x80;
run_write(sc, RT2860_GPIO_CTRL, tmp);
/* enable RF tuning */
run_rt3070_rf_read(sc, 7, &rf);
run_rt3070_rf_write(sc, 7, rf | 0x01);
run_delay(sc, 2);
}
static void
run_rt3593_set_chan(struct run_softc *sc, u_int chan)
{
int8_t txpow1, txpow2, txpow3;
uint8_t h20mhz, rf;
int i;
/* find the settings for this channel (we know it exists) */
for (i = 0; rt2860_rf2850[i].chan != chan; i++);
/* use Tx power values from EEPROM */
txpow1 = sc->txpow1[i];
txpow2 = sc->txpow2[i];
txpow3 = (sc->ntxchains == 3) ? sc->txpow3[i] : 0;
if (chan <= 14) {
run_bbp_write(sc, 25, sc->bbp25);
run_bbp_write(sc, 26, sc->bbp26);
} else {
/* Enable IQ phase correction. */
run_bbp_write(sc, 25, 0x09);
run_bbp_write(sc, 26, 0xff);
}
run_rt3070_rf_write(sc, 8, rt3070_freqs[i].n);
run_rt3070_rf_write(sc, 9, rt3070_freqs[i].k & 0x0f);
run_rt3070_rf_read(sc, 11, &rf);
rf = (rf & ~0x03) | (rt3070_freqs[i].r & 0x03);
run_rt3070_rf_write(sc, 11, rf);
/* Set pll_idoh. */
run_rt3070_rf_read(sc, 11, &rf);
rf &= ~0x4c;
rf |= (chan <= 14) ? 0x44 : 0x48;
run_rt3070_rf_write(sc, 11, rf);
if (chan <= 14)
rf = txpow1 & 0x1f;
else
rf = 0x40 | ((txpow1 & 0x18) << 1) | (txpow1 & 0x07);
run_rt3070_rf_write(sc, 53, rf);
if (chan <= 14)
rf = txpow2 & 0x1f;
else
rf = 0x40 | ((txpow2 & 0x18) << 1) | (txpow2 & 0x07);
run_rt3070_rf_write(sc, 55, rf);
if (chan <= 14)
rf = txpow3 & 0x1f;
else
rf = 0x40 | ((txpow3 & 0x18) << 1) | (txpow3 & 0x07);
run_rt3070_rf_write(sc, 54, rf);
rf = RT3070_RF_BLOCK | RT3070_PLL_PD;
if (sc->ntxchains == 3)
rf |= RT3070_TX0_PD | RT3070_TX1_PD | RT3070_TX2_PD;
else
rf |= RT3070_TX0_PD | RT3070_TX1_PD;
rf |= RT3070_RX0_PD | RT3070_RX1_PD | RT3070_RX2_PD;
run_rt3070_rf_write(sc, 1, rf);
run_adjust_freq_offset(sc);
run_rt3070_rf_write(sc, 31, (chan <= 14) ? 0xa0 : 0x80);
h20mhz = (sc->rf24_20mhz & 0x20) >> 5;
run_rt3070_rf_read(sc, 30, &rf);
rf = (rf & ~0x06) | (h20mhz << 1) | (h20mhz << 2);
run_rt3070_rf_write(sc, 30, rf);
run_rt3070_rf_read(sc, 36, &rf);
if (chan <= 14)
rf |= 0x80;
else
rf &= ~0x80;
run_rt3070_rf_write(sc, 36, rf);
/* Set vcolo_bs. */
run_rt3070_rf_write(sc, 34, (chan <= 14) ? 0x3c : 0x20);
/* Set pfd_delay. */
run_rt3070_rf_write(sc, 12, (chan <= 14) ? 0x1a : 0x12);
/* Set vco bias current control. */
run_rt3070_rf_read(sc, 6, &rf);
rf &= ~0xc0;
if (chan <= 14)
rf |= 0x40;
else if (chan <= 128)
rf |= 0x80;
else
rf |= 0x40;
run_rt3070_rf_write(sc, 6, rf);
run_rt3070_rf_read(sc, 30, &rf);
rf = (rf & ~0x18) | 0x10;
run_rt3070_rf_write(sc, 30, rf);
run_rt3070_rf_write(sc, 10, (chan <= 14) ? 0xd3 : 0xd8);
run_rt3070_rf_write(sc, 13, (chan <= 14) ? 0x12 : 0x23);
run_rt3070_rf_read(sc, 51, &rf);
rf = (rf & ~0x03) | 0x01;
run_rt3070_rf_write(sc, 51, rf);
/* Set tx_mx1_cc. */
run_rt3070_rf_read(sc, 51, &rf);
rf &= ~0x1c;
rf |= (chan <= 14) ? 0x14 : 0x10;
run_rt3070_rf_write(sc, 51, rf);
/* Set tx_mx1_ic. */
run_rt3070_rf_read(sc, 51, &rf);
rf &= ~0xe0;
rf |= (chan <= 14) ? 0x60 : 0x40;
run_rt3070_rf_write(sc, 51, rf);
/* Set tx_lo1_ic. */
run_rt3070_rf_read(sc, 49, &rf);
rf &= ~0x1c;
rf |= (chan <= 14) ? 0x0c : 0x08;
run_rt3070_rf_write(sc, 49, rf);
/* Set tx_lo1_en. */
run_rt3070_rf_read(sc, 50, &rf);
run_rt3070_rf_write(sc, 50, rf & ~0x20);
/* Set drv_cc. */
run_rt3070_rf_read(sc, 57, &rf);
rf &= ~0xfc;
rf |= (chan <= 14) ? 0x6c : 0x3c;
run_rt3070_rf_write(sc, 57, rf);
/* Set rx_mix1_ic, rxa_lnactr, lna_vc, lna_inbias_en and lna_en. */
run_rt3070_rf_write(sc, 44, (chan <= 14) ? 0x93 : 0x9b);
/* Set drv_gnd_a, tx_vga_cc_a and tx_mx2_gain. */
run_rt3070_rf_write(sc, 52, (chan <= 14) ? 0x45 : 0x05);
/* Enable VCO calibration. */
run_rt3070_rf_read(sc, 3, &rf);
rf &= ~RT5390_VCOCAL;
rf |= (chan <= 14) ? RT5390_VCOCAL : 0xbe;
run_rt3070_rf_write(sc, 3, rf);
if (chan <= 14)
rf = 0x23;
else if (chan <= 64)
rf = 0x36;
else if (chan <= 128)
rf = 0x32;
else
rf = 0x30;
run_rt3070_rf_write(sc, 39, rf);
if (chan <= 14)
rf = 0xbb;
else if (chan <= 64)
rf = 0xeb;
else if (chan <= 128)
rf = 0xb3;
else
rf = 0x9b;
run_rt3070_rf_write(sc, 45, rf);
/* Set FEQ/AEQ control. */
run_bbp_write(sc, 105, 0x34);
}
static void
run_rt5390_set_chan(struct run_softc *sc, u_int chan)
{
int8_t txpow1, txpow2;
uint8_t rf;
int i;
/* find the settings for this channel (we know it exists) */
for (i = 0; rt2860_rf2850[i].chan != chan; i++);
/* use Tx power values from EEPROM */
txpow1 = sc->txpow1[i];
txpow2 = sc->txpow2[i];
run_rt3070_rf_write(sc, 8, rt3070_freqs[i].n);
run_rt3070_rf_write(sc, 9, rt3070_freqs[i].k & 0x0f);
run_rt3070_rf_read(sc, 11, &rf);
rf = (rf & ~0x03) | (rt3070_freqs[i].r & 0x03);
run_rt3070_rf_write(sc, 11, rf);
run_rt3070_rf_read(sc, 49, &rf);
rf = (rf & ~0x3f) | (txpow1 & 0x3f);
/* The valid range of the RF R49 is 0x00 to 0x27. */
if ((rf & 0x3f) > 0x27)
rf = (rf & ~0x3f) | 0x27;
run_rt3070_rf_write(sc, 49, rf);
if (sc->mac_ver == 0x5392) {
run_rt3070_rf_read(sc, 50, &rf);
rf = (rf & ~0x3f) | (txpow2 & 0x3f);
/* The valid range of the RF R50 is 0x00 to 0x27. */
if ((rf & 0x3f) > 0x27)
rf = (rf & ~0x3f) | 0x27;
run_rt3070_rf_write(sc, 50, rf);
}
run_rt3070_rf_read(sc, 1, &rf);
rf |= RT3070_RF_BLOCK | RT3070_PLL_PD | RT3070_RX0_PD | RT3070_TX0_PD;
if (sc->mac_ver == 0x5392)
rf |= RT3070_RX1_PD | RT3070_TX1_PD;
run_rt3070_rf_write(sc, 1, rf);
if (sc->mac_ver != 0x5392) {
run_rt3070_rf_read(sc, 2, &rf);
rf |= 0x80;
run_rt3070_rf_write(sc, 2, rf);
run_delay(sc, 10);
rf &= 0x7f;
run_rt3070_rf_write(sc, 2, rf);
}
run_adjust_freq_offset(sc);
if (sc->mac_ver == 0x5392) {
/* Fix for RT5392C. */
if (sc->mac_rev >= 0x0223) {
if (chan <= 4)
rf = 0x0f;
else if (chan >= 5 && chan <= 7)
rf = 0x0e;
else
rf = 0x0d;
run_rt3070_rf_write(sc, 23, rf);
if (chan <= 4)
rf = 0x0c;
else if (chan == 5)
rf = 0x0b;
else if (chan >= 6 && chan <= 7)
rf = 0x0a;
else if (chan >= 8 && chan <= 10)
rf = 0x09;
else
rf = 0x08;
run_rt3070_rf_write(sc, 59, rf);
} else {
if (chan <= 11)
rf = 0x0f;
else
rf = 0x0b;
run_rt3070_rf_write(sc, 59, rf);
}
} else {
/* Fix for RT5390F. */
if (sc->mac_rev >= 0x0502) {
if (chan <= 11)
rf = 0x43;
else
rf = 0x23;
run_rt3070_rf_write(sc, 55, rf);
if (chan <= 11)
rf = 0x0f;
else if (chan == 12)
rf = 0x0d;
else
rf = 0x0b;
run_rt3070_rf_write(sc, 59, rf);
} else {
run_rt3070_rf_write(sc, 55, 0x44);
run_rt3070_rf_write(sc, 59, 0x8f);
}
}
/* Enable VCO calibration. */
run_rt3070_rf_read(sc, 3, &rf);
rf |= RT5390_VCOCAL;
run_rt3070_rf_write(sc, 3, rf);
}
static void
run_rt5592_set_chan(struct run_softc *sc, u_int chan)
{
const struct rt5592_freqs *freqs;
uint32_t tmp;
uint8_t reg, rf, txpow_bound;
int8_t txpow1, txpow2;
int i;
run_read(sc, RT5592_DEBUG_INDEX, &tmp);
freqs = (tmp & RT5592_SEL_XTAL) ?
rt5592_freqs_40mhz : rt5592_freqs_20mhz;
/* find the settings for this channel (we know it exists) */
for (i = 0; rt2860_rf2850[i].chan != chan; i++, freqs++);
/* use Tx power values from EEPROM */
txpow1 = sc->txpow1[i];
txpow2 = sc->txpow2[i];
run_read(sc, RT3070_LDO_CFG0, &tmp);
tmp &= ~0x1c000000;
if (chan > 14)
tmp |= 0x14000000;
run_write(sc, RT3070_LDO_CFG0, tmp);
/* N setting. */
run_rt3070_rf_write(sc, 8, freqs->n & 0xff);
run_rt3070_rf_read(sc, 9, &rf);
rf &= ~(1 << 4);
rf |= ((freqs->n & 0x0100) >> 8) << 4;
run_rt3070_rf_write(sc, 9, rf);
/* K setting. */
run_rt3070_rf_read(sc, 9, &rf);
rf &= ~0x0f;
rf |= (freqs->k & 0x0f);
run_rt3070_rf_write(sc, 9, rf);
/* Mode setting. */
run_rt3070_rf_read(sc, 11, &rf);
rf &= ~0x0c;
rf |= ((freqs->m - 0x8) & 0x3) << 2;
run_rt3070_rf_write(sc, 11, rf);
run_rt3070_rf_read(sc, 9, &rf);
rf &= ~(1 << 7);
rf |= (((freqs->m - 0x8) & 0x4) >> 2) << 7;
run_rt3070_rf_write(sc, 9, rf);
/* R setting. */
run_rt3070_rf_read(sc, 11, &rf);
rf &= ~0x03;
rf |= (freqs->r - 0x1);
run_rt3070_rf_write(sc, 11, rf);
if (chan <= 14) {
/* Initialize RF registers for 2GHZ. */
for (i = 0; i < nitems(rt5592_2ghz_def_rf); i++) {
run_rt3070_rf_write(sc, rt5592_2ghz_def_rf[i].reg,
rt5592_2ghz_def_rf[i].val);
}
rf = (chan <= 10) ? 0x07 : 0x06;
run_rt3070_rf_write(sc, 23, rf);
run_rt3070_rf_write(sc, 59, rf);
run_rt3070_rf_write(sc, 55, 0x43);
/*
* RF R49/R50 Tx power ALC code.
* G-band bit<7:6>=1:0, bit<5:0> ranges from 0x0 to 0x27.
*/
reg = 2;
txpow_bound = 0x27;
} else {
/* Initialize RF registers for 5GHZ. */
for (i = 0; i < nitems(rt5592_5ghz_def_rf); i++) {
run_rt3070_rf_write(sc, rt5592_5ghz_def_rf[i].reg,
rt5592_5ghz_def_rf[i].val);
}
for (i = 0; i < nitems(rt5592_chan_5ghz); i++) {
if (chan >= rt5592_chan_5ghz[i].firstchan &&
chan <= rt5592_chan_5ghz[i].lastchan) {
run_rt3070_rf_write(sc, rt5592_chan_5ghz[i].reg,
rt5592_chan_5ghz[i].val);
}
}
/*
* RF R49/R50 Tx power ALC code.
* A-band bit<7:6>=1:1, bit<5:0> ranges from 0x0 to 0x2b.
*/
reg = 3;
txpow_bound = 0x2b;
}
/* RF R49 ch0 Tx power ALC code. */
run_rt3070_rf_read(sc, 49, &rf);
rf &= ~0xc0;
rf |= (reg << 6);
rf = (rf & ~0x3f) | (txpow1 & 0x3f);
if ((rf & 0x3f) > txpow_bound)
rf = (rf & ~0x3f) | txpow_bound;
run_rt3070_rf_write(sc, 49, rf);
/* RF R50 ch1 Tx power ALC code. */
run_rt3070_rf_read(sc, 50, &rf);
rf &= ~(1 << 7 | 1 << 6);
rf |= (reg << 6);
rf = (rf & ~0x3f) | (txpow2 & 0x3f);
if ((rf & 0x3f) > txpow_bound)
rf = (rf & ~0x3f) | txpow_bound;
run_rt3070_rf_write(sc, 50, rf);
/* Enable RF_BLOCK, PLL_PD, RX0_PD, and TX0_PD. */
run_rt3070_rf_read(sc, 1, &rf);
rf |= (RT3070_RF_BLOCK | RT3070_PLL_PD | RT3070_RX0_PD | RT3070_TX0_PD);
if (sc->ntxchains > 1)
rf |= RT3070_TX1_PD;
if (sc->nrxchains > 1)
rf |= RT3070_RX1_PD;
run_rt3070_rf_write(sc, 1, rf);
run_rt3070_rf_write(sc, 6, 0xe4);
run_rt3070_rf_write(sc, 30, 0x10);
run_rt3070_rf_write(sc, 31, 0x80);
run_rt3070_rf_write(sc, 32, 0x80);
run_adjust_freq_offset(sc);
/* Enable VCO calibration. */
run_rt3070_rf_read(sc, 3, &rf);
rf |= RT5390_VCOCAL;
run_rt3070_rf_write(sc, 3, rf);
}
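/*
* Select the Rx antenna: RT5370 parts switch via BBP register 152,
* other chips via an MCU antenna-select command and the GPIO control
* register.  run_init_locked() calls this with aux=0 to pick the main
* antenna on 1T1R devices.
*/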
static void
run_set_rx_antenna(struct run_softc *sc, int aux)
{
uint32_t tmp;
uint8_t bbp152;
if (aux) {
if (sc->rf_rev == RT5390_RF_5370) {
run_bbp_read(sc, 152, &bbp152);
run_bbp_write(sc, 152, bbp152 & ~0x80);
} else {
run_mcu_cmd(sc, RT2860_MCU_CMD_ANTSEL, 0);
run_read(sc, RT2860_GPIO_CTRL, &tmp);
run_write(sc, RT2860_GPIO_CTRL, (tmp & ~0x0808) | 0x08);
}
} else {
if (sc->rf_rev == RT5390_RF_5370) {
run_bbp_read(sc, 152, &bbp152);
run_bbp_write(sc, 152, bbp152 | 0x80);
} else {
run_mcu_cmd(sc, RT2860_MCU_CMD_ANTSEL, 1);
run_read(sc, RT2860_GPIO_CTRL, &tmp);
run_write(sc, RT2860_GPIO_CTRL, tmp & ~0x0808);
}
}
}
static int
run_set_chan(struct run_softc *sc, struct ieee80211_channel *c)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
u_int chan, group;
chan = ieee80211_chan2ieee(ic, c);
if (chan == 0 || chan == IEEE80211_CHAN_ANY)
return (EINVAL);
if (sc->mac_ver == 0x5592)
run_rt5592_set_chan(sc, chan);
else if (sc->mac_ver >= 0x5390)
run_rt5390_set_chan(sc, chan);
else if (sc->mac_ver == 0x3593)
run_rt3593_set_chan(sc, chan);
else if (sc->mac_ver == 0x3572)
run_rt3572_set_chan(sc, chan);
else if (sc->mac_ver >= 0x3070)
run_rt3070_set_chan(sc, chan);
else
run_rt2870_set_chan(sc, chan);
/* determine channel group */
if (chan <= 14)
group = 0;
else if (chan <= 64)
group = 1;
else if (chan <= 128)
group = 2;
else
group = 3;
/* XXX necessary only when group has changed! */
run_select_chan_group(sc, group);
run_delay(sc, 10);
/* Perform IQ calibration. */
if (sc->mac_ver >= 0x5392)
run_iq_calib(sc, chan);
return (0);
}
static void
run_set_channel(struct ieee80211com *ic)
{
struct run_softc *sc = ic->ic_softc;
RUN_LOCK(sc);
run_set_chan(sc, ic->ic_curchan);
RUN_UNLOCK(sc);
return;
}
static void
run_scan_start(struct ieee80211com *ic)
{
struct run_softc *sc = ic->ic_softc;
uint32_t tmp;
RUN_LOCK(sc);
/* abort TSF synchronization */
run_read(sc, RT2860_BCN_TIME_CFG, &tmp);
run_write(sc, RT2860_BCN_TIME_CFG,
tmp & ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN |
RT2860_TBTT_TIMER_EN));
- run_set_bssid(sc, sc->sc_ifp->if_broadcastaddr);
+ run_set_bssid(sc, ieee80211broadcastaddr);
RUN_UNLOCK(sc);
return;
}
static void
run_scan_end(struct ieee80211com *ic)
{
struct run_softc *sc = ic->ic_softc;
RUN_LOCK(sc);
run_enable_tsf_sync(sc);
/* XXX keep local copy */
- run_set_bssid(sc, sc->sc_bssid);
+ run_set_bssid(sc, ic->ic_macaddr);
RUN_UNLOCK(sc);
return;
}
/*
* Could be called from ieee80211_node_timeout()
* (non-sleepable thread)
*/
static void
run_update_beacon(struct ieee80211vap *vap, int item)
{
struct ieee80211com *ic = vap->iv_ic;
struct run_softc *sc = ic->ic_softc;
struct run_vap *rvp = RUN_VAP(vap);
int mcast = 0;
uint32_t i;
KASSERT(vap != NULL, ("no beacon"));
switch (item) {
case IEEE80211_BEACON_ERP:
run_updateslot(ic);
break;
case IEEE80211_BEACON_HTINFO:
run_updateprot(ic);
break;
case IEEE80211_BEACON_TIM:
mcast = 1; /*TODO*/
break;
default:
break;
}
setbit(rvp->bo.bo_flags, item);
if (rvp->beacon_mbuf == NULL) {
rvp->beacon_mbuf = ieee80211_beacon_alloc(vap->iv_bss,
&rvp->bo);
if (rvp->beacon_mbuf == NULL)
return;
}
ieee80211_beacon_update(vap->iv_bss, &rvp->bo, rvp->beacon_mbuf, mcast);
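/*
* Defer the actual register writes to the cmdq task, since this
* function may be called from a non-sleepable context (see above).
*/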
i = RUN_CMDQ_GET(&sc->cmdq_store);
DPRINTF("cmdq_store=%d\n", i);
sc->cmdq[i].func = run_update_beacon_cb;
sc->cmdq[i].arg0 = vap;
ieee80211_runtask(ic, &sc->cmdq_task);
return;
}
static void
run_update_beacon_cb(void *arg)
{
struct ieee80211vap *vap = arg;
struct run_vap *rvp = RUN_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct run_softc *sc = ic->ic_softc;
struct rt2860_txwi txwi;
struct mbuf *m;
uint16_t txwisize;
uint8_t ridx;
if (vap->iv_bss->ni_chan == IEEE80211_CHAN_ANYC)
return;
if (ic->ic_bsschan == IEEE80211_CHAN_ANYC)
return;
/*
* No need to call ieee80211_beacon_update(), run_update_beacon()
* takes care of the appropriate calls.
*/
if (rvp->beacon_mbuf == NULL) {
rvp->beacon_mbuf = ieee80211_beacon_alloc(vap->iv_bss,
&rvp->bo);
if (rvp->beacon_mbuf == NULL)
return;
}
m = rvp->beacon_mbuf;
memset(&txwi, 0, sizeof(txwi));
txwi.wcid = 0xff;
txwi.len = htole16(m->m_pkthdr.len);
/* send beacons at the lowest available rate */
ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ?
RT2860_RIDX_OFDM6 : RT2860_RIDX_CCK1;
txwi.phy = htole16(rt2860_rates[ridx].mcs);
if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM)
txwi.phy |= htole16(RT2860_PHY_OFDM);
txwi.txop = RT2860_TX_TXOP_HT;
txwi.flags = RT2860_TX_TS;
txwi.xflags = RT2860_TX_NSEQ;
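/* The RT5592 uses a TXWI that is one 32-bit word longer. */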
txwisize = (sc->mac_ver == 0x5592) ?
sizeof(txwi) + sizeof(uint32_t) : sizeof(txwi);
run_write_region_1(sc, RT2860_BCN_BASE(rvp->rvp_id), (uint8_t *)&txwi,
txwisize);
run_write_region_1(sc, RT2860_BCN_BASE(rvp->rvp_id) + txwisize,
mtod(m, uint8_t *), (m->m_pkthdr.len + 1) & ~1);
}
static void
run_updateprot(struct ieee80211com *ic)
{
struct run_softc *sc = ic->ic_softc;
uint32_t i;
i = RUN_CMDQ_GET(&sc->cmdq_store);
DPRINTF("cmdq_store=%d\n", i);
sc->cmdq[i].func = run_updateprot_cb;
sc->cmdq[i].arg0 = ic;
ieee80211_runtask(ic, &sc->cmdq_task);
}
static void
run_updateprot_cb(void *arg)
{
struct ieee80211com *ic = arg;
struct run_softc *sc = ic->ic_softc;
uint32_t tmp;
tmp = RT2860_RTSTH_EN | RT2860_PROT_NAV_SHORT | RT2860_TXOP_ALLOW_ALL;
/* setup protection frame rate (MCS code) */
tmp |= (ic->ic_curmode == IEEE80211_MODE_11A) ?
rt2860_rates[RT2860_RIDX_OFDM6].mcs | RT2860_PHY_OFDM :
rt2860_rates[RT2860_RIDX_CCK11].mcs;
/* CCK frames don't require protection */
run_write(sc, RT2860_CCK_PROT_CFG, tmp);
if (ic->ic_flags & IEEE80211_F_USEPROT) {
if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
tmp |= RT2860_PROT_CTRL_RTS_CTS;
else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
tmp |= RT2860_PROT_CTRL_CTS;
}
run_write(sc, RT2860_OFDM_PROT_CFG, tmp);
}
static void
run_usb_timeout_cb(void *arg)
{
struct ieee80211vap *vap = arg;
struct run_softc *sc = vap->iv_ic->ic_softc;
RUN_LOCK_ASSERT(sc, MA_OWNED);
if (vap->iv_state == IEEE80211_S_RUN &&
vap->iv_opmode != IEEE80211_M_STA)
run_reset_livelock(sc);
else if (vap->iv_state == IEEE80211_S_SCAN) {
DPRINTF("timeout caused by scan\n");
/* cancel bgscan */
ieee80211_cancel_scan(vap);
} else
DPRINTF("timeout by unknown cause\n");
}
static void
run_reset_livelock(struct run_softc *sc)
{
uint32_t tmp;
RUN_LOCK_ASSERT(sc, MA_OWNED);
/*
* In IBSS or HostAP modes (when the hardware sends beacons), the MAC
* can run into a livelock and start sending CTS-to-self frames like
* crazy if protection is enabled. Reset the MAC/BBP for a while.
*/
run_read(sc, RT2860_DEBUG, &tmp);
DPRINTFN(3, "debug reg %08x\n", tmp);
if ((tmp & (1 << 29)) && (tmp & (1 << 7 | 1 << 5))) {
DPRINTF("CTS-to-self livelock detected\n");
run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_SRST);
run_delay(sc, 1);
run_write(sc, RT2860_MAC_SYS_CTRL,
RT2860_MAC_RX_EN | RT2860_MAC_TX_EN);
}
}
static void
run_update_promisc_locked(struct run_softc *sc)
{
uint32_t tmp;
run_read(sc, RT2860_RX_FILTR_CFG, &tmp);
tmp |= RT2860_DROP_UC_NOME;
- if (sc->sc_ifp->if_flags & IFF_PROMISC)
+ if (sc->sc_ic.ic_promisc > 0)
tmp &= ~RT2860_DROP_UC_NOME;
run_write(sc, RT2860_RX_FILTR_CFG, tmp);
- DPRINTF("%s promiscuous mode\n", (sc->sc_ifp->if_flags & IFF_PROMISC) ?
+ DPRINTF("%s promiscuous mode\n", (sc->sc_ic.ic_promisc > 0) ?
"entering" : "leaving");
}
static void
run_update_promisc(struct ieee80211com *ic)
{
struct run_softc *sc = ic->ic_softc;
- if ((ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ if ((sc->sc_flags & RUN_RUNNING) == 0)
return;
RUN_LOCK(sc);
run_update_promisc_locked(sc);
RUN_UNLOCK(sc);
}
static void
run_enable_tsf_sync(struct run_softc *sc)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t tmp;
DPRINTF("rvp_id=%d ic_opmode=%d\n", RUN_VAP(vap)->rvp_id,
ic->ic_opmode);
run_read(sc, RT2860_BCN_TIME_CFG, &tmp);
tmp &= ~0x1fffff;
tmp |= vap->iv_bss->ni_intval * 16;
tmp |= RT2860_TSF_TIMER_EN | RT2860_TBTT_TIMER_EN;
if (ic->ic_opmode == IEEE80211_M_STA) {
/*
* Local TSF is always updated with remote TSF on beacon
* reception.
*/
tmp |= 1 << RT2860_TSF_SYNC_MODE_SHIFT;
} else if (ic->ic_opmode == IEEE80211_M_IBSS) {
tmp |= RT2860_BCN_TX_EN;
/*
* Local TSF is updated with remote TSF on beacon reception
* only if the remote TSF is greater than local TSF.
*/
tmp |= 2 << RT2860_TSF_SYNC_MODE_SHIFT;
} else if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
ic->ic_opmode == IEEE80211_M_MBSS) {
tmp |= RT2860_BCN_TX_EN;
/* SYNC with nobody */
tmp |= 3 << RT2860_TSF_SYNC_MODE_SHIFT;
} else {
DPRINTF("Enabling TSF failed. undefined opmode\n");
return;
}
run_write(sc, RT2860_BCN_TIME_CFG, tmp);
}
static void
run_enable_mrr(struct run_softc *sc)
{
#define CCK(mcs) (mcs)
#define OFDM(mcs) (1 << 3 | (mcs))
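/*
* Program the legacy fallback rate tables: each nibble selects the
* rate used after a transmit failure at the corresponding rate (see
* the per-line comments below).
*/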
run_write(sc, RT2860_LG_FBK_CFG0,
OFDM(6) << 28 | /* 54->48 */
OFDM(5) << 24 | /* 48->36 */
OFDM(4) << 20 | /* 36->24 */
OFDM(3) << 16 | /* 24->18 */
OFDM(2) << 12 | /* 18->12 */
OFDM(1) << 8 | /* 12-> 9 */
OFDM(0) << 4 | /* 9-> 6 */
OFDM(0)); /* 6-> 6 */
run_write(sc, RT2860_LG_FBK_CFG1,
CCK(2) << 12 | /* 11->5.5 */
CCK(1) << 8 | /* 5.5-> 2 */
CCK(0) << 4 | /* 2-> 1 */
CCK(0)); /* 1-> 1 */
#undef OFDM
#undef CCK
}
static void
run_set_txpreamble(struct run_softc *sc)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
run_read(sc, RT2860_AUTO_RSP_CFG, &tmp);
if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
tmp |= RT2860_CCK_SHORT_EN;
else
tmp &= ~RT2860_CCK_SHORT_EN;
run_write(sc, RT2860_AUTO_RSP_CFG, tmp);
}
static void
run_set_basicrates(struct run_softc *sc)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
/* set basic rates mask */
if (ic->ic_curmode == IEEE80211_MODE_11B)
run_write(sc, RT2860_LEGACY_BASIC_RATE, 0x003);
else if (ic->ic_curmode == IEEE80211_MODE_11A)
run_write(sc, RT2860_LEGACY_BASIC_RATE, 0x150);
else /* 11g */
run_write(sc, RT2860_LEGACY_BASIC_RATE, 0x15f);
}
static void
run_set_leds(struct run_softc *sc, uint16_t which)
{
(void)run_mcu_cmd(sc, RT2860_MCU_CMD_LEDS,
which | (sc->leds & 0x7f));
}
static void
run_set_bssid(struct run_softc *sc, const uint8_t *bssid)
{
run_write(sc, RT2860_MAC_BSSID_DW0,
bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24);
run_write(sc, RT2860_MAC_BSSID_DW1,
bssid[4] | bssid[5] << 8);
}
static void
run_set_macaddr(struct run_softc *sc, const uint8_t *addr)
{
run_write(sc, RT2860_MAC_ADDR_DW0,
addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
run_write(sc, RT2860_MAC_ADDR_DW1,
addr[4] | addr[5] << 8 | 0xff << 16);
}
static void
run_updateslot(struct ieee80211com *ic)
{
struct run_softc *sc = ic->ic_softc;
uint32_t i;
i = RUN_CMDQ_GET(&sc->cmdq_store);
DPRINTF("cmdq_store=%d\n", i);
sc->cmdq[i].func = run_updateslot_cb;
- sc->cmdq[i].arg0 = ic->ic_ifp;
+ sc->cmdq[i].arg0 = ic;
ieee80211_runtask(ic, &sc->cmdq_task);
return;
}
/* ARGSUSED */
static void
run_updateslot_cb(void *arg)
{
- struct ifnet *ifp = arg;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = arg;
struct run_softc *sc = ic->ic_softc;
uint32_t tmp;
run_read(sc, RT2860_BKOFF_SLOT_CFG, &tmp);
tmp &= ~0xff;
tmp |= (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20;
run_write(sc, RT2860_BKOFF_SLOT_CFG, tmp);
}
static void
run_update_mcast(struct ieee80211com *ic)
{
-
- /* h/w filter supports getting everything or nothing */
- ic->ic_ifp->if_flags |= IFF_ALLMULTI;
}
static int8_t
run_rssi2dbm(struct run_softc *sc, uint8_t rssi, uint8_t rxchain)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_channel *c = ic->ic_curchan;
int delta;
if (IEEE80211_IS_CHAN_5GHZ(c)) {
u_int chan = ieee80211_chan2ieee(ic, c);
delta = sc->rssi_5ghz[rxchain];
/* determine channel group */
if (chan <= 64)
delta -= sc->lna[1];
else if (chan <= 128)
delta -= sc->lna[2];
else
delta -= sc->lna[3];
} else
delta = sc->rssi_2ghz[rxchain] - sc->lna[0];
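/* Reported power is -12 dBm minus the calibration delta and the raw RSSI. */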
return (-12 - delta - rssi);
}
static void
run_rt5390_bbp_init(struct run_softc *sc)
{
int i;
uint8_t bbp;
/* Apply maximum likelihood detection for 2 stream case. */
run_bbp_read(sc, 105, &bbp);
if (sc->nrxchains > 1)
run_bbp_write(sc, 105, bbp | RT5390_MLD);
/* Avoid data loss and CRC errors. */
run_bbp_read(sc, 4, &bbp);
run_bbp_write(sc, 4, bbp | RT5390_MAC_IF_CTRL);
if (sc->mac_ver == 0x5592) {
for (i = 0; i < nitems(rt5592_def_bbp); i++) {
run_bbp_write(sc, rt5592_def_bbp[i].reg,
rt5592_def_bbp[i].val);
}
for (i = 0; i < nitems(rt5592_bbp_r196); i++) {
run_bbp_write(sc, 195, i + 0x80);
run_bbp_write(sc, 196, rt5592_bbp_r196[i]);
}
} else {
for (i = 0; i < nitems(rt5390_def_bbp); i++) {
run_bbp_write(sc, rt5390_def_bbp[i].reg,
rt5390_def_bbp[i].val);
}
}
if (sc->mac_ver == 0x5392) {
run_bbp_write(sc, 88, 0x90);
run_bbp_write(sc, 95, 0x9a);
run_bbp_write(sc, 98, 0x12);
run_bbp_write(sc, 106, 0x12);
run_bbp_write(sc, 134, 0xd0);
run_bbp_write(sc, 135, 0xf6);
run_bbp_write(sc, 148, 0x84);
}
run_bbp_read(sc, 152, &bbp);
run_bbp_write(sc, 152, bbp | 0x80);
/* Fix BBP254 for RT5592C. */
if (sc->mac_ver == 0x5592 && sc->mac_rev >= 0x0221) {
run_bbp_read(sc, 254, &bbp);
run_bbp_write(sc, 254, bbp | 0x80);
}
/* Disable hardware antenna diversity. */
if (sc->mac_ver == 0x5390)
run_bbp_write(sc, 154, 0);
/* Initialize Rx CCK/OFDM frequency offset report. */
run_bbp_write(sc, 142, 1);
run_bbp_write(sc, 143, 57);
}
static int
run_bbp_init(struct run_softc *sc)
{
int i, error, ntries;
uint8_t bbp0;
/* wait for BBP to wake up */
for (ntries = 0; ntries < 20; ntries++) {
if ((error = run_bbp_read(sc, 0, &bbp0)) != 0)
return error;
if (bbp0 != 0 && bbp0 != 0xff)
break;
}
if (ntries == 20)
return (ETIMEDOUT);
/* initialize BBP registers to default values */
if (sc->mac_ver >= 0x5390)
run_rt5390_bbp_init(sc);
else {
for (i = 0; i < nitems(rt2860_def_bbp); i++) {
run_bbp_write(sc, rt2860_def_bbp[i].reg,
rt2860_def_bbp[i].val);
}
}
if (sc->mac_ver == 0x3593) {
run_bbp_write(sc, 79, 0x13);
run_bbp_write(sc, 80, 0x05);
run_bbp_write(sc, 81, 0x33);
run_bbp_write(sc, 86, 0x46);
run_bbp_write(sc, 137, 0x0f);
}
/* fix BBP84 for RT2860E */
if (sc->mac_ver == 0x2860 && sc->mac_rev != 0x0101)
run_bbp_write(sc, 84, 0x19);
if (sc->mac_ver >= 0x3070 && (sc->mac_ver != 0x3593 &&
sc->mac_ver != 0x5592)) {
run_bbp_write(sc, 79, 0x13);
run_bbp_write(sc, 80, 0x05);
run_bbp_write(sc, 81, 0x33);
} else if (sc->mac_ver == 0x2860 && sc->mac_rev == 0x0100) {
run_bbp_write(sc, 69, 0x16);
run_bbp_write(sc, 73, 0x12);
}
return (0);
}
static int
run_rt3070_rf_init(struct run_softc *sc)
{
uint32_t tmp;
uint8_t bbp4, mingain, rf, target;
int i;
run_rt3070_rf_read(sc, 30, &rf);
/* toggle RF R30 bit 7 */
run_rt3070_rf_write(sc, 30, rf | 0x80);
run_delay(sc, 10);
run_rt3070_rf_write(sc, 30, rf & ~0x80);
/* initialize RF registers to default value */
if (sc->mac_ver == 0x3572) {
for (i = 0; i < nitems(rt3572_def_rf); i++) {
run_rt3070_rf_write(sc, rt3572_def_rf[i].reg,
rt3572_def_rf[i].val);
}
} else {
for (i = 0; i < nitems(rt3070_def_rf); i++) {
run_rt3070_rf_write(sc, rt3070_def_rf[i].reg,
rt3070_def_rf[i].val);
}
}
if (sc->mac_ver == 0x3070 && sc->mac_rev < 0x0201) {
/*
* Change voltage from 1.2V to 1.35V for RT3070.
* The DAC issue (RT3070_LDO_CFG0) has been fixed
* in RT3070(F).
*/
run_read(sc, RT3070_LDO_CFG0, &tmp);
tmp = (tmp & ~0x0f000000) | 0x0d000000;
run_write(sc, RT3070_LDO_CFG0, tmp);
} else if (sc->mac_ver == 0x3071) {
run_rt3070_rf_read(sc, 6, &rf);
run_rt3070_rf_write(sc, 6, rf | 0x40);
run_rt3070_rf_write(sc, 31, 0x14);
run_read(sc, RT3070_LDO_CFG0, &tmp);
tmp &= ~0x1f000000;
if (sc->mac_rev < 0x0211)
tmp |= 0x0d000000; /* 1.3V */
else
tmp |= 0x01000000; /* 1.2V */
run_write(sc, RT3070_LDO_CFG0, tmp);
/* patch LNA_PE_G1 */
run_read(sc, RT3070_GPIO_SWITCH, &tmp);
run_write(sc, RT3070_GPIO_SWITCH, tmp & ~0x20);
} else if (sc->mac_ver == 0x3572) {
run_rt3070_rf_read(sc, 6, &rf);
run_rt3070_rf_write(sc, 6, rf | 0x40);
/* increase voltage from 1.2V to 1.35V */
run_read(sc, RT3070_LDO_CFG0, &tmp);
tmp = (tmp & ~0x1f000000) | 0x0d000000;
run_write(sc, RT3070_LDO_CFG0, tmp);
if (sc->mac_rev < 0x0211 || !sc->patch_dac) {
run_delay(sc, 1); /* wait for 1msec */
/* decrease voltage back to 1.2V */
tmp = (tmp & ~0x1f000000) | 0x01000000;
run_write(sc, RT3070_LDO_CFG0, tmp);
}
}
/* select 20MHz bandwidth */
run_rt3070_rf_read(sc, 31, &rf);
run_rt3070_rf_write(sc, 31, rf & ~0x20);
/* calibrate filter for 20MHz bandwidth */
sc->rf24_20mhz = 0x1f; /* default value */
target = (sc->mac_ver < 0x3071) ? 0x16 : 0x13;
run_rt3070_filter_calib(sc, 0x07, target, &sc->rf24_20mhz);
/* select 40MHz bandwidth */
run_bbp_read(sc, 4, &bbp4);
run_bbp_write(sc, 4, (bbp4 & ~0x18) | 0x10);
run_rt3070_rf_read(sc, 31, &rf);
run_rt3070_rf_write(sc, 31, rf | 0x20);
/* calibrate filter for 40MHz bandwidth */
sc->rf24_40mhz = 0x2f; /* default value */
target = (sc->mac_ver < 0x3071) ? 0x19 : 0x15;
run_rt3070_filter_calib(sc, 0x27, target, &sc->rf24_40mhz);
/* go back to 20MHz bandwidth */
run_bbp_read(sc, 4, &bbp4);
run_bbp_write(sc, 4, bbp4 & ~0x18);
if (sc->mac_ver == 0x3572) {
/* save default BBP registers 25 and 26 values */
run_bbp_read(sc, 25, &sc->bbp25);
run_bbp_read(sc, 26, &sc->bbp26);
} else if (sc->mac_rev < 0x0201 || sc->mac_rev < 0x0211)
run_rt3070_rf_write(sc, 27, 0x03);
run_read(sc, RT3070_OPT_14, &tmp);
run_write(sc, RT3070_OPT_14, tmp | 1);
if (sc->mac_ver == 0x3070 || sc->mac_ver == 0x3071) {
run_rt3070_rf_read(sc, 17, &rf);
rf &= ~RT3070_TX_LO1;
if ((sc->mac_ver == 0x3070 ||
(sc->mac_ver == 0x3071 && sc->mac_rev >= 0x0211)) &&
!sc->ext_2ghz_lna)
rf |= 0x20; /* fix for long range Rx issue */
mingain = (sc->mac_ver == 0x3070) ? 1 : 2;
if (sc->txmixgain_2ghz >= mingain)
rf = (rf & ~0x7) | sc->txmixgain_2ghz;
run_rt3070_rf_write(sc, 17, rf);
}
if (sc->mac_ver == 0x3071) {
run_rt3070_rf_read(sc, 1, &rf);
rf &= ~(RT3070_RX0_PD | RT3070_TX0_PD);
rf |= RT3070_RF_BLOCK | RT3070_RX1_PD | RT3070_TX1_PD;
run_rt3070_rf_write(sc, 1, rf);
run_rt3070_rf_read(sc, 15, &rf);
run_rt3070_rf_write(sc, 15, rf & ~RT3070_TX_LO2);
run_rt3070_rf_read(sc, 20, &rf);
run_rt3070_rf_write(sc, 20, rf & ~RT3070_RX_LO1);
run_rt3070_rf_read(sc, 21, &rf);
run_rt3070_rf_write(sc, 21, rf & ~RT3070_RX_LO2);
}
if (sc->mac_ver == 0x3070 || sc->mac_ver == 0x3071) {
/* fix Tx to Rx IQ glitch by raising RF voltage */
run_rt3070_rf_read(sc, 27, &rf);
rf &= ~0x77;
if (sc->mac_rev < 0x0211)
rf |= 0x03;
run_rt3070_rf_write(sc, 27, rf);
}
return (0);
}
static void
run_rt3593_rf_init(struct run_softc *sc)
{
uint32_t tmp;
uint8_t rf;
int i;
/* Disable the GPIO bits 4 and 7 for LNA PE control. */
run_read(sc, RT3070_GPIO_SWITCH, &tmp);
tmp &= ~(1 << 4 | 1 << 7);
run_write(sc, RT3070_GPIO_SWITCH, tmp);
/* Initialize RF registers to default value. */
for (i = 0; i < nitems(rt3593_def_rf); i++) {
run_rt3070_rf_write(sc, rt3593_def_rf[i].reg,
rt3593_def_rf[i].val);
}
/* Toggle RF R2 to initiate calibration. */
run_rt3070_rf_write(sc, 2, RT5390_RESCAL);
/* Initialize RF frequency offset. */
run_adjust_freq_offset(sc);
run_rt3070_rf_read(sc, 18, &rf);
run_rt3070_rf_write(sc, 18, rf | RT3593_AUTOTUNE_BYPASS);
/*
* Increase voltage from 1.2V to 1.35V, wait 1 msec, then
* decrease it back to 1.2V.
*/
run_read(sc, RT3070_LDO_CFG0, &tmp);
tmp = (tmp & ~0x1f000000) | 0x0d000000;
run_write(sc, RT3070_LDO_CFG0, tmp);
run_delay(sc, 1);
tmp = (tmp & ~0x1f000000) | 0x01000000;
run_write(sc, RT3070_LDO_CFG0, tmp);
sc->rf24_20mhz = 0x1f;
sc->rf24_40mhz = 0x2f;
/* Save default BBP registers 25 and 26 values. */
run_bbp_read(sc, 25, &sc->bbp25);
run_bbp_read(sc, 26, &sc->bbp26);
run_read(sc, RT3070_OPT_14, &tmp);
run_write(sc, RT3070_OPT_14, tmp | 1);
}
static void
run_rt5390_rf_init(struct run_softc *sc)
{
uint32_t tmp;
uint8_t rf;
int i;
/* Toggle RF R2 to initiate calibration. */
if (sc->mac_ver == 0x5390) {
run_rt3070_rf_read(sc, 2, &rf);
run_rt3070_rf_write(sc, 2, rf | RT5390_RESCAL);
run_delay(sc, 10);
run_rt3070_rf_write(sc, 2, rf & ~RT5390_RESCAL);
} else {
run_rt3070_rf_write(sc, 2, RT5390_RESCAL);
run_delay(sc, 10);
}
/* Initialize RF registers to default value. */
if (sc->mac_ver == 0x5592) {
for (i = 0; i < nitems(rt5592_def_rf); i++) {
run_rt3070_rf_write(sc, rt5592_def_rf[i].reg,
rt5592_def_rf[i].val);
}
/* Initialize RF frequency offset. */
run_adjust_freq_offset(sc);
} else if (sc->mac_ver == 0x5392) {
for (i = 0; i < nitems(rt5392_def_rf); i++) {
run_rt3070_rf_write(sc, rt5392_def_rf[i].reg,
rt5392_def_rf[i].val);
}
if (sc->mac_rev >= 0x0223) {
run_rt3070_rf_write(sc, 23, 0x0f);
run_rt3070_rf_write(sc, 24, 0x3e);
run_rt3070_rf_write(sc, 51, 0x32);
run_rt3070_rf_write(sc, 53, 0x22);
run_rt3070_rf_write(sc, 56, 0xc1);
run_rt3070_rf_write(sc, 59, 0x0f);
}
} else {
for (i = 0; i < nitems(rt5390_def_rf); i++) {
run_rt3070_rf_write(sc, rt5390_def_rf[i].reg,
rt5390_def_rf[i].val);
}
if (sc->mac_rev >= 0x0502) {
run_rt3070_rf_write(sc, 6, 0xe0);
run_rt3070_rf_write(sc, 25, 0x80);
run_rt3070_rf_write(sc, 46, 0x73);
run_rt3070_rf_write(sc, 53, 0x00);
run_rt3070_rf_write(sc, 56, 0x42);
run_rt3070_rf_write(sc, 61, 0xd1);
}
}
sc->rf24_20mhz = 0x1f; /* default value */
sc->rf24_40mhz = (sc->mac_ver == 0x5592) ? 0 : 0x2f;
if (sc->mac_rev < 0x0211)
run_rt3070_rf_write(sc, 27, 0x3);
run_read(sc, RT3070_OPT_14, &tmp);
run_write(sc, RT3070_OPT_14, tmp | 1);
}
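/*
* Calibrate the baseband filter (RF register 24).  With baseband
* loopback enabled, a passband test tone is transmitted and its power
* read back from BBP register 55; RF R24 is then incremented while
* sending a stopband tone until the passband/stopband power difference
* exceeds 'target', backtracking one step once it does.
*/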
static int
run_rt3070_filter_calib(struct run_softc *sc, uint8_t init, uint8_t target,
uint8_t *val)
{
uint8_t rf22, rf24;
uint8_t bbp55_pb, bbp55_sb, delta;
int ntries;
/* program filter */
run_rt3070_rf_read(sc, 24, &rf24);
rf24 = (rf24 & 0xc0) | init; /* initial filter value */
run_rt3070_rf_write(sc, 24, rf24);
/* enable baseband loopback mode */
run_rt3070_rf_read(sc, 22, &rf22);
run_rt3070_rf_write(sc, 22, rf22 | 0x01);
/* set power and frequency of passband test tone */
run_bbp_write(sc, 24, 0x00);
for (ntries = 0; ntries < 100; ntries++) {
/* transmit test tone */
run_bbp_write(sc, 25, 0x90);
run_delay(sc, 10);
/* read received power */
run_bbp_read(sc, 55, &bbp55_pb);
if (bbp55_pb != 0)
break;
}
if (ntries == 100)
return (ETIMEDOUT);
/* set power and frequency of stopband test tone */
run_bbp_write(sc, 24, 0x06);
for (ntries = 0; ntries < 100; ntries++) {
/* transmit test tone */
run_bbp_write(sc, 25, 0x90);
run_delay(sc, 10);
/* read received power */
run_bbp_read(sc, 55, &bbp55_sb);
delta = bbp55_pb - bbp55_sb;
if (delta > target)
break;
/* reprogram filter */
rf24++;
run_rt3070_rf_write(sc, 24, rf24);
}
if (ntries < 100) {
if (rf24 != init)
rf24--; /* backtrack */
*val = rf24;
run_rt3070_rf_write(sc, 24, rf24);
}
/* restore initial state */
run_bbp_write(sc, 24, 0x00);
/* disable baseband loopback mode */
run_rt3070_rf_read(sc, 22, &rf22);
run_rt3070_rf_write(sc, 22, rf22 & ~0x01);
return (0);
}
static void
run_rt3070_rf_setup(struct run_softc *sc)
{
uint8_t bbp, rf;
int i;
if (sc->mac_ver == 0x3572) {
/* enable DC filter */
if (sc->mac_rev >= 0x0201)
run_bbp_write(sc, 103, 0xc0);
run_bbp_read(sc, 138, &bbp);
if (sc->ntxchains == 1)
bbp |= 0x20; /* turn off DAC1 */
if (sc->nrxchains == 1)
bbp &= ~0x02; /* turn off ADC1 */
run_bbp_write(sc, 138, bbp);
if (sc->mac_rev >= 0x0211) {
/* improve power consumption */
run_bbp_read(sc, 31, &bbp);
run_bbp_write(sc, 31, bbp & ~0x03);
}
run_rt3070_rf_read(sc, 16, &rf);
rf = (rf & ~0x07) | sc->txmixgain_2ghz;
run_rt3070_rf_write(sc, 16, rf);
} else if (sc->mac_ver == 0x3071) {
if (sc->mac_rev >= 0x0211) {
/* enable DC filter */
run_bbp_write(sc, 103, 0xc0);
/* improve power consumption */
run_bbp_read(sc, 31, &bbp);
run_bbp_write(sc, 31, bbp & ~0x03);
}
run_bbp_read(sc, 138, &bbp);
if (sc->ntxchains == 1)
bbp |= 0x20; /* turn off DAC1 */
if (sc->nrxchains == 1)
bbp &= ~0x02; /* turn off ADC1 */
run_bbp_write(sc, 138, bbp);
run_write(sc, RT2860_TX_SW_CFG1, 0);
if (sc->mac_rev < 0x0211) {
run_write(sc, RT2860_TX_SW_CFG2,
sc->patch_dac ? 0x2c : 0x0f);
} else
run_write(sc, RT2860_TX_SW_CFG2, 0);
} else if (sc->mac_ver == 0x3070) {
if (sc->mac_rev >= 0x0201) {
/* enable DC filter */
run_bbp_write(sc, 103, 0xc0);
/* improve power consumption */
run_bbp_read(sc, 31, &bbp);
run_bbp_write(sc, 31, bbp & ~0x03);
}
if (sc->mac_rev < 0x0201) {
run_write(sc, RT2860_TX_SW_CFG1, 0);
run_write(sc, RT2860_TX_SW_CFG2, 0x2c);
} else
run_write(sc, RT2860_TX_SW_CFG2, 0);
}
/* initialize RF registers from ROM for >= RT3071 */
if (sc->mac_ver >= 0x3071) {
for (i = 0; i < 10; i++) {
if (sc->rf[i].reg == 0 || sc->rf[i].reg == 0xff)
continue;
run_rt3070_rf_write(sc, sc->rf[i].reg, sc->rf[i].val);
}
}
}
static void
run_rt3593_rf_setup(struct run_softc *sc)
{
uint8_t bbp, rf;
if (sc->mac_rev >= 0x0211) {
/* Enable DC filter. */
run_bbp_write(sc, 103, 0xc0);
}
run_write(sc, RT2860_TX_SW_CFG1, 0);
if (sc->mac_rev < 0x0211) {
run_write(sc, RT2860_TX_SW_CFG2,
sc->patch_dac ? 0x2c : 0x0f);
} else
run_write(sc, RT2860_TX_SW_CFG2, 0);
run_rt3070_rf_read(sc, 50, &rf);
run_rt3070_rf_write(sc, 50, rf & ~RT3593_TX_LO2);
run_rt3070_rf_read(sc, 51, &rf);
rf = (rf & ~(RT3593_TX_LO1 | 0x0c)) |
((sc->txmixgain_2ghz & 0x07) << 2);
run_rt3070_rf_write(sc, 51, rf);
run_rt3070_rf_read(sc, 38, &rf);
run_rt3070_rf_write(sc, 38, rf & ~RT5390_RX_LO1);
run_rt3070_rf_read(sc, 39, &rf);
run_rt3070_rf_write(sc, 39, rf & ~RT5390_RX_LO2);
run_rt3070_rf_read(sc, 1, &rf);
run_rt3070_rf_write(sc, 1, rf & ~(RT3070_RF_BLOCK | RT3070_PLL_PD));
run_rt3070_rf_read(sc, 30, &rf);
rf = (rf & ~0x18) | 0x10;
run_rt3070_rf_write(sc, 30, rf);
/* Apply maximum likelihood detection for 2 stream case. */
run_bbp_read(sc, 105, &bbp);
if (sc->nrxchains > 1)
run_bbp_write(sc, 105, bbp | RT5390_MLD);
/* Avoid data loss and CRC errors. */
run_bbp_read(sc, 4, &bbp);
run_bbp_write(sc, 4, bbp | RT5390_MAC_IF_CTRL);
run_bbp_write(sc, 92, 0x02);
run_bbp_write(sc, 82, 0x82);
run_bbp_write(sc, 106, 0x05);
run_bbp_write(sc, 104, 0x92);
run_bbp_write(sc, 88, 0x90);
run_bbp_write(sc, 148, 0xc8);
run_bbp_write(sc, 47, 0x48);
run_bbp_write(sc, 120, 0x50);
run_bbp_write(sc, 163, 0x9d);
/* SNR mapping. */
run_bbp_write(sc, 142, 0x06);
run_bbp_write(sc, 143, 0xa0);
run_bbp_write(sc, 142, 0x07);
run_bbp_write(sc, 143, 0xa1);
run_bbp_write(sc, 142, 0x08);
run_bbp_write(sc, 143, 0xa2);
run_bbp_write(sc, 31, 0x08);
run_bbp_write(sc, 68, 0x0b);
run_bbp_write(sc, 105, 0x04);
}
static void
run_rt5390_rf_setup(struct run_softc *sc)
{
uint8_t bbp, rf;
if (sc->mac_rev >= 0x0211) {
/* Enable DC filter. */
run_bbp_write(sc, 103, 0xc0);
if (sc->mac_ver != 0x5592) {
/* Improve power consumption. */
run_bbp_read(sc, 31, &bbp);
run_bbp_write(sc, 31, bbp & ~0x03);
}
}
run_bbp_read(sc, 138, &bbp);
if (sc->ntxchains == 1)
bbp |= 0x20; /* turn off DAC1 */
if (sc->nrxchains == 1)
bbp &= ~0x02; /* turn off ADC1 */
run_bbp_write(sc, 138, bbp);
run_rt3070_rf_read(sc, 38, &rf);
run_rt3070_rf_write(sc, 38, rf & ~RT5390_RX_LO1);
run_rt3070_rf_read(sc, 39, &rf);
run_rt3070_rf_write(sc, 39, rf & ~RT5390_RX_LO2);
/* Avoid data loss and CRC errors. */
run_bbp_read(sc, 4, &bbp);
run_bbp_write(sc, 4, bbp | RT5390_MAC_IF_CTRL);
run_rt3070_rf_read(sc, 30, &rf);
rf = (rf & ~0x18) | 0x10;
run_rt3070_rf_write(sc, 30, rf);
if (sc->mac_ver != 0x5592) {
run_write(sc, RT2860_TX_SW_CFG1, 0);
if (sc->mac_rev < 0x0211) {
run_write(sc, RT2860_TX_SW_CFG2,
sc->patch_dac ? 0x2c : 0x0f);
} else
run_write(sc, RT2860_TX_SW_CFG2, 0);
}
}
static int
run_txrx_enable(struct run_softc *sc)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t tmp;
int error, ntries;
run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_MAC_TX_EN);
for (ntries = 0; ntries < 200; ntries++) {
if ((error = run_read(sc, RT2860_WPDMA_GLO_CFG, &tmp)) != 0)
return (error);
if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0)
break;
run_delay(sc, 50);
}
if (ntries == 200)
return (ETIMEDOUT);
run_delay(sc, 50);
tmp |= RT2860_RX_DMA_EN | RT2860_TX_DMA_EN | RT2860_TX_WB_DDONE;
run_write(sc, RT2860_WPDMA_GLO_CFG, tmp);
/* enable Rx bulk aggregation (set timeout and limit) */
tmp = RT2860_USB_TX_EN | RT2860_USB_RX_EN | RT2860_USB_RX_AGG_EN |
RT2860_USB_RX_AGG_TO(128) | RT2860_USB_RX_AGG_LMT(2);
run_write(sc, RT2860_USB_DMA_CFG, tmp);
/* set Rx filter */
tmp = RT2860_DROP_CRC_ERR | RT2860_DROP_PHY_ERR;
if (ic->ic_opmode != IEEE80211_M_MONITOR) {
tmp |= RT2860_DROP_UC_NOME | RT2860_DROP_DUPL |
RT2860_DROP_CTS | RT2860_DROP_BA | RT2860_DROP_ACK |
RT2860_DROP_VER_ERR | RT2860_DROP_CTRL_RSV |
RT2860_DROP_CFACK | RT2860_DROP_CFEND;
if (ic->ic_opmode == IEEE80211_M_STA)
tmp |= RT2860_DROP_RTS | RT2860_DROP_PSPOLL;
}
run_write(sc, RT2860_RX_FILTR_CFG, tmp);
run_write(sc, RT2860_MAC_SYS_CTRL,
RT2860_MAC_RX_EN | RT2860_MAC_TX_EN);
return (0);
}
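/*
* Merge sc->freq into the low 7 bits of RF register 17 (the result is
* clamped to 0x5f) and have the MCU apply the new value (command 0x74)
* if it differs from the current one.
*/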
static void
run_adjust_freq_offset(struct run_softc *sc)
{
uint8_t rf, tmp;
run_rt3070_rf_read(sc, 17, &rf);
tmp = rf;
rf = (rf & ~0x7f) | (sc->freq & 0x7f);
rf = MIN(rf, 0x5f);
if (tmp != rf)
run_mcu_cmd(sc, 0x74, (tmp << 8) | rf);
}
static void
run_init_locked(struct run_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t tmp;
uint8_t bbp1, bbp3;
int i;
int ridx;
int ntries;
if (ic->ic_nrunning > 1)
return;
run_stop(sc);
if (run_load_microcode(sc) != 0) {
device_printf(sc->sc_dev, "could not load 8051 microcode\n");
goto fail;
}
for (ntries = 0; ntries < 100; ntries++) {
if (run_read(sc, RT2860_ASIC_VER_ID, &tmp) != 0)
goto fail;
if (tmp != 0 && tmp != 0xffffffff)
break;
run_delay(sc, 10);
}
if (ntries == 100)
goto fail;
for (i = 0; i != RUN_EP_QUEUES; i++)
run_setup_tx_list(sc, &sc->sc_epq[i]);
- run_set_macaddr(sc, IF_LLADDR(ifp));
+ run_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);
for (ntries = 0; ntries < 100; ntries++) {
if (run_read(sc, RT2860_WPDMA_GLO_CFG, &tmp) != 0)
goto fail;
if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0)
break;
run_delay(sc, 10);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
goto fail;
}
tmp &= 0xff0;
tmp |= RT2860_TX_WB_DDONE;
run_write(sc, RT2860_WPDMA_GLO_CFG, tmp);
/* turn off PME_OEN to solve high-current issue */
run_read(sc, RT2860_SYS_CTRL, &tmp);
run_write(sc, RT2860_SYS_CTRL, tmp & ~RT2860_PME_OEN);
run_write(sc, RT2860_MAC_SYS_CTRL,
RT2860_BBP_HRST | RT2860_MAC_SRST);
run_write(sc, RT2860_USB_DMA_CFG, 0);
if (run_reset(sc) != 0) {
device_printf(sc->sc_dev, "could not reset chipset\n");
goto fail;
}
run_write(sc, RT2860_MAC_SYS_CTRL, 0);
/* init Tx power for all Tx rates (from EEPROM) */
for (ridx = 0; ridx < 5; ridx++) {
if (sc->txpow20mhz[ridx] == 0xffffffff)
continue;
run_write(sc, RT2860_TX_PWR_CFG(ridx), sc->txpow20mhz[ridx]);
}
for (i = 0; i < nitems(rt2870_def_mac); i++)
run_write(sc, rt2870_def_mac[i].reg, rt2870_def_mac[i].val);
run_write(sc, RT2860_WMM_AIFSN_CFG, 0x00002273);
run_write(sc, RT2860_WMM_CWMIN_CFG, 0x00002344);
run_write(sc, RT2860_WMM_CWMAX_CFG, 0x000034aa);
if (sc->mac_ver >= 0x5390) {
run_write(sc, RT2860_TX_SW_CFG0,
4 << RT2860_DLY_PAPE_EN_SHIFT | 4);
if (sc->mac_ver >= 0x5392) {
run_write(sc, RT2860_MAX_LEN_CFG, 0x00002fff);
if (sc->mac_ver == 0x5592) {
run_write(sc, RT2860_HT_FBK_CFG1, 0xedcba980);
run_write(sc, RT2860_TXOP_HLDR_ET, 0x00000082);
} else {
run_write(sc, RT2860_HT_FBK_CFG1, 0xedcb4980);
run_write(sc, RT2860_LG_FBK_CFG0, 0xedcba322);
}
}
} else if (sc->mac_ver == 0x3593) {
run_write(sc, RT2860_TX_SW_CFG0,
4 << RT2860_DLY_PAPE_EN_SHIFT | 2);
} else if (sc->mac_ver >= 0x3070) {
/* set delay of PA_PE assertion to 1us (unit of 0.25us) */
run_write(sc, RT2860_TX_SW_CFG0,
4 << RT2860_DLY_PAPE_EN_SHIFT);
}
/* wait while MAC is busy */
for (ntries = 0; ntries < 100; ntries++) {
if (run_read(sc, RT2860_MAC_STATUS_REG, &tmp) != 0)
goto fail;
if (!(tmp & (RT2860_RX_STATUS_BUSY | RT2860_TX_STATUS_BUSY)))
break;
run_delay(sc, 10);
}
if (ntries == 100)
goto fail;
/* clear Host to MCU mailbox */
run_write(sc, RT2860_H2M_BBPAGENT, 0);
run_write(sc, RT2860_H2M_MAILBOX, 0);
run_delay(sc, 10);
if (run_bbp_init(sc) != 0) {
device_printf(sc->sc_dev, "could not initialize BBP\n");
goto fail;
}
/* abort TSF synchronization */
run_read(sc, RT2860_BCN_TIME_CFG, &tmp);
tmp &= ~(RT2860_BCN_TX_EN | RT2860_TSF_TIMER_EN |
RT2860_TBTT_TIMER_EN);
run_write(sc, RT2860_BCN_TIME_CFG, tmp);
/* clear RX WCID search table */
run_set_region_4(sc, RT2860_WCID_ENTRY(0), 0, 512);
/* clear WCID attribute table */
run_set_region_4(sc, RT2860_WCID_ATTR(0), 0, 8 * 32);
/* hostapd sets a key before init. So, don't clear it. */
if (sc->cmdq_key_set != RUN_CMDQ_GO) {
/* clear shared key table */
run_set_region_4(sc, RT2860_SKEY(0, 0), 0, 8 * 32);
/* clear shared key mode */
run_set_region_4(sc, RT2860_SKEY_MODE_0_7, 0, 4);
}
run_read(sc, RT2860_US_CYC_CNT, &tmp);
tmp = (tmp & ~0xff) | 0x1e;
run_write(sc, RT2860_US_CYC_CNT, tmp);
if (sc->mac_rev != 0x0101)
run_write(sc, RT2860_TXOP_CTRL_CFG, 0x0000583f);
run_write(sc, RT2860_WMM_TXOP0_CFG, 0);
run_write(sc, RT2860_WMM_TXOP1_CFG, 48 << 16 | 96);
/* write vendor-specific BBP values (from EEPROM) */
if (sc->mac_ver < 0x3593) {
for (i = 0; i < 10; i++) {
if (sc->bbp[i].reg == 0 || sc->bbp[i].reg == 0xff)
continue;
run_bbp_write(sc, sc->bbp[i].reg, sc->bbp[i].val);
}
}
/* select Main antenna for 1T1R devices */
if (sc->rf_rev == RT3070_RF_3020 || sc->rf_rev == RT5390_RF_5370)
run_set_rx_antenna(sc, 0);
/* send LEDs operating mode to microcontroller */
(void)run_mcu_cmd(sc, RT2860_MCU_CMD_LED1, sc->led[0]);
(void)run_mcu_cmd(sc, RT2860_MCU_CMD_LED2, sc->led[1]);
(void)run_mcu_cmd(sc, RT2860_MCU_CMD_LED3, sc->led[2]);
if (sc->mac_ver >= 0x5390)
run_rt5390_rf_init(sc);
else if (sc->mac_ver == 0x3593)
run_rt3593_rf_init(sc);
else if (sc->mac_ver >= 0x3070)
run_rt3070_rf_init(sc);
/* disable non-existing Rx chains */
run_bbp_read(sc, 3, &bbp3);
bbp3 &= ~(1 << 3 | 1 << 4);
if (sc->nrxchains == 2)
bbp3 |= 1 << 3;
else if (sc->nrxchains == 3)
bbp3 |= 1 << 4;
run_bbp_write(sc, 3, bbp3);
/* disable non-existing Tx chains */
run_bbp_read(sc, 1, &bbp1);
if (sc->ntxchains == 1)
bbp1 &= ~(1 << 3 | 1 << 4);
run_bbp_write(sc, 1, bbp1);
if (sc->mac_ver >= 0x5390)
run_rt5390_rf_setup(sc);
else if (sc->mac_ver == 0x3593)
run_rt3593_rf_setup(sc);
else if (sc->mac_ver >= 0x3070)
run_rt3070_rf_setup(sc);
/* select default channel */
run_set_chan(sc, ic->ic_curchan);
/* setup initial protection mode */
run_updateprot_cb(ic);
/* turn radio LED on */
run_set_leds(sc, RT2860_LED_RADIO);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_flags |= RUN_RUNNING;
sc->cmdq_run = RUN_CMDQ_GO;
for (i = 0; i != RUN_N_XFER; i++)
usbd_xfer_set_stall(sc->sc_xfer[i]);
usbd_transfer_start(sc->sc_xfer[RUN_BULK_RX]);
if (run_txrx_enable(sc) != 0)
goto fail;
return;
fail:
run_stop(sc);
}
static void
-run_init(void *arg)
-{
- struct run_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
-
- RUN_LOCK(sc);
- run_init_locked(sc);
- RUN_UNLOCK(sc);
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ieee80211_start_all(ic);
-}
-
-static void
run_stop(void *arg)
{
struct run_softc *sc = (struct run_softc *)arg;
- struct ifnet *ifp = sc->sc_ifp;
uint32_t tmp;
int i;
int ntries;
RUN_LOCK_ASSERT(sc, MA_OWNED);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->sc_flags & RUN_RUNNING)
run_set_leds(sc, 0); /* turn all LEDs off */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->sc_flags &= ~RUN_RUNNING;
sc->ratectl_run = RUN_RATECTL_OFF;
sc->cmdq_run = sc->cmdq_key_set;
RUN_UNLOCK(sc);
for (i = 0; i < RUN_N_XFER; i++)
usbd_transfer_drain(sc->sc_xfer[i]);
RUN_LOCK(sc);
if (sc->rx_m != NULL) {
m_free(sc->rx_m);
sc->rx_m = NULL;
}
/* Disable Tx/Rx DMA. */
if (run_read(sc, RT2860_WPDMA_GLO_CFG, &tmp) != 0)
return;
tmp &= ~(RT2860_RX_DMA_EN | RT2860_TX_DMA_EN);
run_write(sc, RT2860_WPDMA_GLO_CFG, tmp);
for (ntries = 0; ntries < 100; ntries++) {
if (run_read(sc, RT2860_WPDMA_GLO_CFG, &tmp) != 0)
return;
if ((tmp & (RT2860_TX_DMA_BUSY | RT2860_RX_DMA_BUSY)) == 0)
break;
run_delay(sc, 10);
}
if (ntries == 100) {
device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
return;
}
/* disable Tx/Rx */
run_read(sc, RT2860_MAC_SYS_CTRL, &tmp);
tmp &= ~(RT2860_MAC_RX_EN | RT2860_MAC_TX_EN);
run_write(sc, RT2860_MAC_SYS_CTRL, tmp);
/* wait for pending Tx to complete */
for (ntries = 0; ntries < 100; ntries++) {
if (run_read(sc, RT2860_TXRXQ_PCNT, &tmp) != 0) {
DPRINTF("Cannot read Tx queue count\n");
break;
}
if ((tmp & RT2860_TX2Q_PCNT_MASK) == 0) {
DPRINTF("All Tx cleared\n");
break;
}
run_delay(sc, 10);
}
if (ntries >= 100)
DPRINTF("There are still pending Tx\n");
run_delay(sc, 10);
run_write(sc, RT2860_USB_DMA_CFG, 0);
run_write(sc, RT2860_MAC_SYS_CTRL, RT2860_BBP_HRST | RT2860_MAC_SRST);
run_write(sc, RT2860_MAC_SYS_CTRL, 0);
for (i = 0; i != RUN_EP_QUEUES; i++)
run_unsetup_tx_list(sc, &sc->sc_epq[i]);
}
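/*
* Sleep for 'ms' milliseconds.  The softc mutex is passed to
* usb_pause_mtx() only when the caller holds it.
*/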
static void
run_delay(struct run_softc *sc, u_int ms)
{
usb_pause_mtx(mtx_owned(&sc->sc_mtx) ?
&sc->sc_mtx : NULL, USB_MS_TO_TICKS(ms));
}
static device_method_t run_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, run_match),
DEVMETHOD(device_attach, run_attach),
DEVMETHOD(device_detach, run_detach),
DEVMETHOD_END
};
static driver_t run_driver = {
.name = "run",
.methods = run_methods,
.size = sizeof(struct run_softc)
};
static devclass_t run_devclass;
DRIVER_MODULE(run, uhub, run_driver, run_devclass, run_driver_loaded, NULL);
MODULE_DEPEND(run, wlan, 1, 1, 1);
MODULE_DEPEND(run, usb, 1, 1, 1);
MODULE_DEPEND(run, firmware, 1, 1, 1);
MODULE_VERSION(run, 1);
Index: head/sys/dev/usb/wlan/if_runvar.h
===================================================================
--- head/sys/dev/usb/wlan/if_runvar.h (revision 287196)
+++ head/sys/dev/usb/wlan/if_runvar.h (revision 287197)
@@ -1,263 +1,262 @@
/* $OpenBSD: if_runvar.h,v 1.3 2009/03/26 20:17:27 damien Exp $ */
/*-
* Copyright (c) 2008,2009 Damien Bergamini <damien.bergamini@free.fr>
* ported to FreeBSD by Akinori Furukoshi <moonlightakkiy@yahoo.ca>
* USB Consulting, Hans Petter Selasky <hselasky@freebsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* $FreeBSD$
*/
#ifndef _IF_RUNVAR_H_
#define _IF_RUNVAR_H_
#define RUN_MAX_RXSZ \
MIN(4096, MJUMPAGESIZE)
/* NB: "11" is the maximum number of padding bytes needed for Tx */
#define RUN_MAX_TXSZ \
(sizeof (struct rt2870_txd) + \
sizeof (struct rt2860_txwi) + \
MCLBYTES + 11)
#define RUN_TX_TIMEOUT 5000 /* ms */
/* Tx ring count was 8/endpoint, now 32 for all 4 (or 6) endpoints. */
#define RUN_TX_RING_COUNT 32
#define RUN_RX_RING_COUNT 1
#define RT2870_WCID_MAX 64
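/* Map an 802.11 association ID to a hardware WCID table index. */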
#define RUN_AID2WCID(aid) ((aid) & 0xff)
#define RUN_VAP_MAX 8
struct run_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_dbm_antsignal;
uint8_t wr_antenna;
uint8_t wr_antsignal;
} __packed __aligned(8);
#define RUN_RX_RADIOTAP_PRESENT \
(1 << IEEE80211_RADIOTAP_FLAGS | \
1 << IEEE80211_RADIOTAP_RATE | \
1 << IEEE80211_RADIOTAP_CHANNEL | \
1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL | \
1 << IEEE80211_RADIOTAP_ANTENNA | \
1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL)
struct run_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint8_t wt_rate;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
uint8_t wt_hwqueue;
} __packed __aligned(8);
#define IEEE80211_RADIOTAP_HWQUEUE 15
#define RUN_TX_RADIOTAP_PRESENT \
(1 << IEEE80211_RADIOTAP_FLAGS | \
1 << IEEE80211_RADIOTAP_RATE | \
1 << IEEE80211_RADIOTAP_CHANNEL | \
1 << IEEE80211_RADIOTAP_HWQUEUE)
struct run_softc;
struct run_tx_data {
STAILQ_ENTRY(run_tx_data) next;
struct run_softc *sc;
struct mbuf *m;
struct ieee80211_node *ni;
uint32_t align[0]; /* dummy field */
uint8_t desc[sizeof(struct rt2870_txd) +
sizeof(struct rt2860_txwi)];
uint8_t ridx;
};
STAILQ_HEAD(run_tx_data_head, run_tx_data);
struct run_node {
struct ieee80211_node ni;
uint8_t ridx[IEEE80211_RATE_MAXSIZE];
uint8_t ctl_ridx[IEEE80211_RATE_MAXSIZE];
uint8_t amrr_ridx;
uint8_t mgt_ridx;
uint8_t fix_ridx;
};
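/*
* Deferred command: a callback and its arguments, queued by the driver
* from non-sleepable contexts and executed later from the cmdq task.
*/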
struct run_cmdq {
void *arg0;
void *arg1;
void (*func)(void *);
struct ieee80211_key *k;
struct ieee80211_key key;
uint8_t mac[IEEE80211_ADDR_LEN];
uint8_t wcid;
};
struct run_vap {
struct ieee80211vap vap;
struct ieee80211_beacon_offsets bo;
struct mbuf *beacon_mbuf;
int (*newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
uint8_t rvp_id;
};
#define RUN_VAP(vap) ((struct run_vap *)(vap))
/*
* There are 7 bulk endpoints: 1 for RX
* and 6 for TX (4 EDCAs + HCCA + Prio).
* Update 03-14-2009: some devices like the Planex GW-US300MiniS
* seem to have only 4 TX bulk endpoints (Fukaumi Naoki).
*/
enum {
RUN_BULK_TX_BE, /* = WME_AC_BE */
RUN_BULK_TX_BK, /* = WME_AC_BK */
RUN_BULK_TX_VI, /* = WME_AC_VI */
RUN_BULK_TX_VO, /* = WME_AC_VO */
RUN_BULK_TX_HCCA,
RUN_BULK_TX_PRIO,
RUN_BULK_RX,
RUN_N_XFER,
};
#define RUN_EP_QUEUES RUN_BULK_RX
struct run_endpoint_queue {
struct run_tx_data tx_data[RUN_TX_RING_COUNT];
struct run_tx_data_head tx_qh;
struct run_tx_data_head tx_fh;
uint32_t tx_nfree;
};
struct run_softc {
+ struct mtx sc_mtx;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
device_t sc_dev;
struct usb_device *sc_udev;
- struct ifnet *sc_ifp;
int sc_need_fwload;
int sc_flags;
#define RUN_FLAG_FWLOAD_NEEDED 0x01
+#define RUN_RUNNING 0x02
uint16_t wcid_stats[RT2870_WCID_MAX + 1][3];
#define RUN_TXCNT 0
#define RUN_SUCCESS 1
#define RUN_RETRY 2
int (*sc_srom_read)(struct run_softc *,
uint16_t, uint16_t *);
uint16_t mac_ver;
uint16_t mac_rev;
uint16_t rf_rev;
uint8_t freq;
uint8_t ntxchains;
uint8_t nrxchains;
uint8_t bbp25;
uint8_t bbp26;
uint8_t rf24_20mhz;
uint8_t rf24_40mhz;
uint8_t patch_dac;
uint8_t rfswitch;
uint8_t ext_2ghz_lna;
uint8_t ext_5ghz_lna;
uint8_t calib_2ghz;
uint8_t calib_5ghz;
uint8_t txmixgain_2ghz;
uint8_t txmixgain_5ghz;
int8_t txpow1[54];
int8_t txpow2[54];
int8_t txpow3[54];
int8_t rssi_2ghz[3];
int8_t rssi_5ghz[3];
uint8_t lna[4];
struct {
uint8_t reg;
uint8_t val;
} bbp[10], rf[10];
uint8_t leds;
uint16_t led[3];
uint32_t txpow20mhz[5];
uint32_t txpow40mhz_2ghz[5];
uint32_t txpow40mhz_5ghz[5];
-
- uint8_t sc_bssid[6];
-
- struct mtx sc_mtx;
struct run_endpoint_queue sc_epq[RUN_EP_QUEUES];
struct task ratectl_task;
struct usb_callout ratectl_ch;
uint8_t ratectl_run;
#define RUN_RATECTL_OFF 0
/* needs to be a power of 2, otherwise RUN_CMDQ_GET fails */
#define RUN_CMDQ_MAX 16
#define RUN_CMDQ_MASQ (RUN_CMDQ_MAX - 1)
struct run_cmdq cmdq[RUN_CMDQ_MAX];
struct task cmdq_task;
uint32_t cmdq_store;
uint8_t cmdq_exec;
uint8_t cmdq_run;
uint8_t cmdq_key_set;
#define RUN_CMDQ_ABORT 0
#define RUN_CMDQ_GO 1
struct usb_xfer *sc_xfer[RUN_N_XFER];
struct mbuf *rx_m;
uint8_t fifo_cnt;
uint8_t running;
uint8_t runbmap;
uint8_t ap_running;
uint8_t adhoc_running;
uint8_t sta_running;
uint8_t rvp_cnt;
uint8_t rvp_bmap;
uint8_t sc_detached;
union {
struct run_rx_radiotap_header th;
uint8_t pad[64];
} sc_rxtapu;
#define sc_rxtap sc_rxtapu.th
int sc_rxtap_len;
union {
struct run_tx_radiotap_header th;
uint8_t pad[64];
} sc_txtapu;
#define sc_txtap sc_txtapu.th
int sc_txtap_len;
};
#define RUN_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define RUN_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
#define RUN_LOCK_ASSERT(sc, t) mtx_assert(&(sc)->sc_mtx, t)
#endif /* _IF_RUNVAR_H_ */
Index: head/sys/dev/usb/wlan/if_uath.c
===================================================================
--- head/sys/dev/usb/wlan/if_uath.c (revision 287196)
+++ head/sys/dev/usb/wlan/if_uath.c (revision 287197)
@@ -1,2909 +1,2825 @@
/*-
* Copyright (c) 2006 Sam Leffler, Errno Consulting
* Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
/*
* This driver is distantly derived from a driver of the same name
* by Damien Bergamini. The original copyright is included below:
*
* Copyright (c) 2006
* Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
* Driver for Atheros AR5523 USB parts.
*
* The driver requires firmware to be loaded into the device. This
* is done on device discovery from a user application (uathload)
* that is launched by devd when a device with suitable product ID
* is recognized. Once firmware has been loaded the device will
* reset the USB port and re-attach with the original product ID+1
* and this driver will be attached. The firmware is licensed for
* general use (royalty free) and may be incorporated in products.
* Note that the firmware normally packaged with the NDIS drivers
* for these devices does not work in this way and so does not work
* with this driver.
*/
#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kdb.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#endif
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include "usbdevs.h"
#include <dev/usb/wlan/if_uathreg.h>
#include <dev/usb/wlan/if_uathvar.h>
static SYSCTL_NODE(_hw_usb, OID_AUTO, uath, CTLFLAG_RW, 0, "USB Atheros");
static int uath_countrycode = CTRY_DEFAULT; /* country code */
SYSCTL_INT(_hw_usb_uath, OID_AUTO, countrycode, CTLFLAG_RWTUN, &uath_countrycode,
0, "country code");
static int uath_regdomain = 0; /* regulatory domain */
SYSCTL_INT(_hw_usb_uath, OID_AUTO, regdomain, CTLFLAG_RD, &uath_regdomain,
0, "regulatory domain");
#ifdef UATH_DEBUG
int uath_debug = 0;
SYSCTL_INT(_hw_usb_uath, OID_AUTO, debug, CTLFLAG_RWTUN, &uath_debug, 0,
"uath debug level");
enum {
UATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
UATH_DEBUG_XMIT_DUMP = 0x00000002, /* xmit dump */
UATH_DEBUG_RECV = 0x00000004, /* basic recv operation */
UATH_DEBUG_TX_PROC = 0x00000008, /* tx ISR proc */
UATH_DEBUG_RX_PROC = 0x00000010, /* rx ISR proc */
UATH_DEBUG_RECV_ALL = 0x00000020, /* trace all frames (beacons) */
UATH_DEBUG_INIT = 0x00000040, /* initialization of dev */
UATH_DEBUG_DEVCAP = 0x00000080, /* dev caps */
UATH_DEBUG_CMDS = 0x00000100, /* commands */
UATH_DEBUG_CMDS_DUMP = 0x00000200, /* command buffer dump */
UATH_DEBUG_RESET = 0x00000400, /* reset processing */
UATH_DEBUG_STATE = 0x00000800, /* 802.11 state transitions */
UATH_DEBUG_MULTICAST = 0x00001000, /* multicast */
UATH_DEBUG_WME = 0x00002000, /* WME */
UATH_DEBUG_CHANNEL = 0x00004000, /* channel */
UATH_DEBUG_RATES = 0x00008000, /* rates */
UATH_DEBUG_CRYPTO = 0x00010000, /* crypto */
UATH_DEBUG_LED = 0x00020000, /* LED */
UATH_DEBUG_ANY = 0xffffffff
};
#define DPRINTF(sc, m, fmt, ...) do { \
if (sc->sc_debug & (m)) \
printf(fmt, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(sc, m, fmt, ...) do { \
(void) sc; \
} while (0)
#endif
/* unaligned little endian access */
#define LE_READ_2(p) \
((u_int16_t) \
((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8)))
#define LE_READ_4(p) \
((u_int32_t) \
((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \
(((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)))
/* recognized device vendors/products */
static const STRUCT_USB_HOST_ID uath_devs[] = {
#define UATH_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
UATH_DEV(ACCTON, SMCWUSBTG2),
UATH_DEV(ATHEROS, AR5523),
UATH_DEV(ATHEROS2, AR5523_1),
UATH_DEV(ATHEROS2, AR5523_2),
UATH_DEV(ATHEROS2, AR5523_3),
UATH_DEV(CONCEPTRONIC, AR5523_1),
UATH_DEV(CONCEPTRONIC, AR5523_2),
UATH_DEV(DLINK, DWLAG122),
UATH_DEV(DLINK, DWLAG132),
UATH_DEV(DLINK, DWLG132),
UATH_DEV(DLINK2, DWA120),
UATH_DEV(GIGASET, AR5523),
UATH_DEV(GIGASET, SMCWUSBTG),
UATH_DEV(GLOBALSUN, AR5523_1),
UATH_DEV(GLOBALSUN, AR5523_2),
UATH_DEV(NETGEAR, WG111U),
UATH_DEV(NETGEAR3, WG111T),
UATH_DEV(NETGEAR3, WPN111),
UATH_DEV(NETGEAR3, WPN111_2),
UATH_DEV(UMEDIA, TEW444UBEU),
UATH_DEV(UMEDIA, AR5523_2),
UATH_DEV(WISTRONNEWEB, AR5523_1),
UATH_DEV(WISTRONNEWEB, AR5523_2),
UATH_DEV(ZCOM, AR5523)
#undef UATH_DEV
};
static usb_callback_t uath_intr_rx_callback;
static usb_callback_t uath_intr_tx_callback;
static usb_callback_t uath_bulk_rx_callback;
static usb_callback_t uath_bulk_tx_callback;
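/*
* Four bulk transfers: firmware command Rx/Tx on endpoint 1 and
* data Rx/Tx on endpoint 2.
*/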
static const struct usb_config uath_usbconfig[UATH_N_XFERS] = {
[UATH_INTR_RX] = {
.type = UE_BULK,
.endpoint = 0x1,
.direction = UE_DIR_IN,
.bufsize = UATH_MAX_CMDSZ,
.flags = {
.pipe_bof = 1,
.short_xfer_ok = 1
},
.callback = uath_intr_rx_callback
},
[UATH_INTR_TX] = {
.type = UE_BULK,
.endpoint = 0x1,
.direction = UE_DIR_OUT,
.bufsize = UATH_MAX_CMDSZ * UATH_CMD_LIST_COUNT,
.flags = {
.force_short_xfer = 1,
.pipe_bof = 1,
},
.callback = uath_intr_tx_callback,
.timeout = UATH_CMD_TIMEOUT
},
[UATH_BULK_RX] = {
.type = UE_BULK,
.endpoint = 0x2,
.direction = UE_DIR_IN,
.bufsize = MCLBYTES,
.flags = {
.ext_buffer = 1,
.pipe_bof = 1,
.short_xfer_ok = 1
},
.callback = uath_bulk_rx_callback
},
[UATH_BULK_TX] = {
.type = UE_BULK,
.endpoint = 0x2,
.direction = UE_DIR_OUT,
.bufsize = UATH_MAX_TXBUFSZ * UATH_TX_DATA_LIST_COUNT,
.flags = {
.force_short_xfer = 1,
.pipe_bof = 1
},
.callback = uath_bulk_tx_callback,
.timeout = UATH_DATA_TIMEOUT
}
};
static struct ieee80211vap *uath_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void uath_vap_delete(struct ieee80211vap *);
static int uath_alloc_cmd_list(struct uath_softc *, struct uath_cmd []);
static void uath_free_cmd_list(struct uath_softc *, struct uath_cmd []);
static int uath_host_available(struct uath_softc *);
static int uath_get_capability(struct uath_softc *, uint32_t, uint32_t *);
static int uath_get_devcap(struct uath_softc *);
static struct uath_cmd *
uath_get_cmdbuf(struct uath_softc *);
static int uath_cmd_read(struct uath_softc *, uint32_t, const void *,
int, void *, int, int);
static int uath_cmd_write(struct uath_softc *, uint32_t, const void *,
int, int);
static void uath_stat(void *);
#ifdef UATH_DEBUG
static void uath_dump_cmd(const uint8_t *, int, char);
static const char *
uath_codename(int);
#endif
static int uath_get_devstatus(struct uath_softc *,
uint8_t macaddr[IEEE80211_ADDR_LEN]);
static int uath_get_status(struct uath_softc *, uint32_t, void *, int);
static int uath_alloc_rx_data_list(struct uath_softc *);
static int uath_alloc_tx_data_list(struct uath_softc *);
static void uath_free_rx_data_list(struct uath_softc *);
static void uath_free_tx_data_list(struct uath_softc *);
-static int uath_init_locked(void *);
-static void uath_init(void *);
-static void uath_stop_locked(struct ifnet *);
-static void uath_stop(struct ifnet *);
-static int uath_ioctl(struct ifnet *, u_long, caddr_t);
-static void uath_start(struct ifnet *);
+static int uath_init(struct uath_softc *);
+static void uath_stop(struct uath_softc *);
+static void uath_parent(struct ieee80211com *);
+static int uath_transmit(struct ieee80211com *, struct mbuf *);
+static void uath_start(struct uath_softc *);
static int uath_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void uath_scan_start(struct ieee80211com *);
static void uath_scan_end(struct ieee80211com *);
static void uath_set_channel(struct ieee80211com *);
static void uath_update_mcast(struct ieee80211com *);
static void uath_update_promisc(struct ieee80211com *);
static int uath_config(struct uath_softc *, uint32_t, uint32_t);
static int uath_config_multi(struct uath_softc *, uint32_t, const void *,
int);
static int uath_switch_channel(struct uath_softc *,
struct ieee80211_channel *);
static int uath_set_rxfilter(struct uath_softc *, uint32_t, uint32_t);
static void uath_watchdog(void *);
static void uath_abort_xfers(struct uath_softc *);
static int uath_dataflush(struct uath_softc *);
static int uath_cmdflush(struct uath_softc *);
static int uath_flush(struct uath_softc *);
static int uath_set_ledstate(struct uath_softc *, int);
static int uath_set_chan(struct uath_softc *, struct ieee80211_channel *);
static int uath_reset_tx_queues(struct uath_softc *);
static int uath_wme_init(struct uath_softc *);
static struct uath_data *
uath_getbuf(struct uath_softc *);
static int uath_newstate(struct ieee80211vap *, enum ieee80211_state,
int);
static int uath_set_key(struct uath_softc *,
const struct ieee80211_key *, int);
static int uath_set_keys(struct uath_softc *, struct ieee80211vap *);
static void uath_sysctl_node(struct uath_softc *);
static int
uath_match(device_t dev)
{
struct usb_attach_arg *uaa = device_get_ivars(dev);
if (uaa->usb_mode != USB_MODE_HOST)
return (ENXIO);
if (uaa->info.bConfigIndex != UATH_CONFIG_INDEX)
return (ENXIO);
if (uaa->info.bIfaceIndex != UATH_IFACE_INDEX)
return (ENXIO);
return (usbd_lookup_id_by_uaa(uath_devs, sizeof(uath_devs), uaa));
}
static int
uath_attach(device_t dev)
{
struct uath_softc *sc = device_get_softc(dev);
struct usb_attach_arg *uaa = device_get_ivars(dev);
- struct ieee80211com *ic;
- struct ifnet *ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
uint8_t bands, iface_index = UATH_IFACE_INDEX; /* XXX */
usb_error_t error;
- uint8_t macaddr[IEEE80211_ADDR_LEN];
sc->sc_dev = dev;
sc->sc_udev = uaa->device;
#ifdef UATH_DEBUG
sc->sc_debug = uath_debug;
#endif
device_set_usb_desc(dev);
/*
* Only post-firmware devices here.
*/
mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), MTX_NETWORK_LOCK,
MTX_DEF);
callout_init(&sc->stat_ch, 0);
callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
uath_usbconfig, UATH_N_XFERS, sc, &sc->sc_mtx);
if (error) {
device_printf(dev, "could not allocate USB transfers, "
"err=%s\n", usbd_errstr(error));
goto fail;
}
sc->sc_cmd_dma_buf =
usbd_xfer_get_frame_buffer(sc->sc_xfer[UATH_INTR_TX], 0);
sc->sc_tx_dma_buf =
usbd_xfer_get_frame_buffer(sc->sc_xfer[UATH_BULK_TX], 0);
/*
* Setup buffers for firmware commands.
*/
error = uath_alloc_cmd_list(sc, sc->sc_cmd);
if (error != 0) {
device_printf(sc->sc_dev,
"could not allocate Tx command list\n");
goto fail1;
}
/*
* We're now ready to send+receive firmware commands.
*/
UATH_LOCK(sc);
error = uath_host_available(sc);
if (error != 0) {
device_printf(sc->sc_dev, "could not initialize adapter\n");
- goto fail3;
+ goto fail2;
}
error = uath_get_devcap(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"could not get device capabilities\n");
- goto fail3;
+ goto fail2;
}
UATH_UNLOCK(sc);
/* Create device sysctl node. */
uath_sysctl_node(sc);
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "can not allocate ifnet\n");
- error = ENXIO;
- goto fail2;
- }
-
UATH_LOCK(sc);
- error = uath_get_devstatus(sc, macaddr);
+ error = uath_get_devstatus(sc, ic->ic_macaddr);
if (error != 0) {
device_printf(sc->sc_dev, "could not get device status\n");
- goto fail4;
+ goto fail2;
}
/*
* Allocate xfers for Rx/Tx data pipes.
*/
error = uath_alloc_rx_data_list(sc);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Rx data list\n");
- goto fail4;
+ goto fail2;
}
error = uath_alloc_tx_data_list(sc);
if (error != 0) {
device_printf(sc->sc_dev, "could not allocate Tx data list\n");
- goto fail4;
+ goto fail2;
}
UATH_UNLOCK(sc);
- ifp->if_softc = sc;
- if_initname(ifp, "uath", device_get_unit(sc->sc_dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = uath_init;
- ifp->if_ioctl = uath_ioctl;
- ifp->if_start = uath_start;
- /* XXX UATH_TX_DATA_LIST_COUNT */
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic = ifp->if_l2com;
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA | /* station mode */
IEEE80211_C_MONITOR | /* monitor mode supported */
IEEE80211_C_TXPMGT | /* tx power management */
IEEE80211_C_SHPREAMBLE | /* short preamble supported */
IEEE80211_C_SHSLOT | /* short slot time supported */
IEEE80211_C_WPA | /* 802.11i */
IEEE80211_C_BGSCAN | /* capable of bg scanning */
IEEE80211_C_TXFRAG; /* handle tx frags */
	/* record the regulatory domain for informational purposes. */
uath_regdomain = sc->sc_devcap.regDomain;
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
if ((sc->sc_devcap.analog5GhzRevision & 0xf0) == 0x30)
setbit(&bands, IEEE80211_MODE_11A);
/* XXX turbo */
ieee80211_init_channels(ic, NULL, &bands);
- ieee80211_ifattach(ic, macaddr);
+ ieee80211_ifattach(ic);
ic->ic_raw_xmit = uath_raw_xmit;
ic->ic_scan_start = uath_scan_start;
ic->ic_scan_end = uath_scan_end;
ic->ic_set_channel = uath_set_channel;
-
ic->ic_vap_create = uath_vap_create;
ic->ic_vap_delete = uath_vap_delete;
ic->ic_update_mcast = uath_update_mcast;
ic->ic_update_promisc = uath_update_promisc;
+ ic->ic_transmit = uath_transmit;
+ ic->ic_parent = uath_parent;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
UATH_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
UATH_RX_RADIOTAP_PRESENT);
if (bootverbose)
ieee80211_announce(ic);
return (0);
-fail4: if_free(ifp);
-fail3: UATH_UNLOCK(sc);
-fail2: uath_free_cmd_list(sc, sc->sc_cmd);
+fail2: UATH_UNLOCK(sc);
+ uath_free_cmd_list(sc, sc->sc_cmd);
fail1: usbd_transfer_unsetup(sc->sc_xfer, UATH_N_XFERS);
fail:
return (error);
}
static int
uath_detach(device_t dev)
{
struct uath_softc *sc = device_get_softc(dev);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
unsigned int x;
/*
* Prevent further allocations from RX/TX/CMD
* data lists and ioctls
*/
UATH_LOCK(sc);
sc->sc_flags |= UATH_FLAG_INVALID;
STAILQ_INIT(&sc->sc_rx_active);
STAILQ_INIT(&sc->sc_rx_inactive);
STAILQ_INIT(&sc->sc_tx_active);
STAILQ_INIT(&sc->sc_tx_inactive);
STAILQ_INIT(&sc->sc_tx_pending);
STAILQ_INIT(&sc->sc_cmd_active);
STAILQ_INIT(&sc->sc_cmd_pending);
STAILQ_INIT(&sc->sc_cmd_waiting);
STAILQ_INIT(&sc->sc_cmd_inactive);
+
+ uath_stop(sc);
UATH_UNLOCK(sc);
- uath_stop(ifp);
-
callout_drain(&sc->stat_ch);
callout_drain(&sc->watchdog_ch);
/* drain USB transfers */
for (x = 0; x != UATH_N_XFERS; x++)
usbd_transfer_drain(sc->sc_xfer[x]);
/* free data buffers */
UATH_LOCK(sc);
uath_free_rx_data_list(sc);
uath_free_tx_data_list(sc);
uath_free_cmd_list(sc, sc->sc_cmd);
UATH_UNLOCK(sc);
/* free USB transfers and some data buffers */
usbd_transfer_unsetup(sc->sc_xfer, UATH_N_XFERS);
ieee80211_ifdetach(ic);
- if_free(ifp);
+ mbufq_drain(&sc->sc_snd);
mtx_destroy(&sc->sc_mtx);
return (0);
}
static void
uath_free_cmd_list(struct uath_softc *sc, struct uath_cmd cmds[])
{
int i;
for (i = 0; i != UATH_CMD_LIST_COUNT; i++)
cmds[i].buf = NULL;
}
static int
uath_alloc_cmd_list(struct uath_softc *sc, struct uath_cmd cmds[])
{
int i;
STAILQ_INIT(&sc->sc_cmd_active);
STAILQ_INIT(&sc->sc_cmd_pending);
STAILQ_INIT(&sc->sc_cmd_waiting);
STAILQ_INIT(&sc->sc_cmd_inactive);
for (i = 0; i != UATH_CMD_LIST_COUNT; i++) {
struct uath_cmd *cmd = &cmds[i];
cmd->sc = sc; /* backpointer for callbacks */
cmd->msgid = i;
cmd->buf = ((uint8_t *)sc->sc_cmd_dma_buf) +
(i * UATH_MAX_CMDSZ);
STAILQ_INSERT_TAIL(&sc->sc_cmd_inactive, cmd, next);
UATH_STAT_INC(sc, st_cmd_inactive);
}
return (0);
}
static int
uath_host_available(struct uath_softc *sc)
{
struct uath_cmd_host_available setup;
UATH_ASSERT_LOCKED(sc);
/* inform target the host is available */
setup.sw_ver_major = htobe32(ATH_SW_VER_MAJOR);
setup.sw_ver_minor = htobe32(ATH_SW_VER_MINOR);
setup.sw_ver_patch = htobe32(ATH_SW_VER_PATCH);
setup.sw_ver_build = htobe32(ATH_SW_VER_BUILD);
return uath_cmd_read(sc, WDCMSG_HOST_AVAILABLE,
&setup, sizeof setup, NULL, 0, 0);
}
#ifdef UATH_DEBUG
static void
uath_dump_cmd(const uint8_t *buf, int len, char prefix)
{
const char *sep = "";
int i;
for (i = 0; i < len; i++) {
if ((i % 16) == 0) {
printf("%s%c ", sep, prefix);
sep = "\n";
}
else if ((i % 4) == 0)
printf(" ");
printf("%02x", buf[i]);
}
printf("\n");
}
static const char *
uath_codename(int code)
{
#define N(a) (sizeof(a)/sizeof(a[0]))
static const char *names[] = {
"0x00",
"HOST_AVAILABLE",
"BIND",
"TARGET_RESET",
"TARGET_GET_CAPABILITY",
"TARGET_SET_CONFIG",
"TARGET_GET_STATUS",
"TARGET_GET_STATS",
"TARGET_START",
"TARGET_STOP",
"TARGET_ENABLE",
"TARGET_DISABLE",
"CREATE_CONNECTION",
"UPDATE_CONNECT_ATTR",
"DELETE_CONNECT",
"SEND",
"FLUSH",
"STATS_UPDATE",
"BMISS",
"DEVICE_AVAIL",
"SEND_COMPLETE",
"DATA_AVAIL",
"SET_PWR_MODE",
"BMISS_ACK",
"SET_LED_STEADY",
"SET_LED_BLINK",
"SETUP_BEACON_DESC",
"BEACON_INIT",
"RESET_KEY_CACHE",
"RESET_KEY_CACHE_ENTRY",
"SET_KEY_CACHE_ENTRY",
"SET_DECOMP_MASK",
"SET_REGULATORY_DOMAIN",
"SET_LED_STATE",
"WRITE_ASSOCID",
"SET_STA_BEACON_TIMERS",
"GET_TSF",
"RESET_TSF",
"SET_ADHOC_MODE",
"SET_BASIC_RATE",
"MIB_CONTROL",
"GET_CHANNEL_DATA",
"GET_CUR_RSSI",
"SET_ANTENNA_SWITCH",
"0x2c", "0x2d", "0x2e",
"USE_SHORT_SLOT_TIME",
"SET_POWER_MODE",
"SETUP_PSPOLL_DESC",
"SET_RX_MULTICAST_FILTER",
"RX_FILTER",
"PER_CALIBRATION",
"RESET",
"DISABLE",
"PHY_DISABLE",
"SET_TX_POWER_LIMIT",
"SET_TX_QUEUE_PARAMS",
"SETUP_TX_QUEUE",
"RELEASE_TX_QUEUE",
};
static char buf[8];
if (code < N(names))
return names[code];
if (code == WDCMSG_SET_DEFAULT_KEY)
return "SET_DEFAULT_KEY";
snprintf(buf, sizeof(buf), "0x%02x", code);
return buf;
#undef N
}
#endif
/*
* Low-level function to send read or write commands to the firmware.
*/
static int
uath_cmdsend(struct uath_softc *sc, uint32_t code, const void *idata, int ilen,
void *odata, int olen, int flags)
{
struct uath_cmd_hdr *hdr;
struct uath_cmd *cmd;
int error;
UATH_ASSERT_LOCKED(sc);
/* grab a xfer */
cmd = uath_get_cmdbuf(sc);
if (cmd == NULL) {
device_printf(sc->sc_dev, "%s: empty inactive queue\n",
__func__);
return (ENOBUFS);
}
cmd->flags = flags;
/* always bulk-out a multiple of 4 bytes */
cmd->buflen = roundup2(sizeof(struct uath_cmd_hdr) + ilen, 4);
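	/*
	 * (roundup2(x, 4) is (x + 3) & ~3, i.e. the command is padded out
	 * to the next 4-byte boundary.)
	 */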
hdr = (struct uath_cmd_hdr *)cmd->buf;
memset(hdr, 0, sizeof(struct uath_cmd_hdr));
hdr->len = htobe32(cmd->buflen);
hdr->code = htobe32(code);
hdr->msgid = cmd->msgid; /* don't care about endianness */
hdr->magic = htobe32((cmd->flags & UATH_CMD_FLAG_MAGIC) ? 1 << 24 : 0);
memcpy((uint8_t *)(hdr + 1), idata, ilen);
#ifdef UATH_DEBUG
if (sc->sc_debug & UATH_DEBUG_CMDS) {
printf("%s: send %s [flags 0x%x] olen %d\n",
__func__, uath_codename(code), cmd->flags, olen);
if (sc->sc_debug & UATH_DEBUG_CMDS_DUMP)
uath_dump_cmd(cmd->buf, cmd->buflen, '+');
}
#endif
cmd->odata = odata;
KASSERT(odata == NULL ||
olen < UATH_MAX_CMDSZ - sizeof(*hdr) + sizeof(uint32_t),
("odata %p olen %u", odata, olen));
cmd->olen = olen;
STAILQ_INSERT_TAIL(&sc->sc_cmd_pending, cmd, next);
UATH_STAT_INC(sc, st_cmd_pending);
usbd_transfer_start(sc->sc_xfer[UATH_INTR_TX]);
if (cmd->flags & UATH_CMD_FLAG_READ) {
usbd_transfer_start(sc->sc_xfer[UATH_INTR_RX]);
/* wait at most two seconds for command reply */
error = mtx_sleep(cmd, &sc->sc_mtx, 0, "uathcmd", 2 * hz);
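		/*
		 * NB: uath_cmdeof() does wakeup_one(cmd) once the intr-Rx
		 * callback has copied the reply into cmd->buf.
		 */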
cmd->odata = NULL; /* in case reply comes too late */
if (error != 0) {
device_printf(sc->sc_dev, "timeout waiting for reply "
"to cmd 0x%x (%u)\n", code, code);
} else if (cmd->olen != olen) {
device_printf(sc->sc_dev, "unexpected reply data count "
"to cmd 0x%x (%u), got %u, expected %u\n",
code, code, cmd->olen, olen);
error = EINVAL;
}
return (error);
}
return (0);
}
static int
uath_cmd_read(struct uath_softc *sc, uint32_t code, const void *idata,
int ilen, void *odata, int olen, int flags)
{
flags |= UATH_CMD_FLAG_READ;
return uath_cmdsend(sc, code, idata, ilen, odata, olen, flags);
}
static int
uath_cmd_write(struct uath_softc *sc, uint32_t code, const void *data, int len,
int flags)
{
flags &= ~UATH_CMD_FLAG_READ;
return uath_cmdsend(sc, code, data, len, NULL, 0, flags);
}
static struct uath_cmd *
uath_get_cmdbuf(struct uath_softc *sc)
{
struct uath_cmd *uc;
UATH_ASSERT_LOCKED(sc);
uc = STAILQ_FIRST(&sc->sc_cmd_inactive);
if (uc != NULL) {
STAILQ_REMOVE_HEAD(&sc->sc_cmd_inactive, next);
UATH_STAT_DEC(sc, st_cmd_inactive);
} else
uc = NULL;
if (uc == NULL)
DPRINTF(sc, UATH_DEBUG_XMIT, "%s: %s\n", __func__,
"out of command xmit buffers");
return (uc);
}
/*
* This function is called periodically (every second) when associated to
* query device statistics.
*/
static void
uath_stat(void *arg)
{
struct uath_softc *sc = arg;
int error;
UATH_LOCK(sc);
/*
	 * Send the statistics request asynchronously.  The timer is
	 * restarted when the stats notification arrives (see uath_cmdeof()).
*/
error = uath_cmd_write(sc, WDCMSG_TARGET_GET_STATS, NULL, 0,
UATH_CMD_FLAG_ASYNC);
if (error != 0) {
device_printf(sc->sc_dev,
"could not query stats, error %d\n", error);
}
UATH_UNLOCK(sc);
}
static int
uath_get_capability(struct uath_softc *sc, uint32_t cap, uint32_t *val)
{
int error;
cap = htobe32(cap);
error = uath_cmd_read(sc, WDCMSG_TARGET_GET_CAPABILITY,
&cap, sizeof cap, val, sizeof(uint32_t), UATH_CMD_FLAG_MAGIC);
if (error != 0) {
device_printf(sc->sc_dev, "could not read capability %u\n",
be32toh(cap));
return (error);
}
*val = be32toh(*val);
return (error);
}
static int
uath_get_devcap(struct uath_softc *sc)
{
#define GETCAP(x, v) do { \
error = uath_get_capability(sc, x, &v); \
if (error != 0) \
return (error); \
DPRINTF(sc, UATH_DEBUG_DEVCAP, \
"%s: %s=0x%08x\n", __func__, #x, v); \
} while (0)
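/*
 * NB: the do { ... } while (0) wrapper makes GETCAP expand to a single
 * statement, so it stays safe inside unbraced if/else bodies.
 */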
struct uath_devcap *cap = &sc->sc_devcap;
int error;
/* collect device capabilities */
GETCAP(CAP_TARGET_VERSION, cap->targetVersion);
GETCAP(CAP_TARGET_REVISION, cap->targetRevision);
GETCAP(CAP_MAC_VERSION, cap->macVersion);
GETCAP(CAP_MAC_REVISION, cap->macRevision);
GETCAP(CAP_PHY_REVISION, cap->phyRevision);
GETCAP(CAP_ANALOG_5GHz_REVISION, cap->analog5GhzRevision);
GETCAP(CAP_ANALOG_2GHz_REVISION, cap->analog2GhzRevision);
GETCAP(CAP_REG_DOMAIN, cap->regDomain);
GETCAP(CAP_REG_CAP_BITS, cap->regCapBits);
#if 0
/* NB: not supported in rev 1.5 */
GETCAP(CAP_COUNTRY_CODE, cap->countryCode);
#endif
GETCAP(CAP_WIRELESS_MODES, cap->wirelessModes);
GETCAP(CAP_CHAN_SPREAD_SUPPORT, cap->chanSpreadSupport);
GETCAP(CAP_COMPRESS_SUPPORT, cap->compressSupport);
GETCAP(CAP_BURST_SUPPORT, cap->burstSupport);
GETCAP(CAP_FAST_FRAMES_SUPPORT, cap->fastFramesSupport);
GETCAP(CAP_CHAP_TUNING_SUPPORT, cap->chapTuningSupport);
GETCAP(CAP_TURBOG_SUPPORT, cap->turboGSupport);
GETCAP(CAP_TURBO_PRIME_SUPPORT, cap->turboPrimeSupport);
GETCAP(CAP_DEVICE_TYPE, cap->deviceType);
GETCAP(CAP_WME_SUPPORT, cap->wmeSupport);
GETCAP(CAP_TOTAL_QUEUES, cap->numTxQueues);
GETCAP(CAP_CONNECTION_ID_MAX, cap->connectionIdMax);
GETCAP(CAP_LOW_5GHZ_CHAN, cap->low5GhzChan);
GETCAP(CAP_HIGH_5GHZ_CHAN, cap->high5GhzChan);
GETCAP(CAP_LOW_2GHZ_CHAN, cap->low2GhzChan);
GETCAP(CAP_HIGH_2GHZ_CHAN, cap->high2GhzChan);
GETCAP(CAP_TWICE_ANTENNAGAIN_5G, cap->twiceAntennaGain5G);
GETCAP(CAP_TWICE_ANTENNAGAIN_2G, cap->twiceAntennaGain2G);
GETCAP(CAP_CIPHER_AES_CCM, cap->supportCipherAES_CCM);
GETCAP(CAP_CIPHER_TKIP, cap->supportCipherTKIP);
GETCAP(CAP_MIC_TKIP, cap->supportMicTKIP);
cap->supportCipherWEP = 1; /* NB: always available */
return (0);
}
static int
uath_get_devstatus(struct uath_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
int error;
/* retrieve MAC address */
error = uath_get_status(sc, ST_MAC_ADDR, macaddr, IEEE80211_ADDR_LEN);
if (error != 0) {
device_printf(sc->sc_dev, "could not read MAC address\n");
return (error);
}
error = uath_get_status(sc, ST_SERIAL_NUMBER,
&sc->sc_serial[0], sizeof(sc->sc_serial));
if (error != 0) {
device_printf(sc->sc_dev,
"could not read device serial number\n");
return (error);
}
return (0);
}
static int
uath_get_status(struct uath_softc *sc, uint32_t which, void *odata, int olen)
{
int error;
which = htobe32(which);
error = uath_cmd_read(sc, WDCMSG_TARGET_GET_STATUS,
&which, sizeof(which), odata, olen, UATH_CMD_FLAG_MAGIC);
if (error != 0)
device_printf(sc->sc_dev,
"could not read EEPROM offset 0x%02x\n", be32toh(which));
return (error);
}
static void
uath_free_data_list(struct uath_softc *sc, struct uath_data data[], int ndata,
int fillmbuf)
{
int i;
for (i = 0; i < ndata; i++) {
struct uath_data *dp = &data[i];
if (fillmbuf == 1) {
if (dp->m != NULL) {
m_freem(dp->m);
dp->m = NULL;
dp->buf = NULL;
}
} else {
dp->buf = NULL;
}
if (dp->ni != NULL) {
ieee80211_free_node(dp->ni);
dp->ni = NULL;
}
}
}
static int
uath_alloc_data_list(struct uath_softc *sc, struct uath_data data[],
int ndata, int maxsz, void *dma_buf)
{
int i, error;
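	/*
	 * Rx entries (dma_buf == NULL) get their own mbuf clusters; Tx
	 * entries are carved out of the shared bulk-Tx DMA buffer in
	 * maxsz-sized slices.
	 */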
for (i = 0; i < ndata; i++) {
struct uath_data *dp = &data[i];
dp->sc = sc;
if (dma_buf == NULL) {
/* XXX check maxsz */
dp->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (dp->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
error = ENOMEM;
goto fail;
}
dp->buf = mtod(dp->m, uint8_t *);
} else {
dp->m = NULL;
dp->buf = ((uint8_t *)dma_buf) + (i * maxsz);
}
dp->ni = NULL;
}
return (0);
fail: uath_free_data_list(sc, data, ndata, 1 /* free mbufs */);
return (error);
}
static int
uath_alloc_rx_data_list(struct uath_softc *sc)
{
int error, i;
	/* XXX is an MCLBYTES-sized cluster large enough for any RX packet? */
error = uath_alloc_data_list(sc,
sc->sc_rx, UATH_RX_DATA_LIST_COUNT, MCLBYTES,
NULL /* setup mbufs */);
if (error != 0)
return (error);
STAILQ_INIT(&sc->sc_rx_active);
STAILQ_INIT(&sc->sc_rx_inactive);
for (i = 0; i < UATH_RX_DATA_LIST_COUNT; i++) {
STAILQ_INSERT_HEAD(&sc->sc_rx_inactive, &sc->sc_rx[i],
next);
UATH_STAT_INC(sc, st_rx_inactive);
}
return (0);
}
static int
uath_alloc_tx_data_list(struct uath_softc *sc)
{
int error, i;
error = uath_alloc_data_list(sc,
sc->sc_tx, UATH_TX_DATA_LIST_COUNT, UATH_MAX_TXBUFSZ,
sc->sc_tx_dma_buf);
if (error != 0)
return (error);
STAILQ_INIT(&sc->sc_tx_active);
STAILQ_INIT(&sc->sc_tx_inactive);
STAILQ_INIT(&sc->sc_tx_pending);
for (i = 0; i < UATH_TX_DATA_LIST_COUNT; i++) {
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, &sc->sc_tx[i],
next);
UATH_STAT_INC(sc, st_tx_inactive);
}
return (0);
}
static void
uath_free_rx_data_list(struct uath_softc *sc)
{
uath_free_data_list(sc, sc->sc_rx, UATH_RX_DATA_LIST_COUNT,
1 /* free mbufs */);
}
static void
uath_free_tx_data_list(struct uath_softc *sc)
{
uath_free_data_list(sc, sc->sc_tx, UATH_TX_DATA_LIST_COUNT,
0 /* no mbufs */);
}
static struct ieee80211vap *
uath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct uath_vap *uvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return (NULL);
- uvp = (struct uath_vap *) malloc(sizeof(struct uath_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (uvp == NULL)
- return (NULL);
+ uvp = malloc(sizeof(struct uath_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &uvp->vap;
/* enable s/w bmiss handling for sta mode */
if (ieee80211_vap_setup(ic, vap, name, unit, opmode,
- flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) {
+ flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) {
/* out of memory */
free(uvp, M_80211_VAP);
return (NULL);
}
/* override state transition machine */
uvp->newstate = vap->iv_newstate;
vap->iv_newstate = uath_newstate;
/* complete setup */
ieee80211_vap_attach(vap, ieee80211_media_change,
- ieee80211_media_status);
+ ieee80211_media_status, mac);
ic->ic_opmode = opmode;
return (vap);
}
static void
uath_vap_delete(struct ieee80211vap *vap)
{
struct uath_vap *uvp = UATH_VAP(vap);
ieee80211_vap_detach(vap);
free(uvp, M_80211_VAP);
}
static int
-uath_init_locked(void *arg)
+uath_init(struct uath_softc *sc)
{
- struct uath_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t val;
int error;
UATH_ASSERT_LOCKED(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- uath_stop_locked(ifp);
+ if (sc->sc_flags & UATH_FLAG_INITDONE)
+ uath_stop(sc);
/* reset variables */
sc->sc_intrx_nextnum = sc->sc_msgid = 0;
val = htobe32(0);
uath_cmd_write(sc, WDCMSG_BIND, &val, sizeof val, 0);
/* set MAC address */
- uath_config_multi(sc, CFG_MAC_ADDR, IF_LLADDR(ifp), IEEE80211_ADDR_LEN);
+ uath_config_multi(sc, CFG_MAC_ADDR,
+ vap ? vap->iv_myaddr : ic->ic_macaddr, IEEE80211_ADDR_LEN);
/* XXX honor net80211 state */
uath_config(sc, CFG_RATE_CONTROL_ENABLE, 0x00000001);
uath_config(sc, CFG_DIVERSITY_CTL, 0x00000001);
uath_config(sc, CFG_ABOLT, 0x0000003f);
uath_config(sc, CFG_WME_ENABLED, 0x00000001);
uath_config(sc, CFG_SERVICE_TYPE, 1);
uath_config(sc, CFG_TP_SCALE, 0x00000000);
uath_config(sc, CFG_TPC_HALF_DBM5, 0x0000003c);
uath_config(sc, CFG_TPC_HALF_DBM2, 0x0000003c);
uath_config(sc, CFG_OVERRD_TX_POWER, 0x00000000);
uath_config(sc, CFG_GMODE_PROTECTION, 0x00000000);
uath_config(sc, CFG_GMODE_PROTECT_RATE_INDEX, 0x00000003);
uath_config(sc, CFG_PROTECTION_TYPE, 0x00000000);
uath_config(sc, CFG_MODE_CTS, 0x00000002);
error = uath_cmd_read(sc, WDCMSG_TARGET_START, NULL, 0,
&val, sizeof(val), UATH_CMD_FLAG_MAGIC);
if (error) {
device_printf(sc->sc_dev,
"could not start target, error %d\n", error);
goto fail;
}
DPRINTF(sc, UATH_DEBUG_INIT, "%s returns handle: 0x%x\n",
uath_codename(WDCMSG_TARGET_START), be32toh(val));
/* set default channel */
error = uath_switch_channel(sc, ic->ic_curchan);
if (error) {
device_printf(sc->sc_dev,
"could not switch channel, error %d\n", error);
goto fail;
}
val = htobe32(TARGET_DEVICE_AWAKE);
uath_cmd_write(sc, WDCMSG_SET_PWR_MODE, &val, sizeof val, 0);
/* XXX? check */
uath_cmd_write(sc, WDCMSG_RESET_KEY_CACHE, NULL, 0, 0);
usbd_transfer_start(sc->sc_xfer[UATH_BULK_RX]);
/* enable Rx */
uath_set_rxfilter(sc, 0x0, UATH_FILTER_OP_INIT);
uath_set_rxfilter(sc,
UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON,
UATH_FILTER_OP_SET);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
sc->sc_flags |= UATH_FLAG_INITDONE;
callout_reset(&sc->watchdog_ch, hz, uath_watchdog, sc);
return (0);
fail:
- uath_stop_locked(ifp);
+ uath_stop(sc);
return (error);
}
static void
-uath_init(void *arg)
+uath_stop(struct uath_softc *sc)
{
- struct uath_softc *sc = arg;
- UATH_LOCK(sc);
- (void)uath_init_locked(sc);
- UATH_UNLOCK(sc);
-}
-
-static void
-uath_stop_locked(struct ifnet *ifp)
-{
- struct uath_softc *sc = ifp->if_softc;
-
UATH_ASSERT_LOCKED(sc);
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
sc->sc_flags &= ~UATH_FLAG_INITDONE;
callout_stop(&sc->stat_ch);
callout_stop(&sc->watchdog_ch);
sc->sc_tx_timer = 0;
/* abort pending transmits */
uath_abort_xfers(sc);
/* flush data & control requests into the target */
(void)uath_flush(sc);
	/* set the LED state to disconnected. */
uath_set_ledstate(sc, 0);
/* stop the target */
uath_cmd_write(sc, WDCMSG_TARGET_STOP, NULL, 0, 0);
}
-static void
-uath_stop(struct ifnet *ifp)
-{
- struct uath_softc *sc = ifp->if_softc;
-
- UATH_LOCK(sc);
- uath_stop_locked(ifp);
- UATH_UNLOCK(sc);
-}
-
static int
uath_config(struct uath_softc *sc, uint32_t reg, uint32_t val)
{
struct uath_write_mac write;
int error;
write.reg = htobe32(reg);
write.len = htobe32(0); /* 0 = single write */
*(uint32_t *)write.data = htobe32(val);
error = uath_cmd_write(sc, WDCMSG_TARGET_SET_CONFIG, &write,
3 * sizeof (uint32_t), 0);
if (error != 0) {
device_printf(sc->sc_dev, "could not write register 0x%02x\n",
reg);
}
return (error);
}
static int
uath_config_multi(struct uath_softc *sc, uint32_t reg, const void *data,
int len)
{
struct uath_write_mac write;
int error;
write.reg = htobe32(reg);
write.len = htobe32(len);
bcopy(data, write.data, len);
/* properly handle the case where len is zero (reset) */
error = uath_cmd_write(sc, WDCMSG_TARGET_SET_CONFIG, &write,
(len == 0) ? sizeof (uint32_t) : 2 * sizeof (uint32_t) + len, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not write %d bytes to register 0x%02x\n", len, reg);
}
return (error);
}
static int
uath_switch_channel(struct uath_softc *sc, struct ieee80211_channel *c)
{
int error;
UATH_ASSERT_LOCKED(sc);
/* set radio frequency */
error = uath_set_chan(sc, c);
if (error) {
device_printf(sc->sc_dev,
"could not set channel, error %d\n", error);
goto failed;
}
/* reset Tx rings */
error = uath_reset_tx_queues(sc);
if (error) {
device_printf(sc->sc_dev,
"could not reset Tx queues, error %d\n", error);
goto failed;
}
/* set Tx rings WME properties */
error = uath_wme_init(sc);
if (error) {
device_printf(sc->sc_dev,
"could not init Tx queues, error %d\n", error);
goto failed;
}
error = uath_set_ledstate(sc, 0);
if (error) {
device_printf(sc->sc_dev,
"could not set led state, error %d\n", error);
goto failed;
}
error = uath_flush(sc);
if (error) {
device_printf(sc->sc_dev,
"could not flush pipes, error %d\n", error);
goto failed;
}
failed:
return (error);
}
static int
uath_set_rxfilter(struct uath_softc *sc, uint32_t bits, uint32_t op)
{
struct uath_cmd_rx_filter rxfilter;
rxfilter.bits = htobe32(bits);
rxfilter.op = htobe32(op);
DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL,
"setting Rx filter=0x%x flags=0x%x\n", bits, op);
return uath_cmd_write(sc, WDCMSG_RX_FILTER, &rxfilter,
sizeof rxfilter, 0);
}
static void
uath_watchdog(void *arg)
{
struct uath_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
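	/*
	 * sc_tx_timer is armed to 5 (seconds) whenever a frame is queued on
	 * the bulk-Tx pipe and ticked down here once a second; reaching zero
	 * means the transfer never completed.
	 */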
if (sc->sc_tx_timer > 0) {
if (--sc->sc_tx_timer == 0) {
device_printf(sc->sc_dev, "device timeout\n");
- /*uath_init(ifp); XXX needs a process context! */
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ /*uath_init(sc); XXX needs a process context! */
+ counter_u64_add(ic->ic_oerrors, 1);
return;
}
callout_reset(&sc->watchdog_ch, hz, uath_watchdog, sc);
}
}
static void
uath_abort_xfers(struct uath_softc *sc)
{
int i;
UATH_ASSERT_LOCKED(sc);
/* abort any pending transfers */
for (i = 0; i < UATH_N_XFERS; i++)
usbd_transfer_stop(sc->sc_xfer[i]);
}
static int
uath_flush(struct uath_softc *sc)
{
int error;
error = uath_dataflush(sc);
if (error != 0)
goto failed;
error = uath_cmdflush(sc);
if (error != 0)
goto failed;
failed:
return (error);
}
static int
uath_cmdflush(struct uath_softc *sc)
{
return uath_cmd_write(sc, WDCMSG_FLUSH, NULL, 0, 0);
}
static int
uath_dataflush(struct uath_softc *sc)
{
struct uath_data *data;
struct uath_chunk *chunk;
struct uath_tx_desc *desc;
UATH_ASSERT_LOCKED(sc);
data = uath_getbuf(sc);
if (data == NULL)
return (ENOBUFS);
data->buflen = sizeof(struct uath_chunk) + sizeof(struct uath_tx_desc);
data->m = NULL;
data->ni = NULL;
chunk = (struct uath_chunk *)data->buf;
desc = (struct uath_tx_desc *)(chunk + 1);
/* one chunk only */
chunk->seqnum = 0;
chunk->flags = UATH_CFLAGS_FINAL;
chunk->length = htobe16(sizeof (struct uath_tx_desc));
memset(desc, 0, sizeof(struct uath_tx_desc));
desc->msglen = htobe32(sizeof(struct uath_tx_desc));
desc->msgid = (sc->sc_msgid++) + 1; /* don't care about endianness */
desc->type = htobe32(WDCMSG_FLUSH);
desc->txqid = htobe32(0);
desc->connid = htobe32(0);
desc->flags = htobe32(0);
#ifdef UATH_DEBUG
if (sc->sc_debug & UATH_DEBUG_CMDS) {
DPRINTF(sc, UATH_DEBUG_RESET, "send flush ix %d\n",
desc->msgid);
if (sc->sc_debug & UATH_DEBUG_CMDS_DUMP)
uath_dump_cmd(data->buf, data->buflen, '+');
}
#endif
STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next);
UATH_STAT_INC(sc, st_tx_pending);
sc->sc_tx_timer = 5;
usbd_transfer_start(sc->sc_xfer[UATH_BULK_TX]);
return (0);
}
static struct uath_data *
_uath_getbuf(struct uath_softc *sc)
{
struct uath_data *bf;
bf = STAILQ_FIRST(&sc->sc_tx_inactive);
if (bf != NULL) {
STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next);
UATH_STAT_DEC(sc, st_tx_inactive);
} else
bf = NULL;
if (bf == NULL)
DPRINTF(sc, UATH_DEBUG_XMIT, "%s: %s\n", __func__,
"out of xmit buffers");
return (bf);
}
static struct uath_data *
uath_getbuf(struct uath_softc *sc)
{
struct uath_data *bf;
UATH_ASSERT_LOCKED(sc);
bf = _uath_getbuf(sc);
- if (bf == NULL) {
- struct ifnet *ifp = sc->sc_ifp;
-
+ if (bf == NULL)
DPRINTF(sc, UATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- }
return (bf);
}
static int
uath_set_ledstate(struct uath_softc *sc, int connected)
{
DPRINTF(sc, UATH_DEBUG_LED,
"set led state %sconnected\n", connected ? "" : "!");
connected = htobe32(connected);
return uath_cmd_write(sc, WDCMSG_SET_LED_STATE,
&connected, sizeof connected, 0);
}
static int
uath_set_chan(struct uath_softc *sc, struct ieee80211_channel *c)
{
#ifdef UATH_DEBUG
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
#endif
struct uath_cmd_reset reset;
memset(&reset, 0, sizeof(reset));
if (IEEE80211_IS_CHAN_2GHZ(c))
reset.flags |= htobe32(UATH_CHAN_2GHZ);
if (IEEE80211_IS_CHAN_5GHZ(c))
reset.flags |= htobe32(UATH_CHAN_5GHZ);
/* NB: 11g =>'s 11b so don't specify both OFDM and CCK */
if (IEEE80211_IS_CHAN_OFDM(c))
reset.flags |= htobe32(UATH_CHAN_OFDM);
else if (IEEE80211_IS_CHAN_CCK(c))
reset.flags |= htobe32(UATH_CHAN_CCK);
/* turbo can be used in either 2GHz or 5GHz */
if (c->ic_flags & IEEE80211_CHAN_TURBO)
reset.flags |= htobe32(UATH_CHAN_TURBO);
reset.freq = htobe32(c->ic_freq);
reset.maxrdpower = htobe32(50); /* XXX */
reset.channelchange = htobe32(1);
reset.keeprccontent = htobe32(0);
DPRINTF(sc, UATH_DEBUG_CHANNEL, "set channel %d, flags 0x%x freq %u\n",
ieee80211_chan2ieee(ic, c),
be32toh(reset.flags), be32toh(reset.freq));
return uath_cmd_write(sc, WDCMSG_RESET, &reset, sizeof reset, 0);
}
static int
uath_reset_tx_queues(struct uath_softc *sc)
{
int ac, error;
DPRINTF(sc, UATH_DEBUG_RESET, "%s: reset Tx queues\n", __func__);
for (ac = 0; ac < 4; ac++) {
const uint32_t qid = htobe32(ac);
error = uath_cmd_write(sc, WDCMSG_RELEASE_TX_QUEUE, &qid,
sizeof qid, 0);
if (error != 0)
break;
}
return (error);
}
static int
uath_wme_init(struct uath_softc *sc)
{
/* XXX get from net80211 */
static const struct uath_wme_settings uath_wme_11g[4] = {
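		/* columns: aifsn, logcwmin, logcwmax, txop, acm */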
{ 7, 4, 10, 0, 0 }, /* Background */
{ 3, 4, 10, 0, 0 }, /* Best-Effort */
{ 3, 3, 4, 26, 0 }, /* Video */
{ 2, 2, 3, 47, 0 } /* Voice */
};
struct uath_cmd_txq_setup qinfo;
int ac, error;
DPRINTF(sc, UATH_DEBUG_WME, "%s: setup Tx queues\n", __func__);
for (ac = 0; ac < 4; ac++) {
qinfo.qid = htobe32(ac);
qinfo.len = htobe32(sizeof(qinfo.attr));
qinfo.attr.priority = htobe32(ac); /* XXX */
qinfo.attr.aifs = htobe32(uath_wme_11g[ac].aifsn);
qinfo.attr.logcwmin = htobe32(uath_wme_11g[ac].logcwmin);
qinfo.attr.logcwmax = htobe32(uath_wme_11g[ac].logcwmax);
qinfo.attr.bursttime = htobe32(UATH_TXOP_TO_US(
uath_wme_11g[ac].txop));
qinfo.attr.mode = htobe32(uath_wme_11g[ac].acm);/*XXX? */
qinfo.attr.qflags = htobe32(1); /* XXX? */
error = uath_cmd_write(sc, WDCMSG_SETUP_TX_QUEUE, &qinfo,
sizeof qinfo, 0);
if (error != 0)
break;
}
return (error);
}
-static int
-uath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+uath_parent(struct ieee80211com *ic)
{
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *) data;
struct uath_softc *sc = ic->ic_softc;
- int error;
int startall = 0;
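	/*
	 * NB: net80211 invokes this ic_parent callback whenever the number
	 * of running vaps (ic_nrunning) changes; it takes over the up/down
	 * handling previously done in the SIOCSIFFLAGS ioctl path.
	 */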
UATH_LOCK(sc);
- error = (sc->sc_flags & UATH_FLAG_INVALID) ? ENXIO : 0;
- UATH_UNLOCK(sc);
- if (error)
- return (error);
-
- switch (cmd) {
- case SIOCSIFFLAGS:
- if (ifp->if_flags & IFF_UP) {
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- uath_init(sc);
- startall = 1;
- }
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- uath_stop(ifp);
- }
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
+ if (sc->sc_flags & UATH_FLAG_INVALID) {
+ UATH_UNLOCK(sc);
+ return;
}
- return (error);
+ if (ic->ic_nrunning > 0) {
+ if (!(sc->sc_flags & UATH_FLAG_INITDONE)) {
+ uath_init(sc);
+ startall = 1;
+ }
+ } else if (sc->sc_flags & UATH_FLAG_INITDONE)
+ uath_stop(sc);
+ UATH_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
}
static int
uath_tx_start(struct uath_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
struct uath_data *data)
{
struct ieee80211vap *vap = ni->ni_vap;
struct uath_chunk *chunk;
struct uath_tx_desc *desc;
const struct ieee80211_frame *wh;
struct ieee80211_key *k;
int framelen, msglen;
UATH_ASSERT_LOCKED(sc);
data->ni = ni;
data->m = m0;
chunk = (struct uath_chunk *)data->buf;
desc = (struct uath_tx_desc *)(chunk + 1);
if (ieee80211_radiotap_active_vap(vap)) {
struct uath_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
if (m0->m_flags & M_FRAG)
tap->wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
ieee80211_radiotap_tx(vap, m0);
}
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return (ENOBUFS);
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
m_copydata(m0, 0, m0->m_pkthdr.len, (uint8_t *)(desc + 1));
framelen = m0->m_pkthdr.len + IEEE80211_CRC_LEN;
msglen = framelen + sizeof (struct uath_tx_desc);
data->buflen = msglen + sizeof (struct uath_chunk);
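	/*
	 * On-wire layout is [uath_chunk][uath_tx_desc][802.11 frame];
	 * framelen also counts IEEE80211_CRC_LEN for the FCS, which is not
	 * part of the data copied above.
	 */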
/* one chunk only for now */
chunk->seqnum = sc->sc_seqnum++;
chunk->flags = (m0->m_flags & M_FRAG) ? 0 : UATH_CFLAGS_FINAL;
if (m0->m_flags & M_LASTFRAG)
chunk->flags |= UATH_CFLAGS_FINAL;
chunk->flags = UATH_CFLAGS_FINAL;
chunk->length = htobe16(msglen);
/* fill Tx descriptor */
desc->msglen = htobe32(msglen);
/* NB: to get UATH_TX_NOTIFY reply, `msgid' must be larger than 0 */
desc->msgid = (sc->sc_msgid++) + 1; /* don't care about endianness */
desc->type = htobe32(WDCMSG_SEND);
switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
case IEEE80211_FC0_TYPE_CTL:
case IEEE80211_FC0_TYPE_MGT:
		/* NB: force all management frames to highest queue */
		if (ni->ni_flags & IEEE80211_NODE_QOS) {
desc->txqid = htobe32(WME_AC_VO | UATH_TXQID_MINRATE);
} else
desc->txqid = htobe32(WME_AC_BE | UATH_TXQID_MINRATE);
break;
case IEEE80211_FC0_TYPE_DATA:
/* XXX multicast frames should honor mcastrate */
desc->txqid = htobe32(M_WME_GETAC(m0));
break;
default:
device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n",
wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
m_freem(m0);
return (EIO);
}
if (vap->iv_state == IEEE80211_S_AUTH ||
vap->iv_state == IEEE80211_S_ASSOC ||
vap->iv_state == IEEE80211_S_RUN)
desc->connid = htobe32(UATH_ID_BSS);
else
desc->connid = htobe32(UATH_ID_INVALID);
desc->flags = htobe32(0 /* no UATH_TX_NOTIFY */);
desc->buflen = htobe32(m0->m_pkthdr.len);
#ifdef UATH_DEBUG
DPRINTF(sc, UATH_DEBUG_XMIT,
"send frame ix %u framelen %d msglen %d connid 0x%x txqid 0x%x\n",
desc->msgid, framelen, msglen, be32toh(desc->connid),
be32toh(desc->txqid));
if (sc->sc_debug & UATH_DEBUG_XMIT_DUMP)
uath_dump_cmd(data->buf, data->buflen, '+');
#endif
STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next);
UATH_STAT_INC(sc, st_tx_pending);
usbd_transfer_start(sc->sc_xfer[UATH_BULK_TX]);
return (0);
}
/*
* Cleanup driver resources when we run out of buffers while processing
* fragments; return the tx buffers allocated and drop node references.
*/
static void
uath_txfrag_cleanup(struct uath_softc *sc,
uath_datahead *frags, struct ieee80211_node *ni)
{
struct uath_data *bf, *next;
UATH_ASSERT_LOCKED(sc);
STAILQ_FOREACH_SAFE(bf, frags, next, next) {
/* NB: bf assumed clean */
STAILQ_REMOVE_HEAD(frags, next);
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next);
UATH_STAT_INC(sc, st_tx_inactive);
ieee80211_node_decref(ni);
}
}
/*
* Setup xmit of a fragmented frame. Allocate a buffer for each frag and bump
* the node reference count to reflect the held reference to be setup by
* uath_tx_start.
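 * Returns non-zero only if a buffer could be reserved for every trailing
 * fragment; on failure the partial list is returned to sc_tx_inactive and
 * 0 is returned.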
*/
static int
uath_txfrag_setup(struct uath_softc *sc, uath_datahead *frags,
struct mbuf *m0, struct ieee80211_node *ni)
{
struct mbuf *m;
struct uath_data *bf;
UATH_ASSERT_LOCKED(sc);
for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
bf = uath_getbuf(sc);
if (bf == NULL) { /* out of buffers, cleanup */
uath_txfrag_cleanup(sc, frags, ni);
break;
}
ieee80211_node_incref(ni);
STAILQ_INSERT_TAIL(frags, bf, next);
}
return !STAILQ_EMPTY(frags);
}
/*
* Reclaim mbuf resources. For fragmented frames we need to claim each frag
* chained with m_nextpkt.
*/
static void
uath_freetx(struct mbuf *m)
{
struct mbuf *next;
do {
next = m->m_nextpkt;
m->m_nextpkt = NULL;
m_freem(m);
} while ((m = next) != NULL);
}
+static int
+uath_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct uath_softc *sc = ic->ic_softc;
+ int error;
+
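+	/*
+	 * NB: frames handed down by net80211 are queued on the driver's own
+	 * mbufq (sc_snd) and drained by uath_start() under the driver lock.
+	 */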
+ UATH_LOCK(sc);
+ if ((sc->sc_flags & UATH_FLAG_INITDONE) == 0) {
+ UATH_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ UATH_UNLOCK(sc);
+ return (error);
+ }
+ uath_start(sc);
+ UATH_UNLOCK(sc);
+
+ return (0);
+}
+
static void
-uath_start(struct ifnet *ifp)
+uath_start(struct uath_softc *sc)
{
struct uath_data *bf;
- struct uath_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct mbuf *m, *next;
uath_datahead frags;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+ UATH_ASSERT_LOCKED(sc);
+
+ if ((sc->sc_flags & UATH_FLAG_INITDONE) == 0 ||
(sc->sc_flags & UATH_FLAG_INVALID))
return;
- UATH_LOCK(sc);
- for (;;) {
+ while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
bf = uath_getbuf(sc);
- if (bf == NULL)
+ if (bf == NULL) {
+ mbufq_prepend(&sc->sc_snd, m);
break;
-
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL) {
- STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next);
- UATH_STAT_INC(sc, st_tx_inactive);
- break;
}
+
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
m->m_pkthdr.rcvif = NULL;
/*
* Check for fragmentation. If this frame has been broken up
* verify we have enough buffers to send all the fragments
* so all go out or none...
*/
STAILQ_INIT(&frags);
if ((m->m_flags & M_FRAG) &&
!uath_txfrag_setup(sc, &frags, m, ni)) {
DPRINTF(sc, UATH_DEBUG_XMIT,
"%s: out of txfrag buffers\n", __func__);
uath_freetx(m);
goto bad;
}
sc->sc_seqnum = 0;
nextfrag:
/*
* Pass the frame to the h/w for transmission.
* Fragmented frames have each frag chained together
* with m_nextpkt. We know there are sufficient uath_data's
* to send all the frags because of work done by
* uath_txfrag_setup.
*/
next = m->m_nextpkt;
if (uath_tx_start(sc, m, ni, bf) != 0) {
bad:
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
reclaim:
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next);
UATH_STAT_INC(sc, st_tx_inactive);
uath_txfrag_cleanup(sc, &frags, ni);
ieee80211_free_node(ni);
continue;
}
if (next != NULL) {
/*
* Beware of state changing between frags.
XXX check sta power-save state?
*/
if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
DPRINTF(sc, UATH_DEBUG_XMIT,
"%s: flush fragmented packet, state %s\n",
__func__,
ieee80211_state_name[ni->ni_vap->iv_state]);
uath_freetx(next);
goto reclaim;
}
m = next;
bf = STAILQ_FIRST(&frags);
KASSERT(bf != NULL, ("no buf for txfrag"));
STAILQ_REMOVE_HEAD(&frags, next);
goto nextfrag;
}
sc->sc_tx_timer = 5;
}
- UATH_UNLOCK(sc);
}
static int
uath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct uath_data *bf;
struct uath_softc *sc = ic->ic_softc;
+ UATH_LOCK(sc);
/* prevent management frames from being sent if we're not ready */
if ((sc->sc_flags & UATH_FLAG_INVALID) ||
- !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ !(sc->sc_flags & UATH_FLAG_INITDONE)) {
m_freem(m);
ieee80211_free_node(ni);
+ UATH_UNLOCK(sc);
return (ENETDOWN);
}
- UATH_LOCK(sc);
/* grab a TX buffer */
bf = uath_getbuf(sc);
if (bf == NULL) {
ieee80211_free_node(ni);
m_freem(m);
UATH_UNLOCK(sc);
return (ENOBUFS);
}
sc->sc_seqnum = 0;
if (uath_tx_start(sc, m, ni, bf) != 0) {
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next);
UATH_STAT_INC(sc, st_tx_inactive);
UATH_UNLOCK(sc);
return (EIO);
}
UATH_UNLOCK(sc);
sc->sc_tx_timer = 5;
return (0);
}
static void
uath_scan_start(struct ieee80211com *ic)
{
/* do nothing */
}
static void
uath_scan_end(struct ieee80211com *ic)
{
/* do nothing */
}
static void
uath_set_channel(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
struct uath_softc *sc = ic->ic_softc;
UATH_LOCK(sc);
if ((sc->sc_flags & UATH_FLAG_INVALID) ||
- (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ (sc->sc_flags & UATH_FLAG_INITDONE) == 0) {
UATH_UNLOCK(sc);
return;
}
(void)uath_switch_channel(sc, ic->ic_curchan);
UATH_UNLOCK(sc);
}
static int
uath_set_rxmulti_filter(struct uath_softc *sc)
{
/* XXX broken */
return (0);
}
static void
uath_update_mcast(struct ieee80211com *ic)
{
struct uath_softc *sc = ic->ic_softc;
UATH_LOCK(sc);
if ((sc->sc_flags & UATH_FLAG_INVALID) ||
- (ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ (sc->sc_flags & UATH_FLAG_INITDONE) == 0) {
UATH_UNLOCK(sc);
return;
}
/*
	 * This avoids a race condition when trying to connect to an AP
	 * with WPA.
*/
if (sc->sc_flags & UATH_FLAG_INITDONE)
(void)uath_set_rxmulti_filter(sc);
UATH_UNLOCK(sc);
}
static void
uath_update_promisc(struct ieee80211com *ic)
{
struct uath_softc *sc = ic->ic_softc;
UATH_LOCK(sc);
if ((sc->sc_flags & UATH_FLAG_INVALID) ||
- (ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ (sc->sc_flags & UATH_FLAG_INITDONE) == 0) {
UATH_UNLOCK(sc);
return;
}
if (sc->sc_flags & UATH_FLAG_INITDONE) {
uath_set_rxfilter(sc,
UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON |
UATH_FILTER_RX_PROM, UATH_FILTER_OP_SET);
}
UATH_UNLOCK(sc);
}
static int
uath_create_connection(struct uath_softc *sc, uint32_t connid)
{
const struct ieee80211_rateset *rs;
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ieee80211_node *ni;
struct uath_cmd_create_connection create;
ni = ieee80211_ref_node(vap->iv_bss);
memset(&create, 0, sizeof(create));
create.connid = htobe32(connid);
create.bssid = htobe32(0);
/* XXX packed or not? */
create.size = htobe32(sizeof(struct uath_cmd_rateset));
rs = &ni->ni_rates;
create.connattr.rateset.length = rs->rs_nrates;
bcopy(rs->rs_rates, &create.connattr.rateset.set[0],
rs->rs_nrates);
/* XXX turbo */
if (IEEE80211_IS_CHAN_A(ni->ni_chan))
create.connattr.wlanmode = htobe32(WLAN_MODE_11a);
else if (IEEE80211_IS_CHAN_ANYG(ni->ni_chan))
create.connattr.wlanmode = htobe32(WLAN_MODE_11g);
else
create.connattr.wlanmode = htobe32(WLAN_MODE_11b);
ieee80211_free_node(ni);
return uath_cmd_write(sc, WDCMSG_CREATE_CONNECTION, &create,
sizeof create, 0);
}
static int
uath_set_rates(struct uath_softc *sc, const struct ieee80211_rateset *rs)
{
struct uath_cmd_rates rates;
memset(&rates, 0, sizeof(rates));
rates.connid = htobe32(UATH_ID_BSS); /* XXX */
rates.size = htobe32(sizeof(struct uath_cmd_rateset));
/* XXX bounds check rs->rs_nrates */
rates.rateset.length = rs->rs_nrates;
bcopy(rs->rs_rates, &rates.rateset.set[0], rs->rs_nrates);
DPRINTF(sc, UATH_DEBUG_RATES,
"setting supported rates nrates=%d\n", rs->rs_nrates);
return uath_cmd_write(sc, WDCMSG_SET_BASIC_RATE,
&rates, sizeof rates, 0);
}
static int
uath_write_associd(struct uath_softc *sc)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ieee80211_node *ni;
struct uath_cmd_set_associd associd;
ni = ieee80211_ref_node(vap->iv_bss);
memset(&associd, 0, sizeof(associd));
associd.defaultrateix = htobe32(1); /* XXX */
associd.associd = htobe32(ni->ni_associd);
associd.timoffset = htobe32(0x3b); /* XXX */
IEEE80211_ADDR_COPY(associd.bssid, ni->ni_bssid);
ieee80211_free_node(ni);
return uath_cmd_write(sc, WDCMSG_WRITE_ASSOCID, &associd,
sizeof associd, 0);
}
static int
uath_set_ledsteady(struct uath_softc *sc, int lednum, int ledmode)
{
struct uath_cmd_ledsteady led;
led.lednum = htobe32(lednum);
led.ledmode = htobe32(ledmode);
DPRINTF(sc, UATH_DEBUG_LED, "set %s led %s (steady)\n",
(lednum == UATH_LED_LINK) ? "link" : "activity",
ledmode ? "on" : "off");
return uath_cmd_write(sc, WDCMSG_SET_LED_STEADY, &led, sizeof led, 0);
}
static int
uath_set_ledblink(struct uath_softc *sc, int lednum, int ledmode,
int blinkrate, int slowmode)
{
struct uath_cmd_ledblink led;
led.lednum = htobe32(lednum);
led.ledmode = htobe32(ledmode);
led.blinkrate = htobe32(blinkrate);
led.slowmode = htobe32(slowmode);
DPRINTF(sc, UATH_DEBUG_LED, "set %s led %s (blink)\n",
(lednum == UATH_LED_LINK) ? "link" : "activity",
ledmode ? "on" : "off");
return uath_cmd_write(sc, WDCMSG_SET_LED_BLINK, &led, sizeof led, 0);
}
static int
uath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
enum ieee80211_state ostate = vap->iv_state;
int error;
struct ieee80211_node *ni;
struct ieee80211com *ic = vap->iv_ic;
struct uath_softc *sc = ic->ic_softc;
struct uath_vap *uvp = UATH_VAP(vap);
DPRINTF(sc, UATH_DEBUG_STATE,
"%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate]);
IEEE80211_UNLOCK(ic);
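	/*
	 * NB: the net80211 com lock is dropped while issuing firmware
	 * commands that may sleep; it is retaken before chaining to the
	 * stock newstate handler.
	 */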
UATH_LOCK(sc);
callout_stop(&sc->stat_ch);
callout_stop(&sc->watchdog_ch);
ni = ieee80211_ref_node(vap->iv_bss);
switch (nstate) {
case IEEE80211_S_INIT:
if (ostate == IEEE80211_S_RUN) {
/* turn link and activity LEDs off */
uath_set_ledstate(sc, 0);
}
break;
case IEEE80211_S_SCAN:
break;
case IEEE80211_S_AUTH:
/* XXX good place? set RTS threshold */
uath_config(sc, CFG_USER_RTS_THRESHOLD, vap->iv_rtsthreshold);
/* XXX bad place */
error = uath_set_keys(sc, vap);
if (error != 0) {
device_printf(sc->sc_dev,
"could not set crypto keys, error %d\n", error);
break;
}
if (uath_switch_channel(sc, ni->ni_chan) != 0) {
device_printf(sc->sc_dev, "could not switch channel\n");
break;
}
if (uath_create_connection(sc, UATH_ID_BSS) != 0) {
device_printf(sc->sc_dev,
"could not create connection\n");
break;
}
break;
case IEEE80211_S_ASSOC:
if (uath_set_rates(sc, &ni->ni_rates) != 0) {
device_printf(sc->sc_dev,
"could not set negotiated rate set\n");
break;
}
break;
case IEEE80211_S_RUN:
		/* XXX monitor mode has not been tested */
if (ic->ic_opmode == IEEE80211_M_MONITOR) {
uath_set_ledstate(sc, 1);
break;
}
/*
* Tx rate is controlled by firmware, report the maximum
* negotiated rate in ifconfig output.
*/
ni->ni_txrate = ni->ni_rates.rs_rates[ni->ni_rates.rs_nrates-1];
if (uath_write_associd(sc) != 0) {
device_printf(sc->sc_dev,
"could not write association id\n");
break;
}
/* turn link LED on */
uath_set_ledsteady(sc, UATH_LED_LINK, UATH_LED_ON);
/* make activity LED blink */
uath_set_ledblink(sc, UATH_LED_ACTIVITY, UATH_LED_ON, 1, 2);
/* set state to associated */
uath_set_ledstate(sc, 1);
/* start statistics timer */
callout_reset(&sc->stat_ch, hz, uath_stat, sc);
break;
default:
break;
}
ieee80211_free_node(ni);
UATH_UNLOCK(sc);
IEEE80211_LOCK(ic);
return (uvp->newstate(vap, nstate, arg));
}
static int
uath_set_key(struct uath_softc *sc, const struct ieee80211_key *wk,
int index)
{
#if 0
struct uath_cmd_crypto crypto;
int i;
memset(&crypto, 0, sizeof(crypto));
crypto.keyidx = htobe32(index);
crypto.magic1 = htobe32(1);
crypto.size = htobe32(368);
crypto.mask = htobe32(0xffff);
crypto.flags = htobe32(0x80000068);
if (index != UATH_DEFAULT_KEY)
crypto.flags |= htobe32(index << 16);
memset(crypto.magic2, 0xff, sizeof(crypto.magic2));
/*
* Each byte of the key must be XOR'ed with 10101010 before being
* transmitted to the firmware.
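	 * (E.g. a key byte of 0x12 is sent as 0x12 ^ 0xaa = 0xb8.)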
*/
for (i = 0; i < wk->wk_keylen; i++)
crypto.key[i] = wk->wk_key[i] ^ 0xaa;
DPRINTF(sc, UATH_DEBUG_CRYPTO,
"setting crypto key index=%d len=%d\n", index, wk->wk_keylen);
return uath_cmd_write(sc, WDCMSG_SET_KEY_CACHE_ENTRY, &crypto,
sizeof crypto, 0);
#else
	/* XXX support H/W crypto */
return (0);
#endif
}
static int
uath_set_keys(struct uath_softc *sc, struct ieee80211vap *vap)
{
int i, error;
error = 0;
for (i = 0; i < IEEE80211_WEP_NKID; i++) {
const struct ieee80211_key *wk = &vap->iv_nw_keys[i];
if (wk->wk_flags & (IEEE80211_KEY_XMIT|IEEE80211_KEY_RECV)) {
error = uath_set_key(sc, wk, i);
if (error)
return (error);
}
}
if (vap->iv_def_txkey != IEEE80211_KEYIX_NONE) {
error = uath_set_key(sc, &vap->iv_nw_keys[vap->iv_def_txkey],
UATH_DEFAULT_KEY);
}
return (error);
}
#define UATH_SYSCTL_STAT_ADD32(c, h, n, p, d) \
SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
static void
uath_sysctl_node(struct uath_softc *sc)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *child;
struct sysctl_oid *tree;
struct uath_stat *stats;
stats = &sc->sc_stat;
ctx = device_get_sysctl_ctx(sc->sc_dev);
child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
NULL, "UATH statistics");
child = SYSCTL_CHILDREN(tree);
UATH_SYSCTL_STAT_ADD32(ctx, child, "badchunkseqnum",
&stats->st_badchunkseqnum, "Bad chunk sequence numbers");
UATH_SYSCTL_STAT_ADD32(ctx, child, "invalidlen", &stats->st_invalidlen,
"Invalid length");
UATH_SYSCTL_STAT_ADD32(ctx, child, "multichunk", &stats->st_multichunk,
"Multi chunks");
UATH_SYSCTL_STAT_ADD32(ctx, child, "toobigrxpkt",
&stats->st_toobigrxpkt, "Too big rx packets");
UATH_SYSCTL_STAT_ADD32(ctx, child, "stopinprogress",
&stats->st_stopinprogress, "Stop in progress");
UATH_SYSCTL_STAT_ADD32(ctx, child, "crcerrs", &stats->st_crcerr,
"CRC errors");
UATH_SYSCTL_STAT_ADD32(ctx, child, "phyerr", &stats->st_phyerr,
"PHY errors");
UATH_SYSCTL_STAT_ADD32(ctx, child, "decrypt_crcerr",
&stats->st_decrypt_crcerr, "Decryption CRC errors");
UATH_SYSCTL_STAT_ADD32(ctx, child, "decrypt_micerr",
&stats->st_decrypt_micerr, "Decryption Misc errors");
UATH_SYSCTL_STAT_ADD32(ctx, child, "decomperr", &stats->st_decomperr,
"Decomp errors");
UATH_SYSCTL_STAT_ADD32(ctx, child, "keyerr", &stats->st_keyerr,
"Key errors");
UATH_SYSCTL_STAT_ADD32(ctx, child, "err", &stats->st_err,
"Unknown errors");
UATH_SYSCTL_STAT_ADD32(ctx, child, "cmd_active",
&stats->st_cmd_active, "Active numbers in Command queue");
UATH_SYSCTL_STAT_ADD32(ctx, child, "cmd_inactive",
&stats->st_cmd_inactive, "Inactive numbers in Command queue");
UATH_SYSCTL_STAT_ADD32(ctx, child, "cmd_pending",
&stats->st_cmd_pending, "Pending numbers in Command queue");
UATH_SYSCTL_STAT_ADD32(ctx, child, "cmd_waiting",
&stats->st_cmd_waiting, "Waiting numbers in Command queue");
UATH_SYSCTL_STAT_ADD32(ctx, child, "rx_active",
&stats->st_rx_active, "Active numbers in RX queue");
UATH_SYSCTL_STAT_ADD32(ctx, child, "rx_inactive",
&stats->st_rx_inactive, "Inactive numbers in RX queue");
UATH_SYSCTL_STAT_ADD32(ctx, child, "tx_active",
&stats->st_tx_active, "Active numbers in TX queue");
UATH_SYSCTL_STAT_ADD32(ctx, child, "tx_inactive",
&stats->st_tx_inactive, "Inactive numbers in TX queue");
UATH_SYSCTL_STAT_ADD32(ctx, child, "tx_pending",
&stats->st_tx_pending, "Pending numbers in TX queue");
}
#undef UATH_SYSCTL_STAT_ADD32
static void
uath_cmdeof(struct uath_softc *sc, struct uath_cmd *cmd)
{
struct uath_cmd_hdr *hdr;
int dlen;
hdr = (struct uath_cmd_hdr *)cmd->buf;
/* NB: msgid is passed thru w/o byte swapping */
#ifdef UATH_DEBUG
if (sc->sc_debug & UATH_DEBUG_CMDS) {
int len = be32toh(hdr->len);
printf("%s: %s [ix %u] len %u status %u\n",
__func__, uath_codename(be32toh(hdr->code)),
hdr->msgid, len, be32toh(hdr->magic));
if (sc->sc_debug & UATH_DEBUG_CMDS_DUMP)
uath_dump_cmd(cmd->buf,
len > UATH_MAX_CMDSZ ? sizeof(*hdr) : len, '-');
}
#endif
hdr->code = be32toh(hdr->code);
hdr->len = be32toh(hdr->len);
hdr->magic = be32toh(hdr->magic); /* target status on return */
switch (hdr->code & 0xff) {
/* reply to a read command */
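	/*
	 * NB: the default label comes first on purpose; any code not matched
	 * by the explicit cases below is treated as such a reply.
	 */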
default:
dlen = hdr->len - sizeof(*hdr);
if (dlen < 0) {
device_printf(sc->sc_dev,
"Invalid header length %d\n", dlen);
return;
}
DPRINTF(sc, UATH_DEBUG_RX_PROC | UATH_DEBUG_RECV_ALL,
"%s: code %d data len %u\n",
__func__, hdr->code & 0xff, dlen);
/*
* The first response from the target after the
* HOST_AVAILABLE has an invalid msgid so we must
* treat it specially.
*/
if (hdr->msgid < UATH_CMD_LIST_COUNT) {
uint32_t *rp = (uint32_t *)(hdr+1);
u_int olen;
if (!(sizeof(*hdr) <= hdr->len &&
hdr->len < UATH_MAX_CMDSZ)) {
device_printf(sc->sc_dev,
"%s: invalid WDC msg length %u; "
"msg ignored\n", __func__, hdr->len);
return;
}
/*
* Calculate return/receive payload size; the
* first word, if present, always gives the
* number of bytes--unless it's 0 in which
* case a single 32-bit word should be present.
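			 * In other words the reply layout is
			 * [hdr][count][payload]; a count of 0 is shorthand
			 * for a single 32-bit payload word.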
*/
if (dlen >= (int)sizeof(uint32_t)) {
olen = be32toh(rp[0]);
dlen -= sizeof(uint32_t);
if (olen == 0) {
/* convention is 0 =>'s one word */
olen = sizeof(uint32_t);
/* XXX KASSERT(olen == dlen ) */
}
} else
olen = 0;
if (cmd->odata != NULL) {
/* NB: cmd->olen validated in uath_cmd */
if (olen > (u_int)cmd->olen) {
/* XXX complain? */
device_printf(sc->sc_dev,
"%s: cmd 0x%x olen %u cmd olen %u\n",
__func__, hdr->code, olen,
cmd->olen);
olen = cmd->olen;
}
if (olen > (u_int)dlen) {
/* XXX complain, shouldn't happen */
device_printf(sc->sc_dev,
"%s: cmd 0x%x olen %u dlen %u\n",
__func__, hdr->code, olen, dlen);
olen = dlen;
}
/* XXX have submitter do this */
/* copy answer into caller's supplied buffer */
bcopy(&rp[1], cmd->odata, olen);
cmd->olen = olen;
}
}
wakeup_one(cmd); /* wake up caller */
break;
case WDCMSG_TARGET_START:
if (hdr->msgid >= UATH_CMD_LIST_COUNT) {
/* XXX */
return;
}
dlen = hdr->len - sizeof(*hdr);
if (dlen != (int)sizeof(uint32_t)) {
/* XXX something wrong */
return;
}
/* XXX have submitter do this */
/* copy answer into caller's supplied buffer */
bcopy(hdr+1, cmd->odata, sizeof(uint32_t));
cmd->olen = sizeof(uint32_t);
wakeup_one(cmd); /* wake up caller */
break;
case WDCMSG_SEND_COMPLETE:
/* this notification is sent when UATH_TX_NOTIFY is set */
DPRINTF(sc, UATH_DEBUG_RX_PROC | UATH_DEBUG_RECV_ALL,
"%s: received Tx notification\n", __func__);
break;
case WDCMSG_TARGET_GET_STATS:
DPRINTF(sc, UATH_DEBUG_RX_PROC | UATH_DEBUG_RECV_ALL,
"%s: received device statistics\n", __func__);
callout_reset(&sc->stat_ch, hz, uath_stat, sc);
break;
}
}
static void
uath_intr_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct uath_softc *sc = usbd_xfer_softc(xfer);
struct uath_cmd *cmd;
struct usb_page_cache *pc;
int actlen;
usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
UATH_ASSERT_LOCKED(sc);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
cmd = STAILQ_FIRST(&sc->sc_cmd_waiting);
if (cmd == NULL)
goto setup;
STAILQ_REMOVE_HEAD(&sc->sc_cmd_waiting, next);
UATH_STAT_DEC(sc, st_cmd_waiting);
STAILQ_INSERT_TAIL(&sc->sc_cmd_inactive, cmd, next);
UATH_STAT_INC(sc, st_cmd_inactive);
KASSERT(actlen >= (int)sizeof(struct uath_cmd_hdr),
("short xfer error"));
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_out(pc, 0, cmd->buf, actlen);
uath_cmdeof(sc, cmd);
case USB_ST_SETUP:
setup:
usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
usbd_transfer_submit(xfer);
break;
default:
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
goto setup;
}
break;
}
}
static void
uath_intr_tx_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct uath_softc *sc = usbd_xfer_softc(xfer);
struct uath_cmd *cmd;
UATH_ASSERT_LOCKED(sc);
cmd = STAILQ_FIRST(&sc->sc_cmd_active);
if (cmd != NULL && USB_GET_STATE(xfer) != USB_ST_SETUP) {
STAILQ_REMOVE_HEAD(&sc->sc_cmd_active, next);
UATH_STAT_DEC(sc, st_cmd_active);
STAILQ_INSERT_TAIL((cmd->flags & UATH_CMD_FLAG_READ) ?
&sc->sc_cmd_waiting : &sc->sc_cmd_inactive, cmd, next);
if (cmd->flags & UATH_CMD_FLAG_READ)
UATH_STAT_INC(sc, st_cmd_waiting);
else
UATH_STAT_INC(sc, st_cmd_inactive);
}
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
case USB_ST_SETUP:
setup:
cmd = STAILQ_FIRST(&sc->sc_cmd_pending);
if (cmd == NULL) {
DPRINTF(sc, UATH_DEBUG_XMIT, "%s: empty pending queue\n",
__func__);
return;
}
STAILQ_REMOVE_HEAD(&sc->sc_cmd_pending, next);
UATH_STAT_DEC(sc, st_cmd_pending);
STAILQ_INSERT_TAIL((cmd->flags & UATH_CMD_FLAG_ASYNC) ?
&sc->sc_cmd_inactive : &sc->sc_cmd_active, cmd, next);
if (cmd->flags & UATH_CMD_FLAG_ASYNC)
UATH_STAT_INC(sc, st_cmd_inactive);
else
UATH_STAT_INC(sc, st_cmd_active);
usbd_xfer_set_frame_data(xfer, 0, cmd->buf, cmd->buflen);
usbd_transfer_submit(xfer);
break;
default:
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
goto setup;
}
break;
}
}
static void
uath_update_rxstat(struct uath_softc *sc, uint32_t status)
{
switch (status) {
case UATH_STATUS_STOP_IN_PROGRESS:
UATH_STAT_INC(sc, st_stopinprogress);
break;
case UATH_STATUS_CRC_ERR:
UATH_STAT_INC(sc, st_crcerr);
break;
case UATH_STATUS_PHY_ERR:
UATH_STAT_INC(sc, st_phyerr);
break;
case UATH_STATUS_DECRYPT_CRC_ERR:
UATH_STAT_INC(sc, st_decrypt_crcerr);
break;
case UATH_STATUS_DECRYPT_MIC_ERR:
UATH_STAT_INC(sc, st_decrypt_micerr);
break;
case UATH_STATUS_DECOMP_ERR:
UATH_STAT_INC(sc, st_decomperr);
break;
case UATH_STATUS_KEY_ERR:
UATH_STAT_INC(sc, st_keyerr);
break;
case UATH_STATUS_ERR:
UATH_STAT_INC(sc, st_err);
break;
default:
break;
}
}
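/*
 * uath_data_rxeof() reassembles frames that the firmware delivers split
 * into chunks: chunks carrying data are appended to the intermediate chain
 * sc_intrx_head/sc_intrx_tail (total length tracked in sc_intrx_len) until
 * a chunk flagged UATH_CFLAGS_FINAL completes the packet, at which point
 * the chain is handed up as one mbuf and the intermediate state is cleared
 * with UATH_RESET_INTRX().
 */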
static struct mbuf *
uath_data_rxeof(struct usb_xfer *xfer, struct uath_data *data,
struct uath_rx_desc **pdesc)
{
struct uath_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct uath_chunk *chunk;
struct uath_rx_desc *desc;
struct mbuf *m = data->m, *mnew, *mp;
uint16_t chunklen;
int actlen;
usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
if (actlen < (int)UATH_MIN_RXBUFSZ) {
DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL,
"%s: wrong xfer size (len=%d)\n", __func__, actlen);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return (NULL);
}
chunk = (struct uath_chunk *)data->buf;
if (chunk->seqnum == 0 && chunk->flags == 0 && chunk->length == 0) {
device_printf(sc->sc_dev, "%s: strange response\n", __func__);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
UATH_RESET_INTRX(sc);
return (NULL);
}
if (chunk->seqnum != sc->sc_intrx_nextnum) {
DPRINTF(sc, UATH_DEBUG_XMIT, "invalid seqnum %d, expected %d\n",
chunk->seqnum, sc->sc_intrx_nextnum);
UATH_STAT_INC(sc, st_badchunkseqnum);
if (sc->sc_intrx_head != NULL)
m_freem(sc->sc_intrx_head);
UATH_RESET_INTRX(sc);
return (NULL);
}
/* check multi-chunk frames */
if ((chunk->seqnum == 0 && !(chunk->flags & UATH_CFLAGS_FINAL)) ||
(chunk->seqnum != 0 && (chunk->flags & UATH_CFLAGS_FINAL)) ||
chunk->flags & UATH_CFLAGS_RXMSG)
UATH_STAT_INC(sc, st_multichunk);
chunklen = be16toh(chunk->length);
if (chunk->flags & UATH_CFLAGS_FINAL)
chunklen -= sizeof(struct uath_rx_desc);
if (chunklen > 0 &&
(!(chunk->flags & UATH_CFLAGS_FINAL) || !(chunk->seqnum == 0))) {
/* we should use intermediate RX buffer */
if (chunk->seqnum == 0)
UATH_RESET_INTRX(sc);
if ((sc->sc_intrx_len + sizeof(struct uath_rx_desc) +
chunklen) > UATH_MAX_INTRX_SIZE) {
UATH_STAT_INC(sc, st_invalidlen);
- if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
if (sc->sc_intrx_head != NULL)
m_freem(sc->sc_intrx_head);
UATH_RESET_INTRX(sc);
return (NULL);
}
m->m_len = chunklen;
m->m_data += sizeof(struct uath_chunk);
if (sc->sc_intrx_head == NULL) {
sc->sc_intrx_head = m;
sc->sc_intrx_tail = m;
} else {
m->m_flags &= ~M_PKTHDR;
sc->sc_intrx_tail->m_next = m;
sc->sc_intrx_tail = m;
}
}
sc->sc_intrx_len += chunklen;
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL,
"%s: can't get new mbuf, drop frame\n", __func__);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
if (sc->sc_intrx_head != NULL)
m_freem(sc->sc_intrx_head);
UATH_RESET_INTRX(sc);
return (NULL);
}
data->m = mnew;
data->buf = mtod(mnew, uint8_t *);
/* if the frame is not final, continue the transfer */
if (!(chunk->flags & UATH_CFLAGS_FINAL)) {
sc->sc_intrx_nextnum++;
UATH_RESET_INTRX(sc);
return (NULL);
}
/*
* if UATH_CFLAGS_RXMSG is not set in the frame, the rx descriptor is
* located at the end, 32-bit aligned
*/
desc = (chunk->flags & UATH_CFLAGS_RXMSG) ?
(struct uath_rx_desc *)(chunk + 1) :
(struct uath_rx_desc *)(((uint8_t *)chunk) +
sizeof(struct uath_chunk) + be16toh(chunk->length) -
sizeof(struct uath_rx_desc));
*pdesc = desc;
DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL,
"%s: frame len %u code %u status %u rate %u antenna %u "
"rssi %d channel %u phyerror %u connix %u decrypterror %u "
"keycachemiss %u\n", __func__, be32toh(desc->framelen)
, be32toh(desc->code), be32toh(desc->status), be32toh(desc->rate)
, be32toh(desc->antenna), be32toh(desc->rssi), be32toh(desc->channel)
, be32toh(desc->phyerror), be32toh(desc->connix)
, be32toh(desc->decrypterror), be32toh(desc->keycachemiss));
if (be32toh(desc->len) > MCLBYTES) {
DPRINTF(sc, UATH_DEBUG_RECV | UATH_DEBUG_RECV_ALL,
"%s: bad descriptor (len=%d)\n", __func__,
be32toh(desc->len));
- if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
UATH_STAT_INC(sc, st_toobigrxpkt);
if (sc->sc_intrx_head != NULL)
m_freem(sc->sc_intrx_head);
UATH_RESET_INTRX(sc);
return (NULL);
}
uath_update_rxstat(sc, be32toh(desc->status));
/* finalize mbuf */
if (sc->sc_intrx_head == NULL) {
- m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len =
be32toh(desc->framelen) - UATH_RX_DUMMYSIZE;
m->m_data += sizeof(struct uath_chunk);
} else {
mp = sc->sc_intrx_head;
- mp->m_pkthdr.rcvif = ifp;
mp->m_flags |= M_PKTHDR;
mp->m_pkthdr.len = sc->sc_intrx_len;
m = mp;
}
/* there are a lot more fields in the RX descriptor */
if ((sc->sc_flags & UATH_FLAG_INVALID) == 0 &&
ieee80211_radiotap_active(ic)) {
struct uath_rx_radiotap_header *tap = &sc->sc_rxtap;
uint32_t tsf_hi = be32toh(desc->tstamp_high);
uint32_t tsf_lo = be32toh(desc->tstamp_low);
/* XXX only get low order 24bits of tsf from h/w */
tap->wr_tsf = htole64(((uint64_t)tsf_hi << 32) | tsf_lo);
tap->wr_flags = 0;
if (be32toh(desc->status) == UATH_STATUS_CRC_ERR)
tap->wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
/* XXX map other status to BADFCS? */
/* XXX ath h/w rate code, need to map */
tap->wr_rate = be32toh(desc->rate);
tap->wr_antenna = be32toh(desc->antenna);
tap->wr_antsignal = -95 + be32toh(desc->rssi);
tap->wr_antnoise = -95;
}
- if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
UATH_RESET_INTRX(sc);
return (m);
}
static void
uath_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct uath_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct mbuf *m = NULL;
struct uath_data *data;
struct uath_rx_desc *desc = NULL;
int8_t nf;
UATH_ASSERT_LOCKED(sc);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
data = STAILQ_FIRST(&sc->sc_rx_active);
if (data == NULL)
goto setup;
STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
UATH_STAT_DEC(sc, st_rx_active);
m = uath_data_rxeof(xfer, data, &desc);
STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
UATH_STAT_INC(sc, st_rx_inactive);
/* FALLTHROUGH */
case USB_ST_SETUP:
setup:
data = STAILQ_FIRST(&sc->sc_rx_inactive);
if (data == NULL)
return;
STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next);
UATH_STAT_DEC(sc, st_rx_inactive);
STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next);
UATH_STAT_INC(sc, st_rx_active);
usbd_xfer_set_frame_data(xfer, 0, data->buf, MCLBYTES);
usbd_transfer_submit(xfer);
/*
* To avoid a LOR we unlock our private mutex here before calling
* ieee80211_input(); this is safe because we are at the end of the
* USB callback.
*/
if (sc->sc_flags & UATH_FLAG_INVALID) {
if (m != NULL)
m_freem(m);
return;
}
UATH_UNLOCK(sc);
if (m != NULL && desc != NULL) {
wh = mtod(m, struct ieee80211_frame *);
ni = ieee80211_find_rxnode(ic,
(struct ieee80211_frame_min *)wh);
nf = -95; /* XXX */
if (ni != NULL) {
(void) ieee80211_input(ni, m,
(int)be32toh(desc->rssi), nf);
/* node is no longer needed */
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m,
(int)be32toh(desc->rssi), nf);
m = NULL;
desc = NULL;
}
- if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
- !IFQ_IS_EMPTY(&ifp->if_snd))
- uath_start(ifp);
UATH_LOCK(sc);
+ uath_start(sc);
break;
default:
/* move it back to the inactive queue due to an error. */
data = STAILQ_FIRST(&sc->sc_rx_active);
if (data != NULL) {
STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
UATH_STAT_DEC(sc, st_rx_active);
STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
UATH_STAT_INC(sc, st_rx_inactive);
}
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto setup;
}
break;
}
}
static void
uath_data_txeof(struct usb_xfer *xfer, struct uath_data *data)
{
struct uath_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct mbuf *m;
UATH_ASSERT_LOCKED(sc);
- /*
- * Do any tx complete callback. Note this must be done before releasing
- * the node reference.
- */
if (data->m) {
- m = data->m;
- if (m->m_flags & M_TXCB &&
- (sc->sc_flags & UATH_FLAG_INVALID) == 0) {
- /* XXX status? */
- ieee80211_process_callback(data->ni, m, 0);
- }
- m_freem(m);
+ /* XXX status? */
+ ieee80211_tx_complete(data->ni, data->m, 0);
data->m = NULL;
- }
- if (data->ni) {
- if ((sc->sc_flags & UATH_FLAG_INVALID) == 0)
- ieee80211_free_node(data->ni);
data->ni = NULL;
}
sc->sc_tx_timer = 0;
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
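/*
 * Note on the conversion above: ieee80211_tx_complete() is expected to run
 * any pending M_TXCB completion callback, update the vap interface
 * counters, and release both the mbuf and the node reference, which is why
 * the hand-rolled callback/m_freem()/ieee80211_free_node() sequence and the
 * per-ifnet OPACKETS accounting were dropped here.
 */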
static void
uath_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct uath_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
struct uath_data *data;
UATH_ASSERT_LOCKED(sc);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
data = STAILQ_FIRST(&sc->sc_tx_active);
if (data == NULL)
goto setup;
STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next);
UATH_STAT_DEC(sc, st_tx_active);
uath_data_txeof(xfer, data);
STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next);
UATH_STAT_INC(sc, st_tx_inactive);
/* FALLTHROUGH */
case USB_ST_SETUP:
setup:
data = STAILQ_FIRST(&sc->sc_tx_pending);
if (data == NULL) {
DPRINTF(sc, UATH_DEBUG_XMIT, "%s: empty pending queue\n",
__func__);
return;
}
STAILQ_REMOVE_HEAD(&sc->sc_tx_pending, next);
UATH_STAT_DEC(sc, st_tx_pending);
STAILQ_INSERT_TAIL(&sc->sc_tx_active, data, next);
UATH_STAT_INC(sc, st_tx_active);
usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen);
usbd_transfer_submit(xfer);
- UATH_UNLOCK(sc);
- uath_start(ifp);
- UATH_LOCK(sc);
+ uath_start(sc);
break;
default:
data = STAILQ_FIRST(&sc->sc_tx_active);
if (data == NULL)
goto setup;
if (data->ni != NULL) {
+ if_inc_counter(data->ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
if ((sc->sc_flags & UATH_FLAG_INVALID) == 0)
ieee80211_free_node(data->ni);
data->ni = NULL;
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
goto setup;
}
break;
}
}
static device_method_t uath_methods[] = {
DEVMETHOD(device_probe, uath_match),
DEVMETHOD(device_attach, uath_attach),
DEVMETHOD(device_detach, uath_detach),
DEVMETHOD_END
};
static driver_t uath_driver = {
.name = "uath",
.methods = uath_methods,
.size = sizeof(struct uath_softc)
};
static devclass_t uath_devclass;
DRIVER_MODULE(uath, uhub, uath_driver, uath_devclass, NULL, 0);
MODULE_DEPEND(uath, wlan, 1, 1, 1);
MODULE_DEPEND(uath, usb, 1, 1, 1);
MODULE_VERSION(uath, 1);
Index: head/sys/dev/usb/wlan/if_uathvar.h
===================================================================
--- head/sys/dev/usb/wlan/if_uathvar.h (revision 287196)
+++ head/sys/dev/usb/wlan/if_uathvar.h (revision 287197)
@@ -1,247 +1,248 @@
/* $OpenBSD: if_uathvar.h,v 1.3 2006/09/20 19:47:17 damien Exp $ */
/* $FreeBSD$ */
/*-
* Copyright (c) 2006
* Damien Bergamini <damien.bergamini@free.fr>
* Copyright (c) 2006 Sam Leffler, Errno Consulting
* Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
enum {
UATH_INTR_RX,
UATH_INTR_TX,
UATH_BULK_RX,
UATH_BULK_TX,
UATH_N_XFERS = 4,
};
#define UATH_ID_BSS 2 /* Connection ID */
#define UATH_RX_DATA_LIST_COUNT 128
#define UATH_TX_DATA_LIST_COUNT 16
#define UATH_CMD_LIST_COUNT 60
#define UATH_DATA_TIMEOUT 10000
#define UATH_CMD_TIMEOUT 1000
/* flags for sending firmware commands */
#define UATH_CMD_FLAG_ASYNC (1 << 0)
#define UATH_CMD_FLAG_READ (1 << 1)
#define UATH_CMD_FLAG_MAGIC (1 << 2)
struct uath_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
u_int64_t wr_tsf;
u_int8_t wr_flags;
u_int8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_antsignal;
int8_t wr_antnoise;
u_int8_t wr_antenna;
} __packed __aligned(8);
#define UATH_RX_RADIOTAP_PRESENT ( \
(1 << IEEE80211_RADIOTAP_TSFT) | \
(1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_ANTENNA) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | \
0)
struct uath_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
} __packed __aligned(8);
#define UATH_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_CHANNEL))
struct uath_data {
struct uath_softc *sc;
uint8_t *buf;
uint16_t buflen;
struct mbuf *m;
struct ieee80211_node *ni; /* NB: tx only */
STAILQ_ENTRY(uath_data) next;
};
typedef STAILQ_HEAD(, uath_data) uath_datahead;
struct uath_cmd {
struct uath_softc *sc;
uint32_t flags;
uint32_t msgid;
uint8_t *buf;
uint16_t buflen;
void *odata; /* NB: tx only */
int olen; /* space in odata */
STAILQ_ENTRY(uath_cmd) next;
};
typedef STAILQ_HEAD(, uath_cmd) uath_cmdhead;
struct uath_wme_settings {
uint8_t aifsn;
uint8_t logcwmin;
uint8_t logcwmax;
uint16_t txop;
#define UATH_TXOP_TO_US(txop) ((txop) << 5)
uint8_t acm;
};
struct uath_devcap {
uint32_t targetVersion;
uint32_t targetRevision;
uint32_t macVersion;
uint32_t macRevision;
uint32_t phyRevision;
uint32_t analog5GhzRevision;
uint32_t analog2GhzRevision;
uint32_t regDomain;
uint32_t regCapBits;
uint32_t countryCode;
uint32_t keyCacheSize;
uint32_t numTxQueues;
uint32_t connectionIdMax;
uint32_t wirelessModes;
#define UATH_WIRELESS_MODE_11A 0x01
#define UATH_WIRELESS_MODE_TURBO 0x02
#define UATH_WIRELESS_MODE_11B 0x04
#define UATH_WIRELESS_MODE_11G 0x08
#define UATH_WIRELESS_MODE_108G 0x10
uint32_t chanSpreadSupport;
uint32_t compressSupport;
uint32_t burstSupport;
uint32_t fastFramesSupport;
uint32_t chapTuningSupport;
uint32_t turboGSupport;
uint32_t turboPrimeSupport;
uint32_t deviceType;
uint32_t wmeSupport;
uint32_t low2GhzChan;
uint32_t high2GhzChan;
uint32_t low5GhzChan;
uint32_t high5GhzChan;
uint32_t supportCipherWEP;
uint32_t supportCipherAES_CCM;
uint32_t supportCipherTKIP;
uint32_t supportCipherMicAES_CCM;
uint32_t supportMicTKIP;
uint32_t twiceAntennaGain5G;
uint32_t twiceAntennaGain2G;
};
struct uath_stat {
uint32_t st_badchunkseqnum;
uint32_t st_invalidlen;
uint32_t st_multichunk;
uint32_t st_toobigrxpkt;
uint32_t st_stopinprogress;
uint32_t st_crcerr;
uint32_t st_phyerr;
uint32_t st_decrypt_crcerr;
uint32_t st_decrypt_micerr;
uint32_t st_decomperr;
uint32_t st_keyerr;
uint32_t st_err;
/* CMD/RX/TX queues */
uint32_t st_cmd_active;
uint32_t st_cmd_inactive;
uint32_t st_cmd_pending;
uint32_t st_cmd_waiting;
uint32_t st_rx_active;
uint32_t st_rx_inactive;
uint32_t st_tx_active;
uint32_t st_tx_inactive;
uint32_t st_tx_pending;
};
#define UATH_STAT_INC(sc, var) (sc)->sc_stat.var++
#define UATH_STAT_DEC(sc, var) (sc)->sc_stat.var--
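/*
 * The driver wraps the generic vap so it can interpose on the net80211
 * state machine: the original iv_newstate handler is saved in newstate
 * when the vap is created and chained to from the driver's own handler.
 */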
struct uath_vap {
struct ieee80211vap vap;
int (*newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define UATH_VAP(vap) ((struct uath_vap *)(vap))
struct uath_softc {
- struct ifnet *sc_ifp;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
device_t sc_dev;
struct usb_device *sc_udev;
void *sc_cmd_dma_buf;
void *sc_tx_dma_buf;
struct mtx sc_mtx;
uint32_t sc_debug;
struct uath_stat sc_stat;
int (*sc_newstate)(struct ieee80211com *,
enum ieee80211_state, int);
struct usb_xfer *sc_xfer[UATH_N_XFERS];
struct uath_cmd sc_cmd[UATH_CMD_LIST_COUNT];
uath_cmdhead sc_cmd_active;
uath_cmdhead sc_cmd_inactive;
uath_cmdhead sc_cmd_pending;
uath_cmdhead sc_cmd_waiting;
struct uath_data sc_rx[UATH_RX_DATA_LIST_COUNT];
uath_datahead sc_rx_active;
uath_datahead sc_rx_inactive;
struct uath_data sc_tx[UATH_TX_DATA_LIST_COUNT];
uath_datahead sc_tx_active;
uath_datahead sc_tx_inactive;
uath_datahead sc_tx_pending;
uint32_t sc_msgid;
uint32_t sc_seqnum;
int sc_tx_timer;
struct callout watchdog_ch;
struct callout stat_ch;
/* multi-chunked support */
struct mbuf *sc_intrx_head;
struct mbuf *sc_intrx_tail;
uint8_t sc_intrx_nextnum;
uint32_t sc_intrx_len;
#define UATH_MAX_INTRX_SIZE 3616
struct uath_devcap sc_devcap;
uint8_t sc_serial[16];
/* unsorted */
uint32_t sc_flags;
#define UATH_FLAG_INVALID (1 << 1)
#define UATH_FLAG_INITDONE (1 << 2)
struct uath_rx_radiotap_header sc_rxtap;
int sc_rxtap_len;
struct uath_tx_radiotap_header sc_txtap;
int sc_txtap_len;
};
#define UATH_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define UATH_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
#define UATH_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
#define UATH_RESET_INTRX(sc) do { \
(sc)->sc_intrx_head = NULL; \
(sc)->sc_intrx_tail = NULL; \
(sc)->sc_intrx_nextnum = 0; \
(sc)->sc_intrx_len = 0; \
} while (0)
Index: head/sys/dev/usb/wlan/if_upgt.c
===================================================================
--- head/sys/dev/usb/wlan/if_upgt.c (revision 287196)
+++ head/sys/dev/usb/wlan/if_upgt.c (revision 287197)
@@ -1,2453 +1,2355 @@
/* $OpenBSD: if_upgt.c,v 1.35 2008/04/16 18:32:15 damien Exp $ */
/* $FreeBSD$ */
/*
* Copyright (c) 2007 Marcus Glocker <mglocker@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/firmware.h>
#include <sys/linker.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_phy.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net/bpf.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include "usbdevs.h"
#include <dev/usb/wlan/if_upgtvar.h>
/*
* Driver for the USB PrismGT devices.
*
* For now just USB 2.0 devices with the GW3887 chipset are supported.
* The driver has been written based on the firmware version 2.13.1.0_LM87.
*
* TODO's:
* - MONITOR mode test.
* - Add HOSTAP mode.
* - Add IBSS mode.
* - Support the USB 1.0 devices (NET2280, ISL3880, ISL3886 chipsets).
*
* Parts of this driver have been influenced by reading the p54u driver
* written by Jean-Baptiste Note <jean-baptiste.note@m4x.org> and
* Sebastien Bourdeauducq <lekernel@prism54.org>.
*/
static SYSCTL_NODE(_hw, OID_AUTO, upgt, CTLFLAG_RD, 0,
"USB PrismGT GW3887 driver parameters");
#ifdef UPGT_DEBUG
int upgt_debug = 0;
SYSCTL_INT(_hw_upgt, OID_AUTO, debug, CTLFLAG_RWTUN, &upgt_debug,
0, "control debugging printfs");
enum {
UPGT_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
UPGT_DEBUG_RECV = 0x00000002, /* basic recv operation */
UPGT_DEBUG_RESET = 0x00000004, /* reset processing */
UPGT_DEBUG_INTR = 0x00000008, /* INTR */
UPGT_DEBUG_TX_PROC = 0x00000010, /* tx ISR proc */
UPGT_DEBUG_RX_PROC = 0x00000020, /* rx ISR proc */
UPGT_DEBUG_STATE = 0x00000040, /* 802.11 state transitions */
UPGT_DEBUG_STAT = 0x00000080, /* statistic */
UPGT_DEBUG_FW = 0x00000100, /* firmware */
UPGT_DEBUG_ANY = 0xffffffff
};
#define DPRINTF(sc, m, fmt, ...) do { \
if (sc->sc_debug & (m)) \
printf(fmt, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(sc, m, fmt, ...) do { \
(void) sc; \
} while (0)
#endif
/*
* Prototypes.
*/
static device_probe_t upgt_match;
static device_attach_t upgt_attach;
static device_detach_t upgt_detach;
static int upgt_alloc_tx(struct upgt_softc *);
static int upgt_alloc_rx(struct upgt_softc *);
static int upgt_device_reset(struct upgt_softc *);
static void upgt_bulk_tx(struct upgt_softc *, struct upgt_data *);
static int upgt_fw_verify(struct upgt_softc *);
static int upgt_mem_init(struct upgt_softc *);
static int upgt_fw_load(struct upgt_softc *);
static int upgt_fw_copy(const uint8_t *, char *, int);
static uint32_t upgt_crc32_le(const void *, size_t);
static struct mbuf *
upgt_rxeof(struct usb_xfer *, struct upgt_data *, int *);
static struct mbuf *
upgt_rx(struct upgt_softc *, uint8_t *, int, int *);
static void upgt_txeof(struct usb_xfer *, struct upgt_data *);
static int upgt_eeprom_read(struct upgt_softc *);
static int upgt_eeprom_parse(struct upgt_softc *);
static void upgt_eeprom_parse_hwrx(struct upgt_softc *, uint8_t *);
static void upgt_eeprom_parse_freq3(struct upgt_softc *, uint8_t *, int);
static void upgt_eeprom_parse_freq4(struct upgt_softc *, uint8_t *, int);
static void upgt_eeprom_parse_freq6(struct upgt_softc *, uint8_t *, int);
static uint32_t upgt_chksum_le(const uint32_t *, size_t);
static void upgt_tx_done(struct upgt_softc *, uint8_t *);
-static void upgt_init(void *);
-static void upgt_init_locked(struct upgt_softc *);
-static int upgt_ioctl(struct ifnet *, u_long, caddr_t);
-static void upgt_start(struct ifnet *);
+static void upgt_init(struct upgt_softc *);
+static void upgt_parent(struct ieee80211com *);
+static int upgt_transmit(struct ieee80211com *, struct mbuf *);
+static void upgt_start(struct upgt_softc *);
static int upgt_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void upgt_scan_start(struct ieee80211com *);
static void upgt_scan_end(struct ieee80211com *);
static void upgt_set_channel(struct ieee80211com *);
static struct ieee80211vap *upgt_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void upgt_vap_delete(struct ieee80211vap *);
static void upgt_update_mcast(struct ieee80211com *);
static uint8_t upgt_rx_rate(struct upgt_softc *, const int);
static void upgt_set_multi(void *);
static void upgt_stop(struct upgt_softc *);
static void upgt_setup_rates(struct ieee80211vap *, struct ieee80211com *);
static int upgt_set_macfilter(struct upgt_softc *, uint8_t);
static int upgt_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void upgt_set_chan(struct upgt_softc *, struct ieee80211_channel *);
static void upgt_set_led(struct upgt_softc *, int);
static void upgt_set_led_blink(void *);
static void upgt_get_stats(struct upgt_softc *);
static void upgt_mem_free(struct upgt_softc *, uint32_t);
static uint32_t upgt_mem_alloc(struct upgt_softc *);
static void upgt_free_tx(struct upgt_softc *);
static void upgt_free_rx(struct upgt_softc *);
static void upgt_watchdog(void *);
static void upgt_abort_xfers(struct upgt_softc *);
static void upgt_abort_xfers_locked(struct upgt_softc *);
static void upgt_sysctl_node(struct upgt_softc *);
static struct upgt_data *
upgt_getbuf(struct upgt_softc *);
static struct upgt_data *
upgt_gettxbuf(struct upgt_softc *);
static int upgt_tx_start(struct upgt_softc *, struct mbuf *,
struct ieee80211_node *, struct upgt_data *);
static const char *upgt_fwname = "upgt-gw3887";
static const STRUCT_USB_HOST_ID upgt_devs[] = {
#define UPGT_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
/* version 2 devices */
UPGT_DEV(ACCTON, PRISM_GT),
UPGT_DEV(BELKIN, F5D7050),
UPGT_DEV(CISCOLINKSYS, WUSB54AG),
UPGT_DEV(CONCEPTRONIC, PRISM_GT),
UPGT_DEV(DELL, PRISM_GT_1),
UPGT_DEV(DELL, PRISM_GT_2),
UPGT_DEV(FSC, E5400),
UPGT_DEV(GLOBESPAN, PRISM_GT_1),
UPGT_DEV(GLOBESPAN, PRISM_GT_2),
UPGT_DEV(NETGEAR, WG111V1_2),
UPGT_DEV(INTERSIL, PRISM_GT),
UPGT_DEV(SMC, 2862WG),
UPGT_DEV(USR, USR5422),
UPGT_DEV(WISTRONNEWEB, UR045G),
UPGT_DEV(XYRATEX, PRISM_GT_1),
UPGT_DEV(XYRATEX, PRISM_GT_2),
UPGT_DEV(ZCOM, XG703A),
UPGT_DEV(ZCOM, XM142)
};
static usb_callback_t upgt_bulk_rx_callback;
static usb_callback_t upgt_bulk_tx_callback;
static const struct usb_config upgt_config[UPGT_N_XFERS] = {
[UPGT_BULK_TX] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.bufsize = MCLBYTES * UPGT_TX_MAXCOUNT,
.flags = {
.force_short_xfer = 1,
.pipe_bof = 1
},
.callback = upgt_bulk_tx_callback,
.timeout = UPGT_USB_TIMEOUT, /* ms */
},
[UPGT_BULK_RX] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_IN,
.bufsize = MCLBYTES * UPGT_RX_MAXCOUNT,
.flags = {
.pipe_bof = 1,
.short_xfer_ok = 1
},
.callback = upgt_bulk_rx_callback,
},
};
static int
upgt_match(device_t dev)
{
struct usb_attach_arg *uaa = device_get_ivars(dev);
if (uaa->usb_mode != USB_MODE_HOST)
return (ENXIO);
if (uaa->info.bConfigIndex != UPGT_CONFIG_INDEX)
return (ENXIO);
if (uaa->info.bIfaceIndex != UPGT_IFACE_INDEX)
return (ENXIO);
return (usbd_lookup_id_by_uaa(upgt_devs, sizeof(upgt_devs), uaa));
}
static int
upgt_attach(device_t dev)
{
- int error;
- struct ieee80211com *ic;
- struct ifnet *ifp;
struct upgt_softc *sc = device_get_softc(dev);
+ struct ieee80211com *ic = &sc->sc_ic;
struct usb_attach_arg *uaa = device_get_ivars(dev);
uint8_t bands, iface_index = UPGT_IFACE_INDEX;
+ int error;
sc->sc_dev = dev;
sc->sc_udev = uaa->device;
#ifdef UPGT_DEBUG
sc->sc_debug = upgt_debug;
#endif
device_set_usb_desc(dev);
mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), MTX_NETWORK_LOCK,
MTX_DEF);
callout_init(&sc->sc_led_ch, 0);
callout_init(&sc->sc_watchdog_ch, 0);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
upgt_config, UPGT_N_XFERS, sc, &sc->sc_mtx);
if (error) {
device_printf(dev, "could not allocate USB transfers, "
"err=%s\n", usbd_errstr(error));
goto fail1;
}
sc->sc_rx_dma_buf = usbd_xfer_get_frame_buffer(
sc->sc_xfer[UPGT_BULK_RX], 0);
sc->sc_tx_dma_buf = usbd_xfer_get_frame_buffer(
sc->sc_xfer[UPGT_BULK_TX], 0);
/* Setup TX and RX buffers */
error = upgt_alloc_tx(sc);
if (error)
goto fail2;
error = upgt_alloc_rx(sc);
if (error)
goto fail3;
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- goto fail4;
- }
-
/* Initialize the device. */
error = upgt_device_reset(sc);
if (error)
- goto fail5;
+ goto fail4;
/* Verify the firmware. */
error = upgt_fw_verify(sc);
if (error)
- goto fail5;
+ goto fail4;
/* Calculate device memory space. */
if (sc->sc_memaddr_frame_start == 0 || sc->sc_memaddr_frame_end == 0) {
device_printf(dev,
"could not find memory space addresses on FW\n");
error = EIO;
- goto fail5;
+ goto fail4;
}
sc->sc_memaddr_frame_end -= UPGT_MEMSIZE_RX + 1;
sc->sc_memaddr_rx_start = sc->sc_memaddr_frame_end + 1;
DPRINTF(sc, UPGT_DEBUG_FW, "memory address frame start=0x%08x\n",
sc->sc_memaddr_frame_start);
DPRINTF(sc, UPGT_DEBUG_FW, "memory address frame end=0x%08x\n",
sc->sc_memaddr_frame_end);
DPRINTF(sc, UPGT_DEBUG_FW, "memory address rx start=0x%08x\n",
sc->sc_memaddr_rx_start);
upgt_mem_init(sc);
/* Load the firmware. */
error = upgt_fw_load(sc);
if (error)
- goto fail5;
+ goto fail4;
/* Read the whole EEPROM content and parse it. */
error = upgt_eeprom_read(sc);
if (error)
- goto fail5;
+ goto fail4;
error = upgt_eeprom_parse(sc);
if (error)
- goto fail5;
+ goto fail4;
/* all work related to the device has been done at this point. */
upgt_abort_xfers(sc);
- /* Setup the 802.11 device. */
- ifp->if_softc = sc;
- if_initname(ifp, "upgt", device_get_unit(sc->sc_dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = upgt_init;
- ifp->if_ioctl = upgt_ioctl;
- ifp->if_start = upgt_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- IFQ_SET_READY(&ifp->if_snd);
-
- ic = ifp->if_l2com;
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
ic->ic_opmode = IEEE80211_M_STA;
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA /* station mode */
| IEEE80211_C_MONITOR /* monitor mode */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_BGSCAN /* capable of bg scanning */
| IEEE80211_C_WPA /* 802.11i */
;
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
ieee80211_init_channels(ic, NULL, &bands);
- ieee80211_ifattach(ic, sc->sc_myaddr);
+ ieee80211_ifattach(ic);
ic->ic_raw_xmit = upgt_raw_xmit;
ic->ic_scan_start = upgt_scan_start;
ic->ic_scan_end = upgt_scan_end;
ic->ic_set_channel = upgt_set_channel;
-
ic->ic_vap_create = upgt_vap_create;
ic->ic_vap_delete = upgt_vap_delete;
ic->ic_update_mcast = upgt_update_mcast;
+ ic->ic_transmit = upgt_transmit;
+ ic->ic_parent = upgt_parent;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
UPGT_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
UPGT_RX_RADIOTAP_PRESENT);
upgt_sysctl_node(sc);
if (bootverbose)
ieee80211_announce(ic);
return (0);
-fail5: if_free(ifp);
fail4: upgt_free_rx(sc);
fail3: upgt_free_tx(sc);
fail2: usbd_transfer_unsetup(sc->sc_xfer, UPGT_N_XFERS);
fail1: mtx_destroy(&sc->sc_mtx);
return (error);
}
static void
upgt_txeof(struct usb_xfer *xfer, struct upgt_data *data)
{
- struct upgt_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct mbuf *m;
- UPGT_ASSERT_LOCKED(sc);
-
- /*
- * Do any tx complete callback. Note this must be done before releasing
- * the node reference.
- */
if (data->m) {
- m = data->m;
- if (m->m_flags & M_TXCB) {
- /* XXX status? */
- ieee80211_process_callback(data->ni, m, 0);
- }
- m_freem(m);
+ /* XXX status? */
+ ieee80211_tx_complete(data->ni, data->m, 0);
data->m = NULL;
- }
- if (data->ni) {
- ieee80211_free_node(data->ni);
data->ni = NULL;
}
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
}
static void
upgt_get_stats(struct upgt_softc *sc)
{
struct upgt_data *data_cmd;
struct upgt_lmac_mem *mem;
struct upgt_lmac_stats *stats;
data_cmd = upgt_getbuf(sc);
if (data_cmd == NULL) {
device_printf(sc->sc_dev, "%s: out of buffers.\n", __func__);
return;
}
/*
* Transmit the URB containing the CMD data.
*/
memset(data_cmd->buf, 0, MCLBYTES);
mem = (struct upgt_lmac_mem *)data_cmd->buf;
mem->addr = htole32(sc->sc_memaddr_frame_start +
UPGT_MEMSIZE_FRAME_HEAD);
stats = (struct upgt_lmac_stats *)(mem + 1);
stats->header1.flags = 0;
stats->header1.type = UPGT_H1_TYPE_CTRL;
stats->header1.len = htole16(
sizeof(struct upgt_lmac_stats) - sizeof(struct upgt_lmac_header));
stats->header2.reqid = htole32(sc->sc_memaddr_frame_start);
stats->header2.type = htole16(UPGT_H2_TYPE_STATS);
stats->header2.flags = 0;
data_cmd->buflen = sizeof(*mem) + sizeof(*stats);
mem->chksum = upgt_chksum_le((uint32_t *)stats,
data_cmd->buflen - sizeof(*mem));
upgt_bulk_tx(sc, data_cmd);
}
-static int
-upgt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+upgt_parent(struct ieee80211com *ic)
{
- struct upgt_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *) data;
- int error;
+ struct upgt_softc *sc = ic->ic_softc;
int startall = 0;
UPGT_LOCK(sc);
- error = (sc->sc_flags & UPGT_FLAG_DETACHED) ? ENXIO : 0;
- UPGT_UNLOCK(sc);
- if (error)
- return (error);
-
- switch (cmd) {
- case SIOCSIFFLAGS:
- if (ifp->if_flags & IFF_UP) {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- if ((ifp->if_flags ^ sc->sc_if_flags) &
- (IFF_ALLMULTI | IFF_PROMISC))
- upgt_set_multi(sc);
- } else {
- upgt_init(sc);
- startall = 1;
- }
+ if (sc->sc_flags & UPGT_FLAG_DETACHED) {
+ UPGT_UNLOCK(sc);
+ return;
+ }
+ if (ic->ic_nrunning > 0) {
+ if (sc->sc_flags & UPGT_FLAG_INITDONE) {
+ if (ic->ic_allmulti > 0 || ic->ic_promisc > 0)
+ upgt_set_multi(sc);
} else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- upgt_stop(sc);
+ upgt_init(sc);
+ startall = 1;
}
- sc->sc_if_flags = ifp->if_flags;
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
- }
- return error;
+ } else if (sc->sc_flags & UPGT_FLAG_INITDONE)
+ upgt_stop(sc);
+ UPGT_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
}
static void
-upgt_stop_locked(struct upgt_softc *sc)
+upgt_stop(struct upgt_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
UPGT_ASSERT_LOCKED(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->sc_flags & UPGT_FLAG_INITDONE)
upgt_set_macfilter(sc, IEEE80211_S_INIT);
upgt_abort_xfers_locked(sc);
-}
-
-static void
-upgt_stop(struct upgt_softc *sc)
-{
- struct ifnet *ifp = sc->sc_ifp;
-
- UPGT_LOCK(sc);
- upgt_stop_locked(sc);
- UPGT_UNLOCK(sc);
-
/* device down */
sc->sc_tx_timer = 0;
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
sc->sc_flags &= ~UPGT_FLAG_INITDONE;
}
static void
upgt_set_led(struct upgt_softc *sc, int action)
{
struct upgt_data *data_cmd;
struct upgt_lmac_mem *mem;
struct upgt_lmac_led *led;
data_cmd = upgt_getbuf(sc);
if (data_cmd == NULL) {
device_printf(sc->sc_dev, "%s: out of buffers.\n", __func__);
return;
}
/*
* Transmit the URB containing the CMD data.
*/
memset(data_cmd->buf, 0, MCLBYTES);
mem = (struct upgt_lmac_mem *)data_cmd->buf;
mem->addr = htole32(sc->sc_memaddr_frame_start +
UPGT_MEMSIZE_FRAME_HEAD);
led = (struct upgt_lmac_led *)(mem + 1);
led->header1.flags = UPGT_H1_FLAGS_TX_NO_CALLBACK;
led->header1.type = UPGT_H1_TYPE_CTRL;
led->header1.len = htole16(
sizeof(struct upgt_lmac_led) -
sizeof(struct upgt_lmac_header));
led->header2.reqid = htole32(sc->sc_memaddr_frame_start);
led->header2.type = htole16(UPGT_H2_TYPE_LED);
led->header2.flags = 0;
switch (action) {
case UPGT_LED_OFF:
led->mode = htole16(UPGT_LED_MODE_SET);
led->action_fix = 0;
led->action_tmp = htole16(UPGT_LED_ACTION_OFF);
led->action_tmp_dur = 0;
break;
case UPGT_LED_ON:
led->mode = htole16(UPGT_LED_MODE_SET);
led->action_fix = 0;
led->action_tmp = htole16(UPGT_LED_ACTION_ON);
led->action_tmp_dur = 0;
break;
case UPGT_LED_BLINK:
if (sc->sc_state != IEEE80211_S_RUN) {
STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data_cmd, next);
return;
}
if (sc->sc_led_blink) {
/* previous blink was not finished */
STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data_cmd, next);
return;
}
led->mode = htole16(UPGT_LED_MODE_SET);
led->action_fix = htole16(UPGT_LED_ACTION_OFF);
led->action_tmp = htole16(UPGT_LED_ACTION_ON);
led->action_tmp_dur = htole16(UPGT_LED_ACTION_TMP_DUR);
/* lock blink */
sc->sc_led_blink = 1;
callout_reset(&sc->sc_led_ch, hz, upgt_set_led_blink, sc);
break;
default:
STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data_cmd, next);
return;
}
data_cmd->buflen = sizeof(*mem) + sizeof(*led);
mem->chksum = upgt_chksum_le((uint32_t *)led,
data_cmd->buflen - sizeof(*mem));
upgt_bulk_tx(sc, data_cmd);
}
static void
upgt_set_led_blink(void *arg)
{
struct upgt_softc *sc = arg;
/* blink finished, we are ready for the next one */
sc->sc_led_blink = 0;
}
static void
-upgt_init(void *priv)
+upgt_init(struct upgt_softc *sc)
{
- struct upgt_softc *sc = priv;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- UPGT_LOCK(sc);
- upgt_init_locked(sc);
- UPGT_UNLOCK(sc);
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ieee80211_start_all(ic); /* start all vap's */
-}
-
-static void
-upgt_init_locked(struct upgt_softc *sc)
-{
- struct ifnet *ifp = sc->sc_ifp;
-
UPGT_ASSERT_LOCKED(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- upgt_stop_locked(sc);
+ if (sc->sc_flags & UPGT_FLAG_INITDONE)
+ upgt_stop(sc);
usbd_transfer_start(sc->sc_xfer[UPGT_BULK_RX]);
(void)upgt_set_macfilter(sc, IEEE80211_S_SCAN);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
sc->sc_flags |= UPGT_FLAG_INITDONE;
callout_reset(&sc->sc_watchdog_ch, hz, upgt_watchdog, sc);
}
static int
upgt_set_macfilter(struct upgt_softc *sc, uint8_t state)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ieee80211_node *ni;
struct upgt_data *data_cmd;
struct upgt_lmac_mem *mem;
struct upgt_lmac_filter *filter;
- uint8_t broadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
UPGT_ASSERT_LOCKED(sc);
data_cmd = upgt_getbuf(sc);
if (data_cmd == NULL) {
device_printf(sc->sc_dev, "out of TX buffers.\n");
return (ENOBUFS);
}
/*
* Transmit the URB containing the CMD data.
*/
memset(data_cmd->buf, 0, MCLBYTES);
mem = (struct upgt_lmac_mem *)data_cmd->buf;
mem->addr = htole32(sc->sc_memaddr_frame_start +
UPGT_MEMSIZE_FRAME_HEAD);
filter = (struct upgt_lmac_filter *)(mem + 1);
filter->header1.flags = UPGT_H1_FLAGS_TX_NO_CALLBACK;
filter->header1.type = UPGT_H1_TYPE_CTRL;
filter->header1.len = htole16(
sizeof(struct upgt_lmac_filter) -
sizeof(struct upgt_lmac_header));
filter->header2.reqid = htole32(sc->sc_memaddr_frame_start);
filter->header2.type = htole16(UPGT_H2_TYPE_MACFILTER);
filter->header2.flags = 0;
switch (state) {
case IEEE80211_S_INIT:
DPRINTF(sc, UPGT_DEBUG_STATE, "%s: set MAC filter to INIT\n",
__func__);
filter->type = htole16(UPGT_FILTER_TYPE_RESET);
break;
case IEEE80211_S_SCAN:
DPRINTF(sc, UPGT_DEBUG_STATE,
"set MAC filter to SCAN (bssid %s)\n",
- ether_sprintf(broadcast));
+ ether_sprintf(ieee80211broadcastaddr));
filter->type = htole16(UPGT_FILTER_TYPE_NONE);
- IEEE80211_ADDR_COPY(filter->dst, sc->sc_myaddr);
- IEEE80211_ADDR_COPY(filter->src, broadcast);
+ IEEE80211_ADDR_COPY(filter->dst,
+ vap ? vap->iv_myaddr : ic->ic_macaddr);
+ IEEE80211_ADDR_COPY(filter->src, ieee80211broadcastaddr);
filter->unknown1 = htole16(UPGT_FILTER_UNKNOWN1);
filter->rxaddr = htole32(sc->sc_memaddr_rx_start);
filter->unknown2 = htole16(UPGT_FILTER_UNKNOWN2);
filter->rxhw = htole32(sc->sc_eeprom_hwrx);
filter->unknown3 = htole16(UPGT_FILTER_UNKNOWN3);
break;
case IEEE80211_S_RUN:
ni = ieee80211_ref_node(vap->iv_bss);
/* XXX monitor mode isn't tested yet. */
if (vap->iv_opmode == IEEE80211_M_MONITOR) {
filter->type = htole16(UPGT_FILTER_TYPE_MONITOR);
- IEEE80211_ADDR_COPY(filter->dst, sc->sc_myaddr);
+ IEEE80211_ADDR_COPY(filter->dst,
+ vap ? vap->iv_myaddr : ic->ic_macaddr);
IEEE80211_ADDR_COPY(filter->src, ni->ni_bssid);
filter->unknown1 = htole16(UPGT_FILTER_MONITOR_UNKNOWN1);
filter->rxaddr = htole32(sc->sc_memaddr_rx_start);
filter->unknown2 = htole16(UPGT_FILTER_MONITOR_UNKNOWN2);
filter->rxhw = htole32(sc->sc_eeprom_hwrx);
filter->unknown3 = htole16(UPGT_FILTER_MONITOR_UNKNOWN3);
} else {
DPRINTF(sc, UPGT_DEBUG_STATE,
"set MAC filter to RUN (bssid %s)\n",
ether_sprintf(ni->ni_bssid));
filter->type = htole16(UPGT_FILTER_TYPE_STA);
- IEEE80211_ADDR_COPY(filter->dst, sc->sc_myaddr);
+ IEEE80211_ADDR_COPY(filter->dst,
+ vap ? vap->iv_myaddr : ic->ic_macaddr);
IEEE80211_ADDR_COPY(filter->src, ni->ni_bssid);
filter->unknown1 = htole16(UPGT_FILTER_UNKNOWN1);
filter->rxaddr = htole32(sc->sc_memaddr_rx_start);
filter->unknown2 = htole16(UPGT_FILTER_UNKNOWN2);
filter->rxhw = htole32(sc->sc_eeprom_hwrx);
filter->unknown3 = htole16(UPGT_FILTER_UNKNOWN3);
}
ieee80211_free_node(ni);
break;
default:
device_printf(sc->sc_dev,
"MAC filter does not know that state\n");
break;
}
data_cmd->buflen = sizeof(*mem) + sizeof(*filter);
mem->chksum = upgt_chksum_le((uint32_t *)filter,
data_cmd->buflen - sizeof(*mem));
upgt_bulk_tx(sc, data_cmd);
return (0);
}
static void
upgt_setup_rates(struct ieee80211vap *vap, struct ieee80211com *ic)
{
- struct upgt_softc *sc = vap->iv_ic->ic_softc;
+ struct upgt_softc *sc = ic->ic_softc;
const struct ieee80211_txparam *tp;
/*
* 0x01 = OFDM6 0x10 = DS1
* 0x04 = OFDM9 0x11 = DS2
* 0x06 = OFDM12 0x12 = DS5
* 0x07 = OFDM18 0x13 = DS11
* 0x08 = OFDM24
* 0x09 = OFDM36
* 0x0a = OFDM48
* 0x0b = OFDM54
*/
const uint8_t rateset_auto_11b[] =
{ 0x13, 0x13, 0x12, 0x11, 0x11, 0x10, 0x10, 0x10 };
const uint8_t rateset_auto_11g[] =
{ 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x04, 0x01 };
const uint8_t rateset_fix_11bg[] =
{ 0x10, 0x11, 0x12, 0x13, 0x01, 0x04, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b };
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
/* XXX */
if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) {
/*
* Automatic rate control is done by the device.
* We just pass the rateset from which the device
* will pick up a rate.
*/
if (ic->ic_curmode == IEEE80211_MODE_11B)
memcpy(sc->sc_cur_rateset, rateset_auto_11b,
sizeof(sc->sc_cur_rateset));
if (ic->ic_curmode == IEEE80211_MODE_11G ||
ic->ic_curmode == IEEE80211_MODE_AUTO)
memcpy(sc->sc_cur_rateset, rateset_auto_11g,
sizeof(sc->sc_cur_rateset));
} else {
/* set a fixed rate */
memset(sc->sc_cur_rateset, rateset_fix_11bg[tp->ucastrate],
sizeof(sc->sc_cur_rateset));
}
}
static void
upgt_set_multi(void *arg)
{
- struct upgt_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- if (!(ifp->if_flags & IFF_UP))
- return;
+ /* XXX don't know how to program the multicast filter; lack of docs. */
+}
- /*
- * XXX don't know how to set a device. Lack of docs. Just try to set
- * IFF_ALLMULTI flag here.
- */
- ifp->if_flags |= IFF_ALLMULTI;
+static int
+upgt_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct upgt_softc *sc = ic->ic_softc;
+ int error;
+
+ UPGT_LOCK(sc);
+ if ((sc->sc_flags & UPGT_FLAG_INITDONE) == 0) {
+ UPGT_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ UPGT_UNLOCK(sc);
+ return (error);
+ }
+ upgt_start(sc);
+ UPGT_UNLOCK(sc);
+
+ return (0);
}
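/*
 * Note on the new transmit path: net80211 hands frames to ic_transmit with
 * the destination node stashed in m->m_pkthdr.rcvif.  upgt_transmit() only
 * enqueues them on sc_snd under the driver lock; upgt_start() below drains
 * the queue, recovers the node pointer and pushes each frame to the
 * hardware via upgt_tx_start().
 */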
static void
-upgt_start(struct ifnet *ifp)
+upgt_start(struct upgt_softc *sc)
{
- struct upgt_softc *sc = ifp->if_softc;
struct upgt_data *data_tx;
struct ieee80211_node *ni;
struct mbuf *m;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ UPGT_ASSERT_LOCKED(sc);
+
+ if ((sc->sc_flags & UPGT_FLAG_INITDONE) == 0)
return;
- UPGT_LOCK(sc);
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
-
+ while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
data_tx = upgt_gettxbuf(sc);
if (data_tx == NULL) {
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
+ mbufq_prepend(&sc->sc_snd, m);
break;
}
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
m->m_pkthdr.rcvif = NULL;
if (upgt_tx_start(sc, m, ni, data_tx) != 0) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, data_tx, next);
UPGT_STAT_INC(sc, st_tx_inactive);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
continue;
}
sc->sc_tx_timer = 5;
}
- UPGT_UNLOCK(sc);
}
static int
upgt_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct upgt_softc *sc = ic->ic_softc;
struct upgt_data *data_tx = NULL;
+ UPGT_LOCK(sc);
/* prevent management frames from being sent if we're not ready */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!(sc->sc_flags & UPGT_FLAG_INITDONE)) {
m_freem(m);
ieee80211_free_node(ni);
+ UPGT_UNLOCK(sc);
return ENETDOWN;
}
- UPGT_LOCK(sc);
data_tx = upgt_gettxbuf(sc);
if (data_tx == NULL) {
ieee80211_free_node(ni);
m_freem(m);
UPGT_UNLOCK(sc);
return (ENOBUFS);
}
if (upgt_tx_start(sc, m, ni, data_tx) != 0) {
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, data_tx, next);
UPGT_STAT_INC(sc, st_tx_inactive);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
UPGT_UNLOCK(sc);
return (EIO);
}
UPGT_UNLOCK(sc);
sc->sc_tx_timer = 5;
return (0);
}
static void
upgt_watchdog(void *arg)
{
struct upgt_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
if (sc->sc_tx_timer > 0) {
if (--sc->sc_tx_timer == 0) {
device_printf(sc->sc_dev, "watchdog timeout\n");
- /* upgt_init(ifp); XXX needs a process context ? */
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ /* upgt_init(sc); XXX needs a process context ? */
+ counter_u64_add(ic->ic_oerrors, 1);
return;
}
callout_reset(&sc->sc_watchdog_ch, hz, upgt_watchdog, sc);
}
}
static uint32_t
upgt_mem_alloc(struct upgt_softc *sc)
{
int i;
for (i = 0; i < sc->sc_memory.pages; i++) {
if (sc->sc_memory.page[i].used == 0) {
sc->sc_memory.page[i].used = 1;
return (sc->sc_memory.page[i].addr);
}
}
return (0);
}
static void
upgt_scan_start(struct ieee80211com *ic)
{
/* do nothing. */
}
static void
upgt_scan_end(struct ieee80211com *ic)
{
/* do nothing. */
}
static void
upgt_set_channel(struct ieee80211com *ic)
{
struct upgt_softc *sc = ic->ic_softc;
UPGT_LOCK(sc);
upgt_set_chan(sc, ic->ic_curchan);
UPGT_UNLOCK(sc);
}
static void
upgt_set_chan(struct upgt_softc *sc, struct ieee80211_channel *c)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct upgt_data *data_cmd;
struct upgt_lmac_mem *mem;
struct upgt_lmac_channel *chan;
int channel;
UPGT_ASSERT_LOCKED(sc);
channel = ieee80211_chan2ieee(ic, c);
if (channel == 0 || channel == IEEE80211_CHAN_ANY) {
/* XXX should NEVER happen */
device_printf(sc->sc_dev,
"%s: invalid channel %x\n", __func__, channel);
return;
}
DPRINTF(sc, UPGT_DEBUG_STATE, "%s: channel %d\n", __func__, channel);
data_cmd = upgt_getbuf(sc);
if (data_cmd == NULL) {
device_printf(sc->sc_dev, "%s: out of buffers.\n", __func__);
return;
}
/*
* Transmit the URB containing the CMD data.
*/
memset(data_cmd->buf, 0, MCLBYTES);
mem = (struct upgt_lmac_mem *)data_cmd->buf;
mem->addr = htole32(sc->sc_memaddr_frame_start +
UPGT_MEMSIZE_FRAME_HEAD);
chan = (struct upgt_lmac_channel *)(mem + 1);
chan->header1.flags = UPGT_H1_FLAGS_TX_NO_CALLBACK;
chan->header1.type = UPGT_H1_TYPE_CTRL;
chan->header1.len = htole16(
sizeof(struct upgt_lmac_channel) - sizeof(struct upgt_lmac_header));
chan->header2.reqid = htole32(sc->sc_memaddr_frame_start);
chan->header2.type = htole16(UPGT_H2_TYPE_CHANNEL);
chan->header2.flags = 0;
chan->unknown1 = htole16(UPGT_CHANNEL_UNKNOWN1);
chan->unknown2 = htole16(UPGT_CHANNEL_UNKNOWN2);
chan->freq6 = sc->sc_eeprom_freq6[channel];
chan->settings = sc->sc_eeprom_freq6_settings;
chan->unknown3 = UPGT_CHANNEL_UNKNOWN3;
memcpy(chan->freq3_1, &sc->sc_eeprom_freq3[channel].data,
sizeof(chan->freq3_1));
memcpy(chan->freq4, &sc->sc_eeprom_freq4[channel],
sizeof(sc->sc_eeprom_freq4[channel]));
memcpy(chan->freq3_2, &sc->sc_eeprom_freq3[channel].data,
sizeof(chan->freq3_2));
data_cmd->buflen = sizeof(*mem) + sizeof(*chan);
mem->chksum = upgt_chksum_le((uint32_t *)chan,
data_cmd->buflen - sizeof(*mem));
upgt_bulk_tx(sc, data_cmd);
}
static struct ieee80211vap *
upgt_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct upgt_vap *uvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return NULL;
uvp = (struct upgt_vap *) malloc(sizeof(struct upgt_vap),
M_80211_VAP, M_NOWAIT | M_ZERO);
if (uvp == NULL)
return NULL;
vap = &uvp->vap;
/* enable s/w bmiss handling for sta mode */
if (ieee80211_vap_setup(ic, vap, name, unit, opmode,
- flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) {
+ flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) {
/* out of memory */
free(uvp, M_80211_VAP);
return (NULL);
}
/* override state transition machine */
uvp->newstate = vap->iv_newstate;
vap->iv_newstate = upgt_newstate;
/* setup device rates */
upgt_setup_rates(vap, ic);
/* complete setup */
ieee80211_vap_attach(vap, ieee80211_media_change,
- ieee80211_media_status);
+ ieee80211_media_status, mac);
ic->ic_opmode = opmode;
return vap;
}
static int
upgt_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct upgt_vap *uvp = UPGT_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct upgt_softc *sc = ic->ic_softc;
/* do it in a process context */
sc->sc_state = nstate;
IEEE80211_UNLOCK(ic);
UPGT_LOCK(sc);
callout_stop(&sc->sc_led_ch);
callout_stop(&sc->sc_watchdog_ch);
switch (nstate) {
case IEEE80211_S_INIT:
/* do not accept any frames if the device is down */
(void)upgt_set_macfilter(sc, sc->sc_state);
upgt_set_led(sc, UPGT_LED_OFF);
break;
case IEEE80211_S_SCAN:
upgt_set_chan(sc, ic->ic_curchan);
break;
case IEEE80211_S_AUTH:
upgt_set_chan(sc, ic->ic_curchan);
break;
case IEEE80211_S_ASSOC:
break;
case IEEE80211_S_RUN:
upgt_set_macfilter(sc, sc->sc_state);
upgt_set_led(sc, UPGT_LED_ON);
break;
default:
break;
}
UPGT_UNLOCK(sc);
IEEE80211_LOCK(ic);
return (uvp->newstate(vap, nstate, arg));
}
static void
upgt_vap_delete(struct ieee80211vap *vap)
{
struct upgt_vap *uvp = UPGT_VAP(vap);
ieee80211_vap_detach(vap);
free(uvp, M_80211_VAP);
}
static void
upgt_update_mcast(struct ieee80211com *ic)
{
struct upgt_softc *sc = ic->ic_softc;
upgt_set_multi(sc);
}
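/*
 * upgt_eeprom_parse() walks the TLV-style option list that follows the
 * EEPROM preamble: each upgt_eeprom_option carries a 16-bit type and a
 * length stored in 16-bit words, and the walk stops at
 * UPGT_EEPROM_TYPE_END (or fails if an option would run past
 * UPGT_EEPROM_SIZE).
 */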
static int
upgt_eeprom_parse(struct upgt_softc *sc)
{
+ struct ieee80211com *ic = &sc->sc_ic;
struct upgt_eeprom_header *eeprom_header;
struct upgt_eeprom_option *eeprom_option;
uint16_t option_len;
uint16_t option_type;
uint16_t preamble_len;
int option_end = 0;
/* calculate eeprom options start offset */
eeprom_header = (struct upgt_eeprom_header *)sc->sc_eeprom;
preamble_len = le16toh(eeprom_header->preamble_len);
eeprom_option = (struct upgt_eeprom_option *)(sc->sc_eeprom +
(sizeof(struct upgt_eeprom_header) + preamble_len));
while (!option_end) {
/* sanity check */
if (eeprom_option >= (struct upgt_eeprom_option *)
(sc->sc_eeprom + UPGT_EEPROM_SIZE)) {
return (EINVAL);
}
/* the eeprom option length is stored in words */
option_len =
(le16toh(eeprom_option->len) - 1) * sizeof(uint16_t);
option_type =
le16toh(eeprom_option->type);
/* sanity check */
if (option_len == 0 || option_len >= UPGT_EEPROM_SIZE)
return (EINVAL);
switch (option_type) {
case UPGT_EEPROM_TYPE_NAME:
DPRINTF(sc, UPGT_DEBUG_FW,
"EEPROM name len=%d\n", option_len);
break;
case UPGT_EEPROM_TYPE_SERIAL:
DPRINTF(sc, UPGT_DEBUG_FW,
"EEPROM serial len=%d\n", option_len);
break;
case UPGT_EEPROM_TYPE_MAC:
DPRINTF(sc, UPGT_DEBUG_FW,
"EEPROM mac len=%d\n", option_len);
- IEEE80211_ADDR_COPY(sc->sc_myaddr, eeprom_option->data);
+ IEEE80211_ADDR_COPY(ic->ic_macaddr,
+ eeprom_option->data);
break;
case UPGT_EEPROM_TYPE_HWRX:
DPRINTF(sc, UPGT_DEBUG_FW,
"EEPROM hwrx len=%d\n", option_len);
upgt_eeprom_parse_hwrx(sc, eeprom_option->data);
break;
case UPGT_EEPROM_TYPE_CHIP:
DPRINTF(sc, UPGT_DEBUG_FW,
"EEPROM chip len=%d\n", option_len);
break;
case UPGT_EEPROM_TYPE_FREQ3:
DPRINTF(sc, UPGT_DEBUG_FW,
"EEPROM freq3 len=%d\n", option_len);
upgt_eeprom_parse_freq3(sc, eeprom_option->data,
option_len);
break;
case UPGT_EEPROM_TYPE_FREQ4:
DPRINTF(sc, UPGT_DEBUG_FW,
"EEPROM freq4 len=%d\n", option_len);
upgt_eeprom_parse_freq4(sc, eeprom_option->data,
option_len);
break;
case UPGT_EEPROM_TYPE_FREQ5:
DPRINTF(sc, UPGT_DEBUG_FW,
"EEPROM freq5 len=%d\n", option_len);
break;
case UPGT_EEPROM_TYPE_FREQ6:
DPRINTF(sc, UPGT_DEBUG_FW,
"EEPROM freq6 len=%d\n", option_len);
upgt_eeprom_parse_freq6(sc, eeprom_option->data,
option_len);
break;
case UPGT_EEPROM_TYPE_END:
DPRINTF(sc, UPGT_DEBUG_FW,
"EEPROM end len=%d\n", option_len);
option_end = 1;
break;
case UPGT_EEPROM_TYPE_OFF:
DPRINTF(sc, UPGT_DEBUG_FW,
"%s: EEPROM off without end option\n", __func__);
return (EIO);
default:
DPRINTF(sc, UPGT_DEBUG_FW,
"EEPROM unknown type 0x%04x len=%d\n",
option_type, option_len);
break;
}
/* jump to next EEPROM option */
eeprom_option = (struct upgt_eeprom_option *)
(eeprom_option->data + option_len);
}
return (0);
}
static void
upgt_eeprom_parse_freq3(struct upgt_softc *sc, uint8_t *data, int len)
{
struct upgt_eeprom_freq3_header *freq3_header;
struct upgt_lmac_freq3 *freq3;
int i;
int elements;
int flags;
unsigned channel;
freq3_header = (struct upgt_eeprom_freq3_header *)data;
freq3 = (struct upgt_lmac_freq3 *)(freq3_header + 1);
flags = freq3_header->flags;
elements = freq3_header->elements;
DPRINTF(sc, UPGT_DEBUG_FW, "flags=0x%02x elements=%d\n",
flags, elements);
if (elements >= (int)(UPGT_EEPROM_SIZE / sizeof(freq3[0])))
return;
for (i = 0; i < elements; i++) {
channel = ieee80211_mhz2ieee(le16toh(freq3[i].freq), 0);
if (channel >= IEEE80211_CHAN_MAX)
continue;
sc->sc_eeprom_freq3[channel] = freq3[i];
DPRINTF(sc, UPGT_DEBUG_FW, "frequency=%d, channel=%d\n",
le16toh(sc->sc_eeprom_freq3[channel].freq), channel);
}
}
void
upgt_eeprom_parse_freq4(struct upgt_softc *sc, uint8_t *data, int len)
{
struct upgt_eeprom_freq4_header *freq4_header;
struct upgt_eeprom_freq4_1 *freq4_1;
struct upgt_eeprom_freq4_2 *freq4_2;
int i;
int j;
int elements;
int settings;
int flags;
unsigned channel;
freq4_header = (struct upgt_eeprom_freq4_header *)data;
freq4_1 = (struct upgt_eeprom_freq4_1 *)(freq4_header + 1);
flags = freq4_header->flags;
elements = freq4_header->elements;
settings = freq4_header->settings;
/* we need this value later */
sc->sc_eeprom_freq6_settings = freq4_header->settings;
DPRINTF(sc, UPGT_DEBUG_FW, "flags=0x%02x elements=%d settings=%d\n",
flags, elements, settings);
if (elements >= (int)(UPGT_EEPROM_SIZE / sizeof(freq4_1[0])))
return;
for (i = 0; i < elements; i++) {
channel = ieee80211_mhz2ieee(le16toh(freq4_1[i].freq), 0);
if (channel >= IEEE80211_CHAN_MAX)
continue;
freq4_2 = (struct upgt_eeprom_freq4_2 *)freq4_1[i].data;
for (j = 0; j < settings; j++) {
sc->sc_eeprom_freq4[channel][j].cmd = freq4_2[j];
sc->sc_eeprom_freq4[channel][j].pad = 0;
}
DPRINTF(sc, UPGT_DEBUG_FW, "frequency=%d, channel=%d\n",
le16toh(freq4_1[i].freq), channel);
}
}
void
upgt_eeprom_parse_freq6(struct upgt_softc *sc, uint8_t *data, int len)
{
struct upgt_lmac_freq6 *freq6;
int i;
int elements;
unsigned channel;
freq6 = (struct upgt_lmac_freq6 *)data;
elements = len / sizeof(struct upgt_lmac_freq6);
DPRINTF(sc, UPGT_DEBUG_FW, "elements=%d\n", elements);
if (elements >= (int)(UPGT_EEPROM_SIZE / sizeof(freq6[0])))
return;
for (i = 0; i < elements; i++) {
channel = ieee80211_mhz2ieee(le16toh(freq6[i].freq), 0);
if (channel >= IEEE80211_CHAN_MAX)
continue;
sc->sc_eeprom_freq6[channel] = freq6[i];
DPRINTF(sc, UPGT_DEBUG_FW, "frequency=%d, channel=%d\n",
le16toh(sc->sc_eeprom_freq6[channel].freq), channel);
}
}
static void
upgt_eeprom_parse_hwrx(struct upgt_softc *sc, uint8_t *data)
{
struct upgt_eeprom_option_hwrx *option_hwrx;
option_hwrx = (struct upgt_eeprom_option_hwrx *)data;
sc->sc_eeprom_hwrx = option_hwrx->rxfilter - UPGT_EEPROM_RX_CONST;
DPRINTF(sc, UPGT_DEBUG_FW, "hwrx option value=0x%04x\n",
sc->sc_eeprom_hwrx);
}
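/*
 * upgt_eeprom_read() fetches the EEPROM in UPGT_EEPROM_BLOCK_SIZE chunks:
 * each request is pushed out with upgt_bulk_tx() and the thread then
 * sleeps on the softc until upgt_rxeof() sees the matching
 * UPGT_H2_TYPE_EEPROM reply and calls wakeup(), or until the one-second
 * timeout expires.
 */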
static int
upgt_eeprom_read(struct upgt_softc *sc)
{
struct upgt_data *data_cmd;
struct upgt_lmac_mem *mem;
struct upgt_lmac_eeprom *eeprom;
int block, error, offset;
UPGT_LOCK(sc);
usb_pause_mtx(&sc->sc_mtx, 100);
offset = 0;
block = UPGT_EEPROM_BLOCK_SIZE;
while (offset < UPGT_EEPROM_SIZE) {
DPRINTF(sc, UPGT_DEBUG_FW,
"request EEPROM block (offset=%d, len=%d)\n", offset, block);
data_cmd = upgt_getbuf(sc);
if (data_cmd == NULL) {
UPGT_UNLOCK(sc);
return (ENOBUFS);
}
/*
* Transmit the URB containing the CMD data.
*/
memset(data_cmd->buf, 0, MCLBYTES);
mem = (struct upgt_lmac_mem *)data_cmd->buf;
mem->addr = htole32(sc->sc_memaddr_frame_start +
UPGT_MEMSIZE_FRAME_HEAD);
eeprom = (struct upgt_lmac_eeprom *)(mem + 1);
eeprom->header1.flags = 0;
eeprom->header1.type = UPGT_H1_TYPE_CTRL;
eeprom->header1.len = htole16((
sizeof(struct upgt_lmac_eeprom) -
sizeof(struct upgt_lmac_header)) + block);
eeprom->header2.reqid = htole32(sc->sc_memaddr_frame_start);
eeprom->header2.type = htole16(UPGT_H2_TYPE_EEPROM);
eeprom->header2.flags = 0;
eeprom->offset = htole16(offset);
eeprom->len = htole16(block);
data_cmd->buflen = sizeof(*mem) + sizeof(*eeprom) + block;
mem->chksum = upgt_chksum_le((uint32_t *)eeprom,
data_cmd->buflen - sizeof(*mem));
upgt_bulk_tx(sc, data_cmd);
error = mtx_sleep(sc, &sc->sc_mtx, 0, "eeprom_request", hz);
if (error != 0) {
device_printf(sc->sc_dev,
"timeout while waiting for EEPROM data\n");
UPGT_UNLOCK(sc);
return (EIO);
}
offset += block;
if (UPGT_EEPROM_SIZE - offset < block)
block = UPGT_EEPROM_SIZE - offset;
}
UPGT_UNLOCK(sc);
return (0);
}
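/*
 * Request-schedule sketch (illustration only, not driver code): with
 * UPGT_EEPROM_SIZE = 8192 and UPGT_EEPROM_BLOCK_SIZE = 1020 the loop
 * above issues eight 1020-byte requests at offsets 0, 1020, ..., 7140
 * and one final 32-byte request at offset 8160.  The stand-alone helper
 * below (its name is made up for this sketch) walks the same schedule.
 */
static __unused void
example_eeprom_schedule(void)
{
        int offset = 0, block = UPGT_EEPROM_BLOCK_SIZE;

        while (offset < UPGT_EEPROM_SIZE) {
                /* the driver would send one EEPROM read command here */
                offset += block;
                if (UPGT_EEPROM_SIZE - offset < block)
                        block = UPGT_EEPROM_SIZE - offset;
        }
}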
/*
* When RX data comes in, this function returns an mbuf and an RSSI value.
*/
static struct mbuf *
upgt_rxeof(struct usb_xfer *xfer, struct upgt_data *data, int *rssi)
{
struct mbuf *m = NULL;
struct upgt_softc *sc = usbd_xfer_softc(xfer);
struct upgt_lmac_header *header;
struct upgt_lmac_eeprom *eeprom;
uint8_t h1_type;
uint16_t h2_type;
int actlen, sumlen;
usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
UPGT_ASSERT_LOCKED(sc);
if (actlen < 1)
return (NULL);
/* Check only at the very beginning. */
if (!(sc->sc_flags & UPGT_FLAG_FWLOADED) &&
(memcmp(data->buf, "OK", 2) == 0)) {
sc->sc_flags |= UPGT_FLAG_FWLOADED;
wakeup_one(sc);
return (NULL);
}
if (actlen < (int)UPGT_RX_MINSZ)
return (NULL);
/*
* Check what type of frame came in.
*/
header = (struct upgt_lmac_header *)(data->buf + 4);
h1_type = header->header1.type;
h2_type = le16toh(header->header2.type);
if (h1_type == UPGT_H1_TYPE_CTRL && h2_type == UPGT_H2_TYPE_EEPROM) {
eeprom = (struct upgt_lmac_eeprom *)(data->buf + 4);
uint16_t eeprom_offset = le16toh(eeprom->offset);
uint16_t eeprom_len = le16toh(eeprom->len);
DPRINTF(sc, UPGT_DEBUG_FW,
"received EEPROM block (offset=%d, len=%d)\n",
eeprom_offset, eeprom_len);
memcpy(sc->sc_eeprom + eeprom_offset,
data->buf + sizeof(struct upgt_lmac_eeprom) + 4,
eeprom_len);
/* EEPROM data has arrived in time, wakeup. */
wakeup(sc);
} else if (h1_type == UPGT_H1_TYPE_CTRL &&
h2_type == UPGT_H2_TYPE_TX_DONE) {
DPRINTF(sc, UPGT_DEBUG_XMIT, "%s: received 802.11 TX done\n",
__func__);
upgt_tx_done(sc, data->buf + 4);
} else if (h1_type == UPGT_H1_TYPE_RX_DATA ||
h1_type == UPGT_H1_TYPE_RX_DATA_MGMT) {
DPRINTF(sc, UPGT_DEBUG_RECV, "%s: received 802.11 RX data\n",
__func__);
m = upgt_rx(sc, data->buf + 4, le16toh(header->header1.len),
rssi);
} else if (h1_type == UPGT_H1_TYPE_CTRL &&
h2_type == UPGT_H2_TYPE_STATS) {
DPRINTF(sc, UPGT_DEBUG_STAT, "%s: received statistic data\n",
__func__);
/* TODO: what could we do with the statistic data? */
} else {
/* ignore unknown frame types */
DPRINTF(sc, UPGT_DEBUG_INTR,
"received unknown frame type 0x%02x\n",
header->header1.type);
}
return (m);
}
/*
* The firmware expects a checksum for each frame we send to it.
* The algorithm used for this is uncommon but somewhat similar to CRC32.
*/
static uint32_t
upgt_chksum_le(const uint32_t *buf, size_t size)
{
size_t i;
uint32_t crc = 0;
for (i = 0; i < size; i += sizeof(uint32_t)) {
crc = htole32(crc ^ *buf++);
crc = htole32((crc >> 5) ^ (crc << 3));
}
return (crc);
}
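/*
 * Usage sketch (illustration only, not driver code): checksum a small
 * little-endian payload the way upgt_eeprom_read() and upgt_tx_start()
 * do before handing a frame to the firmware.  The helper name and the
 * example words are made up for this sketch.
 */
static __unused uint32_t
example_upgt_chksum_usage(void)
{
        uint32_t words[3];

        words[0] = htole32(0x00000001);         /* arbitrary payload */
        words[1] = htole32(0x00000002);
        words[2] = htole32(0x00000003);
        /* the size argument is in bytes, as in the callers above */
        return (upgt_chksum_le(words, sizeof(words)));
}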
static struct mbuf *
upgt_rx(struct upgt_softc *sc, uint8_t *data, int pkglen, int *rssi)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct upgt_lmac_rx_desc *rxdesc;
struct mbuf *m;
/*
* don't pass packets to the ieee80211 framework if the driver isn't
* RUNNING.
*/
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ if (!(sc->sc_flags & UPGT_FLAG_INITDONE))
return (NULL);
/* access RX packet descriptor */
rxdesc = (struct upgt_lmac_rx_desc *)data;
/* create an mbuf which is suitable for strict-alignment archs */
KASSERT((pkglen + ETHER_ALIGN) < MCLBYTES,
("mbuf cluster is too small for the frame (%d)", pkglen + ETHER_ALIGN));
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
device_printf(sc->sc_dev, "could not create RX mbuf\n");
return (NULL);
}
m_adj(m, ETHER_ALIGN);
memcpy(mtod(m, char *), rxdesc->data, pkglen);
/* trim FCS */
m->m_len = m->m_pkthdr.len = pkglen - IEEE80211_CRC_LEN;
- m->m_pkthdr.rcvif = ifp;
if (ieee80211_radiotap_active(ic)) {
struct upgt_rx_radiotap_header *tap = &sc->sc_rxtap;
tap->wr_flags = 0;
tap->wr_rate = upgt_rx_rate(sc, rxdesc->rate);
tap->wr_antsignal = rxdesc->rssi;
}
- if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
DPRINTF(sc, UPGT_DEBUG_RX_PROC, "%s: RX done\n", __func__);
*rssi = rxdesc->rssi;
return (m);
}
static uint8_t
upgt_rx_rate(struct upgt_softc *sc, const int rate)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
static const uint8_t cck_upgt2rate[4] = { 2, 4, 11, 22 };
static const uint8_t ofdm_upgt2rate[12] =
{ 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 };
if (ic->ic_curmode == IEEE80211_MODE_11B &&
!(rate < 0 || rate > 3))
return cck_upgt2rate[rate & 0xf];
if (ic->ic_curmode == IEEE80211_MODE_11G &&
!(rate < 0 || rate > 11))
return ofdm_upgt2rate[rate & 0xf];
return (0);
}
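/*
 * Mapping example (derived from the tables above): in 11b mode the
 * firmware rate index 3 yields cck_upgt2rate[3] = 22, i.e. 11 Mb/s in
 * net80211's 500 kb/s units, and in 11g mode index 11 yields
 * ofdm_upgt2rate[11] = 108, i.e. 54 Mb/s.
 */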
static void
upgt_tx_done(struct upgt_softc *sc, uint8_t *data)
{
- struct ifnet *ifp = sc->sc_ifp;
struct upgt_lmac_tx_done_desc *desc;
int i, freed = 0;
UPGT_ASSERT_LOCKED(sc);
desc = (struct upgt_lmac_tx_done_desc *)data;
for (i = 0; i < UPGT_TX_MAXCOUNT; i++) {
struct upgt_data *data_tx = &sc->sc_tx_data[i];
if (data_tx->addr == le32toh(desc->header2.reqid)) {
upgt_mem_free(sc, data_tx->addr);
data_tx->ni = NULL;
data_tx->addr = 0;
data_tx->m = NULL;
DPRINTF(sc, UPGT_DEBUG_TX_PROC,
"TX done: memaddr=0x%08x, status=0x%04x, rssi=%d, ",
le32toh(desc->header2.reqid),
le16toh(desc->status), le16toh(desc->rssi));
DPRINTF(sc, UPGT_DEBUG_TX_PROC, "seq=%d\n",
le16toh(desc->seq));
freed++;
}
}
if (freed != 0) {
- sc->sc_tx_timer = 0;
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
UPGT_UNLOCK(sc);
- upgt_start(ifp);
+ sc->sc_tx_timer = 0;
+ upgt_start(sc);
UPGT_LOCK(sc);
}
}
static void
upgt_mem_free(struct upgt_softc *sc, uint32_t addr)
{
int i;
for (i = 0; i < sc->sc_memory.pages; i++) {
if (sc->sc_memory.page[i].addr == addr) {
sc->sc_memory.page[i].used = 0;
return;
}
}
device_printf(sc->sc_dev,
"could not free memory address 0x%08x\n", addr);
}
static int
upgt_fw_load(struct upgt_softc *sc)
{
const struct firmware *fw;
struct upgt_data *data_cmd;
struct upgt_fw_x2_header *x2;
char start_fwload_cmd[] = { 0x3c, 0x0d };
int error = 0;
size_t offset;
int bsize;
int n;
uint32_t crc32;
fw = firmware_get(upgt_fwname);
if (fw == NULL) {
device_printf(sc->sc_dev, "could not read microcode %s\n",
upgt_fwname);
return (EIO);
}
UPGT_LOCK(sc);
/* send firmware start load command */
data_cmd = upgt_getbuf(sc);
if (data_cmd == NULL) {
error = ENOBUFS;
goto fail;
}
data_cmd->buflen = sizeof(start_fwload_cmd);
memcpy(data_cmd->buf, start_fwload_cmd, data_cmd->buflen);
upgt_bulk_tx(sc, data_cmd);
/* send X2 header */
data_cmd = upgt_getbuf(sc);
if (data_cmd == NULL) {
error = ENOBUFS;
goto fail;
}
data_cmd->buflen = sizeof(struct upgt_fw_x2_header);
x2 = (struct upgt_fw_x2_header *)data_cmd->buf;
memcpy(x2->signature, UPGT_X2_SIGNATURE, UPGT_X2_SIGNATURE_SIZE);
x2->startaddr = htole32(UPGT_MEMADDR_FIRMWARE_START);
x2->len = htole32(fw->datasize);
x2->crc = upgt_crc32_le((uint8_t *)data_cmd->buf +
UPGT_X2_SIGNATURE_SIZE,
sizeof(struct upgt_fw_x2_header) - UPGT_X2_SIGNATURE_SIZE -
sizeof(uint32_t));
upgt_bulk_tx(sc, data_cmd);
/* download firmware */
for (offset = 0; offset < fw->datasize; offset += bsize) {
if (fw->datasize - offset > UPGT_FW_BLOCK_SIZE)
bsize = UPGT_FW_BLOCK_SIZE;
else
bsize = fw->datasize - offset;
data_cmd = upgt_getbuf(sc);
if (data_cmd == NULL) {
error = ENOBUFS;
goto fail;
}
n = upgt_fw_copy((const uint8_t *)fw->data + offset,
data_cmd->buf, bsize);
data_cmd->buflen = bsize;
upgt_bulk_tx(sc, data_cmd);
DPRINTF(sc, UPGT_DEBUG_FW, "FW offset=%d, read=%d, sent=%d\n",
offset, n, bsize);
bsize = n;
}
DPRINTF(sc, UPGT_DEBUG_FW, "%s: firmware downloaded\n", __func__);
/* load firmware */
data_cmd = upgt_getbuf(sc);
if (data_cmd == NULL) {
error = ENOBUFS;
goto fail;
}
crc32 = upgt_crc32_le(fw->data, fw->datasize);
*((uint32_t *)(data_cmd->buf) ) = crc32;
*((uint8_t *)(data_cmd->buf) + 4) = 'g';
*((uint8_t *)(data_cmd->buf) + 5) = '\r';
data_cmd->buflen = 6;
upgt_bulk_tx(sc, data_cmd);
/* wait for the 'OK' response. */
usbd_transfer_start(sc->sc_xfer[UPGT_BULK_RX]);
error = mtx_sleep(sc, &sc->sc_mtx, 0, "upgtfw", 2 * hz);
if (error != 0) {
device_printf(sc->sc_dev, "firmware load failed\n");
error = EIO;
}
DPRINTF(sc, UPGT_DEBUG_FW, "%s: firmware loaded\n", __func__);
fail:
UPGT_UNLOCK(sc);
firmware_put(fw, FIRMWARE_UNLOAD);
return (error);
}
static uint32_t
upgt_crc32_le(const void *buf, size_t size)
{
uint32_t crc;
crc = ether_crc32_le(buf, size);
/* apply the final XOR value, as is common for CRC-32 */
crc = htole32(crc ^ 0xffffffffU);
return (crc);
}
/*
* While copying the version 2 firmware, we need to replace two characters:
*
* 0x7e -> 0x7d 0x5e
* 0x7d -> 0x7d 0x5d
*/
static int
upgt_fw_copy(const uint8_t *src, char *dst, int size)
{
int i, j;
for (i = 0, j = 0; i < size && j < size; i++) {
switch (src[i]) {
case 0x7e:
dst[j] = 0x7d;
j++;
dst[j] = 0x5e;
j++;
break;
case 0x7d:
dst[j] = 0x7d;
j++;
dst[j] = 0x5d;
j++;
break;
default:
dst[j] = src[i];
j++;
break;
}
}
return (i);
}
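/*
 * Worked example (illustration only, not driver code): feeding the four
 * bytes 01 7e 02 7d through upgt_fw_copy() with size = 4 writes the four
 * output bytes 01 7d 5e 02 and returns 3, because the escaped 0x7d no
 * longer fits into the four-byte output budget.  upgt_fw_load() above
 * copes with this by advancing the file offset by the returned count, so
 * unconsumed bytes are retried in the next block.
 */
static __unused void
example_upgt_fw_copy(void)
{
        static const uint8_t in[4] = { 0x01, 0x7e, 0x02, 0x7d };
        char out[4];
        int consumed;

        consumed = upgt_fw_copy(in, out, sizeof(in));
        /* consumed == 3; the trailing 0x7d is left for the next call */
        (void)consumed;
        (void)out;
}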
static int
upgt_mem_init(struct upgt_softc *sc)
{
int i;
for (i = 0; i < UPGT_MEMORY_MAX_PAGES; i++) {
sc->sc_memory.page[i].used = 0;
if (i == 0) {
/*
* The first memory page is always reserved for
* command data.
*/
sc->sc_memory.page[i].addr =
sc->sc_memaddr_frame_start + MCLBYTES;
} else {
sc->sc_memory.page[i].addr =
sc->sc_memory.page[i - 1].addr + MCLBYTES;
}
if (sc->sc_memory.page[i].addr + MCLBYTES >=
sc->sc_memaddr_frame_end)
break;
DPRINTF(sc, UPGT_DEBUG_FW, "memory address page %d=0x%08x\n",
i, sc->sc_memory.page[i].addr);
}
sc->sc_memory.pages = i;
DPRINTF(sc, UPGT_DEBUG_FW, "memory pages=%d\n", sc->sc_memory.pages);
return (0);
}
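/*
 * Layout note (derived from the code above): allocatable pages are
 * MCLBYTES apart, starting one cluster past sc_memaddr_frame_start, and
 * the usable count is capped both by UPGT_MEMORY_MAX_PAGES and by the
 * end of the device's frame memory window (sc_memaddr_frame_end).
 */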
static int
upgt_fw_verify(struct upgt_softc *sc)
{
const struct firmware *fw;
const struct upgt_fw_bra_option *bra_opt;
const struct upgt_fw_bra_descr *descr;
const uint8_t *p;
const uint32_t *uc;
uint32_t bra_option_type, bra_option_len;
size_t offset;
int bra_end = 0;
int error = 0;
fw = firmware_get(upgt_fwname);
if (fw == NULL) {
device_printf(sc->sc_dev, "could not read microcode %s\n",
upgt_fwname);
return EIO;
}
/*
* Seek to beginning of Boot Record Area (BRA).
*/
for (offset = 0; offset < fw->datasize; offset += sizeof(*uc)) {
uc = (const uint32_t *)((const uint8_t *)fw->data + offset);
if (*uc == 0)
break;
}
for (; offset < fw->datasize; offset += sizeof(*uc)) {
uc = (const uint32_t *)((const uint8_t *)fw->data + offset);
if (*uc != 0)
break;
}
if (offset == fw->datasize) {
device_printf(sc->sc_dev,
"firmware Boot Record Area not found\n");
error = EIO;
goto fail;
}
DPRINTF(sc, UPGT_DEBUG_FW,
"firmware Boot Record Area found at offset %d\n", offset);
/*
* Parse Boot Record Area (BRA) options.
*/
while (offset < fw->datasize && bra_end == 0) {
/* get current BRA option */
p = (const uint8_t *)fw->data + offset;
bra_opt = (const struct upgt_fw_bra_option *)p;
bra_option_type = le32toh(bra_opt->type);
bra_option_len = le32toh(bra_opt->len) * sizeof(*uc);
switch (bra_option_type) {
case UPGT_BRA_TYPE_FW:
DPRINTF(sc, UPGT_DEBUG_FW, "UPGT_BRA_TYPE_FW len=%d\n",
bra_option_len);
if (bra_option_len != UPGT_BRA_FWTYPE_SIZE) {
device_printf(sc->sc_dev,
"wrong UPGT_BRA_TYPE_FW len\n");
error = EIO;
goto fail;
}
if (memcmp(UPGT_BRA_FWTYPE_LM86, bra_opt->data,
bra_option_len) == 0) {
sc->sc_fw_type = UPGT_FWTYPE_LM86;
break;
}
if (memcmp(UPGT_BRA_FWTYPE_LM87, bra_opt->data,
bra_option_len) == 0) {
sc->sc_fw_type = UPGT_FWTYPE_LM87;
break;
}
device_printf(sc->sc_dev,
"unsupported firmware type\n");
error = EIO;
goto fail;
case UPGT_BRA_TYPE_VERSION:
DPRINTF(sc, UPGT_DEBUG_FW,
"UPGT_BRA_TYPE_VERSION len=%d\n", bra_option_len);
break;
case UPGT_BRA_TYPE_DEPIF:
DPRINTF(sc, UPGT_DEBUG_FW,
"UPGT_BRA_TYPE_DEPIF len=%d\n", bra_option_len);
break;
case UPGT_BRA_TYPE_EXPIF:
DPRINTF(sc, UPGT_DEBUG_FW,
"UPGT_BRA_TYPE_EXPIF len=%d\n", bra_option_len);
break;
case UPGT_BRA_TYPE_DESCR:
DPRINTF(sc, UPGT_DEBUG_FW,
"UPGT_BRA_TYPE_DESCR len=%d\n", bra_option_len);
descr = (const struct upgt_fw_bra_descr *)bra_opt->data;
sc->sc_memaddr_frame_start =
le32toh(descr->memaddr_space_start);
sc->sc_memaddr_frame_end =
le32toh(descr->memaddr_space_end);
DPRINTF(sc, UPGT_DEBUG_FW,
"memory address space start=0x%08x\n",
sc->sc_memaddr_frame_start);
DPRINTF(sc, UPGT_DEBUG_FW,
"memory address space end=0x%08x\n",
sc->sc_memaddr_frame_end);
break;
case UPGT_BRA_TYPE_END:
DPRINTF(sc, UPGT_DEBUG_FW, "UPGT_BRA_TYPE_END len=%d\n",
bra_option_len);
bra_end = 1;
break;
default:
DPRINTF(sc, UPGT_DEBUG_FW, "unknown BRA option len=%d\n",
bra_option_len);
error = EIO;
goto fail;
}
/* jump to next BRA option */
offset += sizeof(struct upgt_fw_bra_option) + bra_option_len;
}
DPRINTF(sc, UPGT_DEBUG_FW, "%s: firmware verified", __func__);
fail:
firmware_put(fw, FIRMWARE_UNLOAD);
return (error);
}
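/*
 * Option-walking sketch (illustration only, not driver code): every Boot
 * Record Area entry is an 8-byte {type, len} header followed by "len"
 * 32-bit words of data, so stepping to the next entry only needs the
 * header, exactly as the parser above does.  The helper name is made up
 * for this sketch.
 */
static __unused size_t
example_next_bra_option(const uint8_t *image, size_t offset)
{
        const struct upgt_fw_bra_option *opt;

        opt = (const struct upgt_fw_bra_option *)(image + offset);
        /* len is little-endian and counted in 32-bit words */
        return (offset + sizeof(*opt) +
            le32toh(opt->len) * sizeof(uint32_t));
}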
static void
upgt_bulk_tx(struct upgt_softc *sc, struct upgt_data *data)
{
UPGT_ASSERT_LOCKED(sc);
STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next);
UPGT_STAT_INC(sc, st_tx_pending);
usbd_transfer_start(sc->sc_xfer[UPGT_BULK_TX]);
}
static int
upgt_device_reset(struct upgt_softc *sc)
{
struct upgt_data *data;
char init_cmd[] = { 0x7e, 0x7e, 0x7e, 0x7e };
UPGT_LOCK(sc);
data = upgt_getbuf(sc);
if (data == NULL) {
UPGT_UNLOCK(sc);
return (ENOBUFS);
}
memcpy(data->buf, init_cmd, sizeof(init_cmd));
data->buflen = sizeof(init_cmd);
upgt_bulk_tx(sc, data);
usb_pause_mtx(&sc->sc_mtx, 100);
UPGT_UNLOCK(sc);
DPRINTF(sc, UPGT_DEBUG_FW, "%s: device initialized\n", __func__);
return (0);
}
static int
upgt_alloc_tx(struct upgt_softc *sc)
{
int i;
STAILQ_INIT(&sc->sc_tx_active);
STAILQ_INIT(&sc->sc_tx_inactive);
STAILQ_INIT(&sc->sc_tx_pending);
for (i = 0; i < UPGT_TX_MAXCOUNT; i++) {
struct upgt_data *data = &sc->sc_tx_data[i];
data->buf = ((uint8_t *)sc->sc_tx_dma_buf) + (i * MCLBYTES);
STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next);
UPGT_STAT_INC(sc, st_tx_inactive);
}
return (0);
}
static int
upgt_alloc_rx(struct upgt_softc *sc)
{
int i;
STAILQ_INIT(&sc->sc_rx_active);
STAILQ_INIT(&sc->sc_rx_inactive);
for (i = 0; i < UPGT_RX_MAXCOUNT; i++) {
struct upgt_data *data = &sc->sc_rx_data[i];
data->buf = ((uint8_t *)sc->sc_rx_dma_buf) + (i * MCLBYTES);
STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
}
return (0);
}
static int
upgt_detach(device_t dev)
{
struct upgt_softc *sc = device_get_softc(dev);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
unsigned int x;
/*
* Prevent further allocations from RX/TX/CMD
* data lists and ioctls
*/
UPGT_LOCK(sc);
sc->sc_flags |= UPGT_FLAG_DETACHED;
STAILQ_INIT(&sc->sc_tx_active);
STAILQ_INIT(&sc->sc_tx_inactive);
STAILQ_INIT(&sc->sc_tx_pending);
STAILQ_INIT(&sc->sc_rx_active);
STAILQ_INIT(&sc->sc_rx_inactive);
- UPGT_UNLOCK(sc);
upgt_stop(sc);
+ UPGT_UNLOCK(sc);
callout_drain(&sc->sc_led_ch);
callout_drain(&sc->sc_watchdog_ch);
/* drain USB transfers */
for (x = 0; x != UPGT_N_XFERS; x++)
usbd_transfer_drain(sc->sc_xfer[x]);
/* free data buffers */
UPGT_LOCK(sc);
upgt_free_rx(sc);
upgt_free_tx(sc);
UPGT_UNLOCK(sc);
/* free USB transfers and some data buffers */
usbd_transfer_unsetup(sc->sc_xfer, UPGT_N_XFERS);
ieee80211_ifdetach(ic);
- if_free(ifp);
+ mbufq_drain(&sc->sc_snd);
mtx_destroy(&sc->sc_mtx);
return (0);
}
static void
upgt_free_rx(struct upgt_softc *sc)
{
int i;
for (i = 0; i < UPGT_RX_MAXCOUNT; i++) {
struct upgt_data *data = &sc->sc_rx_data[i];
data->buf = NULL;
data->ni = NULL;
}
}
static void
upgt_free_tx(struct upgt_softc *sc)
{
int i;
for (i = 0; i < UPGT_TX_MAXCOUNT; i++) {
struct upgt_data *data = &sc->sc_tx_data[i];
if (data->ni != NULL)
ieee80211_free_node(data->ni);
data->buf = NULL;
data->ni = NULL;
}
}
static void
upgt_abort_xfers_locked(struct upgt_softc *sc)
{
int i;
UPGT_ASSERT_LOCKED(sc);
/* abort any pending transfers */
for (i = 0; i < UPGT_N_XFERS; i++)
usbd_transfer_stop(sc->sc_xfer[i]);
}
static void
upgt_abort_xfers(struct upgt_softc *sc)
{
UPGT_LOCK(sc);
upgt_abort_xfers_locked(sc);
UPGT_UNLOCK(sc);
}
#define UPGT_SYSCTL_STAT_ADD32(c, h, n, p, d) \
SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
static void
upgt_sysctl_node(struct upgt_softc *sc)
{
struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *child;
struct sysctl_oid *tree;
struct upgt_stat *stats;
stats = &sc->sc_stat;
ctx = device_get_sysctl_ctx(sc->sc_dev);
child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
NULL, "UPGT statistics");
child = SYSCTL_CHILDREN(tree);
UPGT_SYSCTL_STAT_ADD32(ctx, child, "tx_active",
&stats->st_tx_active, "Active numbers in TX queue");
UPGT_SYSCTL_STAT_ADD32(ctx, child, "tx_inactive",
&stats->st_tx_inactive, "Inactive numbers in TX queue");
UPGT_SYSCTL_STAT_ADD32(ctx, child, "tx_pending",
&stats->st_tx_pending, "Pending numbers in TX queue");
}
#undef UPGT_SYSCTL_STAT_ADD32
static struct upgt_data *
_upgt_getbuf(struct upgt_softc *sc)
{
struct upgt_data *bf;
bf = STAILQ_FIRST(&sc->sc_tx_inactive);
if (bf != NULL) {
STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next);
UPGT_STAT_DEC(sc, st_tx_inactive);
} else
bf = NULL;
if (bf == NULL)
DPRINTF(sc, UPGT_DEBUG_XMIT, "%s: %s\n", __func__,
"out of xmit buffers");
return (bf);
}
static struct upgt_data *
upgt_getbuf(struct upgt_softc *sc)
{
struct upgt_data *bf;
UPGT_ASSERT_LOCKED(sc);
bf = _upgt_getbuf(sc);
- if (bf == NULL) {
- struct ifnet *ifp = sc->sc_ifp;
-
+ if (bf == NULL)
DPRINTF(sc, UPGT_DEBUG_XMIT, "%s: stop queue\n", __func__);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- }
return (bf);
}
static struct upgt_data *
upgt_gettxbuf(struct upgt_softc *sc)
{
struct upgt_data *bf;
UPGT_ASSERT_LOCKED(sc);
bf = upgt_getbuf(sc);
if (bf == NULL)
return (NULL);
bf->addr = upgt_mem_alloc(sc);
if (bf->addr == 0) {
- struct ifnet *ifp = sc->sc_ifp;
-
DPRINTF(sc, UPGT_DEBUG_XMIT, "%s: no free prism memory!\n",
__func__);
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next);
UPGT_STAT_INC(sc, st_tx_inactive);
- if (!(ifp->if_drv_flags & IFF_DRV_OACTIVE))
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
return (NULL);
}
return (bf);
}
static int
upgt_tx_start(struct upgt_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
struct upgt_data *data)
{
struct ieee80211vap *vap = ni->ni_vap;
int error = 0, len;
struct ieee80211_frame *wh;
struct ieee80211_key *k;
- struct ifnet *ifp = sc->sc_ifp;
struct upgt_lmac_mem *mem;
struct upgt_lmac_tx_desc *txdesc;
UPGT_ASSERT_LOCKED(sc);
upgt_set_led(sc, UPGT_LED_BLINK);
/*
* Software crypto.
*/
wh = mtod(m, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
device_printf(sc->sc_dev,
"ieee80211_crypto_encap returns NULL.\n");
error = EIO;
goto done;
}
/* in case packet header moved, reset pointer */
wh = mtod(m, struct ieee80211_frame *);
}
/* Transmit the URB containing the TX data. */
memset(data->buf, 0, MCLBYTES);
mem = (struct upgt_lmac_mem *)data->buf;
mem->addr = htole32(data->addr);
txdesc = (struct upgt_lmac_tx_desc *)(mem + 1);
if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
IEEE80211_FC0_TYPE_MGT) {
/* mgmt frames */
txdesc->header1.flags = UPGT_H1_FLAGS_TX_MGMT;
/* always send mgmt frames at lowest rate (DS1) */
memset(txdesc->rates, 0x10, sizeof(txdesc->rates));
} else {
/* data frames */
txdesc->header1.flags = UPGT_H1_FLAGS_TX_DATA;
memcpy(txdesc->rates, sc->sc_cur_rateset, sizeof(txdesc->rates));
}
txdesc->header1.type = UPGT_H1_TYPE_TX_DATA;
txdesc->header1.len = htole16(m->m_pkthdr.len);
txdesc->header2.reqid = htole32(data->addr);
txdesc->header2.type = htole16(UPGT_H2_TYPE_TX_ACK_YES);
txdesc->header2.flags = htole16(UPGT_H2_FLAGS_TX_ACK_YES);
txdesc->type = htole32(UPGT_TX_DESC_TYPE_DATA);
txdesc->pad3[0] = UPGT_TX_DESC_PAD3_SIZE;
if (ieee80211_radiotap_active_vap(vap)) {
struct upgt_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = 0; /* XXX where to get from? */
ieee80211_radiotap_tx(vap, m);
}
/* copy frame below our TX descriptor header */
m_copydata(m, 0, m->m_pkthdr.len,
data->buf + (sizeof(*mem) + sizeof(*txdesc)));
/* calculate frame size */
len = sizeof(*mem) + sizeof(*txdesc) + m->m_pkthdr.len;
/* we need to align the frame to a 4-byte boundary */
len = (len + 3) & ~3;
/* calculate frame checksum */
mem->chksum = upgt_chksum_le((uint32_t *)txdesc, len - sizeof(*mem));
data->ni = ni;
data->m = m;
data->buflen = len;
DPRINTF(sc, UPGT_DEBUG_XMIT, "%s: TX start data sending (%d bytes)\n",
__func__, len);
KASSERT(len <= MCLBYTES, ("mbuf is small for saving data"));
upgt_bulk_tx(sc, data);
done:
/*
* If we don't regularly read the device statistics, the RX queue
* will stall. It's strange, but it works, so we keep reading
* the statistics here. *shrug*
*/
- if (!(ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS) %
+ if (!(vap->iv_ifp->if_get_counter(vap->iv_ifp, IFCOUNTER_OPACKETS) %
UPGT_TX_STAT_INTERVAL))
upgt_get_stats(sc);
return (error);
}
static void
upgt_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct upgt_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct mbuf *m = NULL;
struct upgt_data *data;
int8_t nf;
int rssi = -1;
UPGT_ASSERT_LOCKED(sc);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
data = STAILQ_FIRST(&sc->sc_rx_active);
if (data == NULL)
goto setup;
STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
m = upgt_rxeof(xfer, data, &rssi);
STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
/* FALLTHROUGH */
case USB_ST_SETUP:
setup:
data = STAILQ_FIRST(&sc->sc_rx_inactive);
if (data == NULL)
return;
STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next);
STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next);
usbd_xfer_set_frame_data(xfer, 0, data->buf, MCLBYTES);
usbd_transfer_submit(xfer);
/*
* To avoid a LOR, unlock our private mutex here before calling
* ieee80211_input(); we are at the end of a USB callback, so it
* is safe to unlock.
*/
UPGT_UNLOCK(sc);
if (m != NULL) {
wh = mtod(m, struct ieee80211_frame *);
ni = ieee80211_find_rxnode(ic,
(struct ieee80211_frame_min *)wh);
nf = -95; /* XXX */
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi, nf);
/* node is no longer needed */
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi, nf);
m = NULL;
}
- if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
- !IFQ_IS_EMPTY(&ifp->if_snd))
- upgt_start(ifp);
UPGT_LOCK(sc);
+ upgt_start(sc);
break;
default:
/* return it to the inactive queue due to an error. */
data = STAILQ_FIRST(&sc->sc_rx_active);
if (data != NULL) {
STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
}
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto setup;
}
break;
}
}
static void
upgt_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct upgt_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
struct upgt_data *data;
UPGT_ASSERT_LOCKED(sc);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
data = STAILQ_FIRST(&sc->sc_tx_active);
if (data == NULL)
goto setup;
STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next);
UPGT_STAT_DEC(sc, st_tx_active);
upgt_txeof(xfer, data);
STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next);
UPGT_STAT_INC(sc, st_tx_inactive);
/* FALLTHROUGH */
case USB_ST_SETUP:
setup:
data = STAILQ_FIRST(&sc->sc_tx_pending);
if (data == NULL) {
DPRINTF(sc, UPGT_DEBUG_XMIT, "%s: empty pending queue\n",
__func__);
return;
}
STAILQ_REMOVE_HEAD(&sc->sc_tx_pending, next);
UPGT_STAT_DEC(sc, st_tx_pending);
STAILQ_INSERT_TAIL(&sc->sc_tx_active, data, next);
UPGT_STAT_INC(sc, st_tx_active);
usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen);
usbd_transfer_submit(xfer);
- UPGT_UNLOCK(sc);
- upgt_start(ifp);
- UPGT_LOCK(sc);
+ upgt_start(sc);
break;
default:
data = STAILQ_FIRST(&sc->sc_tx_active);
if (data == NULL)
goto setup;
if (data->ni != NULL) {
+ if_inc_counter(data->ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(data->ni);
data->ni = NULL;
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
goto setup;
}
break;
}
}
static device_method_t upgt_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, upgt_match),
DEVMETHOD(device_attach, upgt_attach),
DEVMETHOD(device_detach, upgt_detach),
DEVMETHOD_END
};
static driver_t upgt_driver = {
.name = "upgt",
.methods = upgt_methods,
.size = sizeof(struct upgt_softc)
};
static devclass_t upgt_devclass;
DRIVER_MODULE(if_upgt, uhub, upgt_driver, upgt_devclass, NULL, 0);
MODULE_VERSION(if_upgt, 1);
MODULE_DEPEND(if_upgt, usb, 1, 1, 1);
MODULE_DEPEND(if_upgt, wlan, 1, 1, 1);
MODULE_DEPEND(if_upgt, upgtfw_fw, 1, 1, 1);
Index: head/sys/dev/usb/wlan/if_upgtvar.h
===================================================================
--- head/sys/dev/usb/wlan/if_upgtvar.h (revision 287196)
+++ head/sys/dev/usb/wlan/if_upgtvar.h (revision 287197)
@@ -1,484 +1,482 @@
/* $OpenBSD: if_upgtvar.h,v 1.14 2008/02/02 13:48:44 mglocker Exp $ */
/* $FreeBSD$ */
/*
* Copyright (c) 2007 Marcus Glocker <mglocker@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
struct upgt_softc;
/*
* General values.
*/
enum {
UPGT_BULK_RX,
UPGT_BULK_TX,
UPGT_N_XFERS = 2,
};
#define UPGT_CONFIG_INDEX 0
#define UPGT_IFACE_INDEX 0
#define UPGT_USB_TIMEOUT 1000
#define UPGT_FIRMWARE_TIMEOUT 10
#define UPGT_MEMADDR_FIRMWARE_START 0x00020000 /* 512 bytes large */
#define UPGT_MEMSIZE_FRAME_HEAD 0x0070
#define UPGT_MEMSIZE_RX 0x3500
#define UPGT_RX_MAXCOUNT 6
#define UPGT_TX_MAXCOUNT 128
#define UPGT_TX_STAT_INTERVAL 5
#define UPGT_RX_MINSZ (sizeof(struct upgt_lmac_header) + 4)
/* device flags */
#define UPGT_DEVICE_ATTACHED (1 << 0)
/* leds */
#define UPGT_LED_OFF 0
#define UPGT_LED_ON 1
#define UPGT_LED_BLINK 2
/*
* Firmware.
*/
#define UPGT_FW_BLOCK_SIZE 256
#define UPGT_BRA_FWTYPE_SIZE 4
#define UPGT_BRA_FWTYPE_LM86 "LM86"
#define UPGT_BRA_FWTYPE_LM87 "LM87"
enum upgt_fw_type {
UPGT_FWTYPE_LM86,
UPGT_FWTYPE_LM87
};
#define UPGT_BRA_TYPE_FW 0x80000001
#define UPGT_BRA_TYPE_VERSION 0x80000002
#define UPGT_BRA_TYPE_DEPIF 0x80000003
#define UPGT_BRA_TYPE_EXPIF 0x80000004
#define UPGT_BRA_TYPE_DESCR 0x80000101
#define UPGT_BRA_TYPE_END 0xff0000ff
struct upgt_fw_bra_option {
uint32_t type;
uint32_t len;
uint8_t data[];
} __packed;
struct upgt_fw_bra_descr {
uint32_t unknown1;
uint32_t memaddr_space_start;
uint32_t memaddr_space_end;
uint32_t unknown2;
uint32_t unknown3;
uint8_t rates[20];
} __packed;
#define UPGT_X2_SIGNATURE_SIZE 4
#define UPGT_X2_SIGNATURE "x2 "
struct upgt_fw_x2_header {
uint8_t signature[4];
uint32_t startaddr;
uint32_t len;
uint32_t crc;
} __packed;
/*
* EEPROM.
*/
#define UPGT_EEPROM_SIZE 8192
#define UPGT_EEPROM_BLOCK_SIZE 1020
struct upgt_eeprom_header {
/* 14 bytes */
uint32_t magic;
uint16_t pad1;
uint16_t preamble_len;
uint32_t pad2;
/* data */
} __packed;
#define UPGT_EEPROM_TYPE_END 0x0000
#define UPGT_EEPROM_TYPE_NAME 0x0001
#define UPGT_EEPROM_TYPE_SERIAL 0x0003
#define UPGT_EEPROM_TYPE_MAC 0x0101
#define UPGT_EEPROM_TYPE_HWRX 0x1001
#define UPGT_EEPROM_TYPE_CHIP 0x1002
#define UPGT_EEPROM_TYPE_FREQ3 0x1903
#define UPGT_EEPROM_TYPE_FREQ4 0x1904
#define UPGT_EEPROM_TYPE_FREQ5 0x1905
#define UPGT_EEPROM_TYPE_FREQ6 0x1906
#define UPGT_EEPROM_TYPE_OFF 0xffff
struct upgt_eeprom_option {
uint16_t len;
uint16_t type;
uint8_t data[];
/* data */
} __packed;
#define UPGT_EEPROM_RX_CONST 0x88
struct upgt_eeprom_option_hwrx {
uint32_t pad1;
uint8_t rxfilter;
uint8_t pad2[15];
} __packed;
struct upgt_eeprom_freq3_header {
uint8_t flags;
uint8_t elements;
} __packed;
struct upgt_eeprom_freq4_header {
uint8_t flags;
uint8_t elements;
uint8_t settings;
uint8_t type;
} __packed;
struct upgt_eeprom_freq4_1 {
uint16_t freq;
uint8_t data[50];
} __packed;
struct upgt_eeprom_freq4_2 {
uint16_t head;
uint8_t subtails[4];
uint8_t tail;
} __packed;
/*
* LMAC protocol.
*/
struct upgt_lmac_mem {
uint32_t addr;
uint32_t chksum;
} __packed;
#define UPGT_H1_FLAGS_TX_MGMT 0x00 /* for TX: mgmt frame */
#define UPGT_H1_FLAGS_TX_NO_CALLBACK 0x01 /* for TX: no USB callback */
#define UPGT_H1_FLAGS_TX_DATA 0x10 /* for TX: data frame */
#define UPGT_H1_TYPE_RX_DATA 0x00 /* 802.11 RX data frame */
#define UPGT_H1_TYPE_RX_DATA_MGMT 0x04 /* 802.11 RX mgmt frame */
#define UPGT_H1_TYPE_TX_DATA 0x40 /* 802.11 TX data frame */
#define UPGT_H1_TYPE_CTRL 0x80 /* control frame */
struct upgt_lmac_h1 {
/* 4 bytes */
uint8_t flags;
uint8_t type;
uint16_t len;
} __packed;
#define UPGT_H2_TYPE_TX_ACK_NO 0x0000
#define UPGT_H2_TYPE_TX_ACK_YES 0x0001
#define UPGT_H2_TYPE_MACFILTER 0x0000
#define UPGT_H2_TYPE_CHANNEL 0x0001
#define UPGT_H2_TYPE_TX_DONE 0x0008
#define UPGT_H2_TYPE_STATS 0x000a
#define UPGT_H2_TYPE_EEPROM 0x000c
#define UPGT_H2_TYPE_LED 0x000d
#define UPGT_H2_FLAGS_TX_ACK_NO 0x0101
#define UPGT_H2_FLAGS_TX_ACK_YES 0x0707
struct upgt_lmac_h2 {
/* 8 bytes */
uint32_t reqid;
uint16_t type;
uint16_t flags;
} __packed;
struct upgt_lmac_header {
/* 12 bytes */
struct upgt_lmac_h1 header1;
struct upgt_lmac_h2 header2;
} __packed;
struct upgt_lmac_eeprom {
/* 16 bytes */
struct upgt_lmac_h1 header1;
struct upgt_lmac_h2 header2;
uint16_t offset;
uint16_t len;
/* data */
} __packed;
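/*
 * Layout note: an EEPROM command on the wire is an upgt_lmac_mem prefix,
 * this 16-byte structure and then "len" payload bytes; header1.len
 * covers everything past the 12-byte LMAC header, which is why
 * upgt_eeprom_read() sets it to
 * sizeof(struct upgt_lmac_eeprom) - sizeof(struct upgt_lmac_header) + block.
 */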
#define UPGT_FILTER_TYPE_NONE 0x0000
#define UPGT_FILTER_TYPE_STA 0x0001
#define UPGT_FILTER_TYPE_IBSS 0x0002
#define UPGT_FILTER_TYPE_HOSTAP 0x0004
#define UPGT_FILTER_TYPE_MONITOR 0x0010
#define UPGT_FILTER_TYPE_RESET 0x0020
#define UPGT_FILTER_UNKNOWN1 0x0002
#define UPGT_FILTER_UNKNOWN2 0x0ca8
#define UPGT_FILTER_UNKNOWN3 0xffff
#define UPGT_FILTER_MONITOR_UNKNOWN1 0x0000
#define UPGT_FILTER_MONITOR_UNKNOWN2 0x0000
#define UPGT_FILTER_MONITOR_UNKNOWN3 0x0000
struct upgt_lmac_filter {
struct upgt_lmac_h1 header1;
struct upgt_lmac_h2 header2;
/* 32 bytes */
uint16_t type;
uint8_t dst[IEEE80211_ADDR_LEN];
uint8_t src[IEEE80211_ADDR_LEN];
uint16_t unknown1;
uint32_t rxaddr;
uint16_t unknown2;
uint32_t rxhw;
uint16_t unknown3;
uint32_t unknown4;
} __packed;
/* frequency 3 data */
struct upgt_lmac_freq3 {
uint16_t freq;
uint8_t data[6];
} __packed;
/* frequency 4 data */
struct upgt_lmac_freq4 {
struct upgt_eeprom_freq4_2 cmd;
uint8_t pad;
};
/* frequency 6 data */
struct upgt_lmac_freq6 {
uint16_t freq;
uint8_t data[8];
} __packed;
#define UPGT_CHANNEL_UNKNOWN1 0x0001
#define UPGT_CHANNEL_UNKNOWN2 0x0000
#define UPGT_CHANNEL_UNKNOWN3 0x48
struct upgt_lmac_channel {
struct upgt_lmac_h1 header1;
struct upgt_lmac_h2 header2;
/* 112 bytes */
uint16_t unknown1;
uint16_t unknown2;
uint8_t pad1[20];
struct upgt_lmac_freq6 freq6;
uint8_t settings;
uint8_t unknown3;
uint8_t freq3_1[4];
struct upgt_lmac_freq4 freq4[8];
uint8_t freq3_2[4];
uint32_t pad2;
} __packed;
#define UPGT_LED_MODE_SET 0x0003
#define UPGT_LED_ACTION_OFF 0x0002
#define UPGT_LED_ACTION_ON 0x0003
#define UPGT_LED_ACTION_TMP_DUR 100 /* ms */
struct upgt_lmac_led {
struct upgt_lmac_h1 header1;
struct upgt_lmac_h2 header2;
uint16_t mode;
uint16_t action_fix;
uint16_t action_tmp;
uint16_t action_tmp_dur;
} __packed;
struct upgt_lmac_stats {
struct upgt_lmac_h1 header1;
struct upgt_lmac_h2 header2;
uint8_t data[76];
} __packed;
struct upgt_lmac_rx_desc {
struct upgt_lmac_h1 header1;
/* 16 bytes */
uint16_t freq;
uint8_t unknown1;
uint8_t rate;
uint8_t rssi;
uint8_t pad;
uint16_t unknown2;
uint32_t timestamp;
uint32_t unknown3;
uint8_t data[];
} __packed;
#define UPGT_TX_DESC_KEY_EXISTS 0x01
struct upgt_lmac_tx_desc_wep {
uint8_t key_exists;
uint8_t key_len;
uint8_t key_val[16];
} __packed;
#define UPGT_TX_DESC_TYPE_BEACON 0x00000000
#define UPGT_TX_DESC_TYPE_PROBE 0x00000001
#define UPGT_TX_DESC_TYPE_MGMT 0x00000002
#define UPGT_TX_DESC_TYPE_DATA 0x00000004
#define UPGT_TX_DESC_PAD3_SIZE 2
struct upgt_lmac_tx_desc {
struct upgt_lmac_h1 header1;
struct upgt_lmac_h2 header2;
uint8_t rates[8];
uint16_t pad1;
struct upgt_lmac_tx_desc_wep wep_key;
uint32_t type;
uint32_t pad2;
uint32_t unknown1;
uint32_t unknown2;
uint8_t pad3[2];
/* 802.11 frame data */
} __packed;
#define UPGT_TX_DONE_DESC_STATUS_OK 0x0001
struct upgt_lmac_tx_done_desc {
struct upgt_lmac_h1 header1;
struct upgt_lmac_h2 header2;
uint16_t status;
uint16_t rssi;
uint16_t seq;
uint16_t unknown;
} __packed;
/*
* USB xfers.
*/
struct upgt_data {
uint8_t *buf;
uint32_t buflen;
struct ieee80211_node *ni;
struct mbuf *m;
uint32_t addr;
STAILQ_ENTRY(upgt_data) next;
};
typedef STAILQ_HEAD(, upgt_data) upgt_datahead;
/*
* Prism memory.
*/
struct upgt_memory_page {
uint8_t used;
uint32_t addr;
} __packed;
#define UPGT_MEMORY_MAX_PAGES 8
struct upgt_memory {
uint8_t pages;
struct upgt_memory_page page[UPGT_MEMORY_MAX_PAGES];
} __packed;
/*
* BPF
*/
struct upgt_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_antsignal;
} __packed __aligned(8);
#define UPGT_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL))
struct upgt_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint8_t wt_rate;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
} __packed __aligned(8);
#define UPGT_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL))
struct upgt_stat {
uint32_t st_tx_active;
uint32_t st_tx_inactive;
uint32_t st_tx_pending;
};
#define UPGT_STAT_INC(sc, var) (sc)->sc_stat.var++
#define UPGT_STAT_DEC(sc, var) (sc)->sc_stat.var--
struct upgt_vap {
struct ieee80211vap vap;
int (*newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define UPGT_VAP(vap) ((struct upgt_vap *)(vap))
struct upgt_softc {
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
device_t sc_dev;
- struct ifnet *sc_ifp;
struct usb_device *sc_udev;
void *sc_rx_dma_buf;
void *sc_tx_dma_buf;
struct mtx sc_mtx;
struct upgt_stat sc_stat;
int sc_flags;
#define UPGT_FLAG_FWLOADED (1 << 0)
#define UPGT_FLAG_INITDONE (1 << 1)
#define UPGT_FLAG_DETACHED (1 << 2)
- int sc_if_flags;
int sc_debug;
-
- uint8_t sc_myaddr[IEEE80211_ADDR_LEN];
enum ieee80211_state sc_state;
int sc_arg;
int sc_led_blink;
struct callout sc_led_ch;
uint8_t sc_cur_rateset[8];
/* watchdog */
int sc_tx_timer;
struct callout sc_watchdog_ch;
/* Firmware. */
int sc_fw_type;
/* memory addresses on device */
uint32_t sc_memaddr_frame_start;
uint32_t sc_memaddr_frame_end;
uint32_t sc_memaddr_rx_start;
struct upgt_memory sc_memory;
/* data which we found in the EEPROM */
uint8_t sc_eeprom[2 * UPGT_EEPROM_SIZE] __aligned(4);
uint16_t sc_eeprom_hwrx;
struct upgt_lmac_freq3 sc_eeprom_freq3[IEEE80211_CHAN_MAX];
struct upgt_lmac_freq4 sc_eeprom_freq4[IEEE80211_CHAN_MAX][8];
struct upgt_lmac_freq6 sc_eeprom_freq6[IEEE80211_CHAN_MAX];
uint8_t sc_eeprom_freq6_settings;
/* RX/TX */
struct usb_xfer *sc_xfer[UPGT_N_XFERS];
int sc_rx_no;
int sc_tx_no;
struct upgt_data sc_rx_data[UPGT_RX_MAXCOUNT];
upgt_datahead sc_rx_active;
upgt_datahead sc_rx_inactive;
struct upgt_data sc_tx_data[UPGT_TX_MAXCOUNT];
upgt_datahead sc_tx_active;
upgt_datahead sc_tx_inactive;
upgt_datahead sc_tx_pending;
/* BPF */
struct upgt_rx_radiotap_header sc_rxtap;
int sc_rxtap_len;
struct upgt_tx_radiotap_header sc_txtap;
int sc_txtap_len;
};
#define UPGT_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define UPGT_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
#define UPGT_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
Index: head/sys/dev/usb/wlan/if_ural.c
===================================================================
--- head/sys/dev/usb/wlan/if_ural.c (revision 287196)
+++ head/sys/dev/usb/wlan/if_ural.c (revision 287197)
@@ -1,2302 +1,2230 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2005, 2006
* Damien Bergamini <damien.bergamini@free.fr>
*
* Copyright (c) 2006, 2008
* Hans Petter Selasky <hselasky@FreeBSD.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*-
* Ralink Technology RT2500USB chipset driver
* http://www.ralinktech.com/
*/
#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kdb.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#endif
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_ratectl.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include "usbdevs.h"
#define USB_DEBUG_VAR ural_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/wlan/if_uralreg.h>
#include <dev/usb/wlan/if_uralvar.h>
#ifdef USB_DEBUG
static int ural_debug = 0;
static SYSCTL_NODE(_hw_usb, OID_AUTO, ural, CTLFLAG_RW, 0, "USB ural");
SYSCTL_INT(_hw_usb_ural, OID_AUTO, debug, CTLFLAG_RWTUN, &ural_debug, 0,
"Debug level");
#endif
#define URAL_RSSI(rssi) \
((rssi) > (RAL_NOISE_FLOOR + RAL_RSSI_CORR) ? \
((rssi) - (RAL_NOISE_FLOOR + RAL_RSSI_CORR)) : 0)
/* various supported device vendors/products */
static const STRUCT_USB_HOST_ID ural_devs[] = {
#define URAL_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
URAL_DEV(ASUS, WL167G),
URAL_DEV(ASUS, RT2570),
URAL_DEV(BELKIN, F5D7050),
URAL_DEV(BELKIN, F5D7051),
URAL_DEV(CISCOLINKSYS, HU200TS),
URAL_DEV(CISCOLINKSYS, WUSB54G),
URAL_DEV(CISCOLINKSYS, WUSB54GP),
URAL_DEV(CONCEPTRONIC2, C54RU),
URAL_DEV(DLINK, DWLG122),
URAL_DEV(GIGABYTE, GN54G),
URAL_DEV(GIGABYTE, GNWBKG),
URAL_DEV(GUILLEMOT, HWGUSB254),
URAL_DEV(MELCO, KG54),
URAL_DEV(MELCO, KG54AI),
URAL_DEV(MELCO, KG54YB),
URAL_DEV(MELCO, NINWIFI),
URAL_DEV(MSI, RT2570),
URAL_DEV(MSI, RT2570_2),
URAL_DEV(MSI, RT2570_3),
URAL_DEV(NOVATECH, NV902),
URAL_DEV(RALINK, RT2570),
URAL_DEV(RALINK, RT2570_2),
URAL_DEV(RALINK, RT2570_3),
URAL_DEV(SIEMENS2, WL54G),
URAL_DEV(SMC, 2862WG),
URAL_DEV(SPHAIRON, UB801R),
URAL_DEV(SURECOM, RT2570),
URAL_DEV(VTECH, RT2570),
URAL_DEV(ZINWELL, RT2570),
#undef URAL_DEV
};
static usb_callback_t ural_bulk_read_callback;
static usb_callback_t ural_bulk_write_callback;
static usb_error_t ural_do_request(struct ural_softc *sc,
struct usb_device_request *req, void *data);
static struct ieee80211vap *ural_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode,
int, const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void ural_vap_delete(struct ieee80211vap *);
static void ural_tx_free(struct ural_tx_data *, int);
static void ural_setup_tx_list(struct ural_softc *);
static void ural_unsetup_tx_list(struct ural_softc *);
static int ural_newstate(struct ieee80211vap *,
enum ieee80211_state, int);
static void ural_setup_tx_desc(struct ural_softc *,
struct ural_tx_desc *, uint32_t, int, int);
static int ural_tx_bcn(struct ural_softc *, struct mbuf *,
struct ieee80211_node *);
static int ural_tx_mgt(struct ural_softc *, struct mbuf *,
struct ieee80211_node *);
static int ural_tx_data(struct ural_softc *, struct mbuf *,
struct ieee80211_node *);
-static void ural_start(struct ifnet *);
-static int ural_ioctl(struct ifnet *, u_long, caddr_t);
+static int ural_transmit(struct ieee80211com *, struct mbuf *);
+static void ural_start(struct ural_softc *);
+static void ural_parent(struct ieee80211com *);
static void ural_set_testmode(struct ural_softc *);
static void ural_eeprom_read(struct ural_softc *, uint16_t, void *,
int);
static uint16_t ural_read(struct ural_softc *, uint16_t);
static void ural_read_multi(struct ural_softc *, uint16_t, void *,
int);
static void ural_write(struct ural_softc *, uint16_t, uint16_t);
static void ural_write_multi(struct ural_softc *, uint16_t, void *,
int) __unused;
static void ural_bbp_write(struct ural_softc *, uint8_t, uint8_t);
static uint8_t ural_bbp_read(struct ural_softc *, uint8_t);
static void ural_rf_write(struct ural_softc *, uint8_t, uint32_t);
static void ural_scan_start(struct ieee80211com *);
static void ural_scan_end(struct ieee80211com *);
static void ural_set_channel(struct ieee80211com *);
static void ural_set_chan(struct ural_softc *,
struct ieee80211_channel *);
static void ural_disable_rf_tune(struct ural_softc *);
static void ural_enable_tsf_sync(struct ural_softc *);
static void ural_enable_tsf(struct ural_softc *);
-static void ural_update_slot(struct ifnet *);
+static void ural_update_slot(struct ural_softc *);
static void ural_set_txpreamble(struct ural_softc *);
static void ural_set_basicrates(struct ural_softc *,
const struct ieee80211_channel *);
static void ural_set_bssid(struct ural_softc *, const uint8_t *);
-static void ural_set_macaddr(struct ural_softc *, uint8_t *);
+static void ural_set_macaddr(struct ural_softc *, const uint8_t *);
static void ural_update_promisc(struct ieee80211com *);
static void ural_setpromisc(struct ural_softc *);
static const char *ural_get_rf(int);
static void ural_read_eeprom(struct ural_softc *);
static int ural_bbp_init(struct ural_softc *);
static void ural_set_txantenna(struct ural_softc *, int);
static void ural_set_rxantenna(struct ural_softc *, int);
-static void ural_init_locked(struct ural_softc *);
-static void ural_init(void *);
+static void ural_init(struct ural_softc *);
static void ural_stop(struct ural_softc *);
static int ural_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void ural_ratectl_start(struct ural_softc *,
struct ieee80211_node *);
static void ural_ratectl_timeout(void *);
static void ural_ratectl_task(void *, int);
static int ural_pause(struct ural_softc *sc, int timeout);
/*
* Default values for MAC registers; values taken from the reference driver.
*/
static const struct {
uint16_t reg;
uint16_t val;
} ural_def_mac[] = {
{ RAL_TXRX_CSR5, 0x8c8d },
{ RAL_TXRX_CSR6, 0x8b8a },
{ RAL_TXRX_CSR7, 0x8687 },
{ RAL_TXRX_CSR8, 0x0085 },
{ RAL_MAC_CSR13, 0x1111 },
{ RAL_MAC_CSR14, 0x1e11 },
{ RAL_TXRX_CSR21, 0xe78f },
{ RAL_MAC_CSR9, 0xff1d },
{ RAL_MAC_CSR11, 0x0002 },
{ RAL_MAC_CSR22, 0x0053 },
{ RAL_MAC_CSR15, 0x0000 },
{ RAL_MAC_CSR8, RAL_FRAME_SIZE },
{ RAL_TXRX_CSR19, 0x0000 },
{ RAL_TXRX_CSR18, 0x005a },
{ RAL_PHY_CSR2, 0x0000 },
{ RAL_TXRX_CSR0, 0x1ec0 },
{ RAL_PHY_CSR4, 0x000f }
};
/*
* Default values for BBP registers; values taken from the reference driver.
*/
static const struct {
uint8_t reg;
uint8_t val;
} ural_def_bbp[] = {
{ 3, 0x02 },
{ 4, 0x19 },
{ 14, 0x1c },
{ 15, 0x30 },
{ 16, 0xac },
{ 17, 0x48 },
{ 18, 0x18 },
{ 19, 0xff },
{ 20, 0x1e },
{ 21, 0x08 },
{ 22, 0x08 },
{ 23, 0x08 },
{ 24, 0x80 },
{ 25, 0x50 },
{ 26, 0x08 },
{ 27, 0x23 },
{ 30, 0x10 },
{ 31, 0x2b },
{ 32, 0xb9 },
{ 34, 0x12 },
{ 35, 0x50 },
{ 39, 0xc4 },
{ 40, 0x02 },
{ 41, 0x60 },
{ 53, 0x10 },
{ 54, 0x18 },
{ 56, 0x08 },
{ 57, 0x10 },
{ 58, 0x08 },
{ 61, 0x60 },
{ 62, 0x10 },
{ 75, 0xff }
};
/*
* Default values for RF register R2 indexed by channel numbers.
*/
static const uint32_t ural_rf2522_r2[] = {
0x307f6, 0x307fb, 0x30800, 0x30805, 0x3080a, 0x3080f, 0x30814,
0x30819, 0x3081e, 0x30823, 0x30828, 0x3082d, 0x30832, 0x3083e
};
static const uint32_t ural_rf2523_r2[] = {
0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d,
0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346
};
static const uint32_t ural_rf2524_r2[] = {
0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d,
0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346
};
static const uint32_t ural_rf2525_r2[] = {
0x20327, 0x20328, 0x20329, 0x2032a, 0x2032b, 0x2032c, 0x2032d,
0x2032e, 0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20346
};
static const uint32_t ural_rf2525_hi_r2[] = {
0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20344, 0x20345,
0x20346, 0x20347, 0x20348, 0x20349, 0x2034a, 0x2034b, 0x2034e
};
static const uint32_t ural_rf2525e_r2[] = {
0x2044d, 0x2044e, 0x2044f, 0x20460, 0x20461, 0x20462, 0x20463,
0x20464, 0x20465, 0x20466, 0x20467, 0x20468, 0x20469, 0x2046b
};
static const uint32_t ural_rf2526_hi_r2[] = {
0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d, 0x0022d,
0x0022e, 0x0022e, 0x0022f, 0x0022d, 0x00240, 0x00240, 0x00241
};
static const uint32_t ural_rf2526_r2[] = {
0x00226, 0x00227, 0x00227, 0x00228, 0x00228, 0x00229, 0x00229,
0x0022a, 0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d
};
/*
* For dual-band RF, RF registers R1 and R4 also depend on channel number;
* values taken from the reference driver.
*/
static const struct {
uint8_t chan;
uint32_t r1;
uint32_t r2;
uint32_t r4;
} ural_rf5222[] = {
{ 1, 0x08808, 0x0044d, 0x00282 },
{ 2, 0x08808, 0x0044e, 0x00282 },
{ 3, 0x08808, 0x0044f, 0x00282 },
{ 4, 0x08808, 0x00460, 0x00282 },
{ 5, 0x08808, 0x00461, 0x00282 },
{ 6, 0x08808, 0x00462, 0x00282 },
{ 7, 0x08808, 0x00463, 0x00282 },
{ 8, 0x08808, 0x00464, 0x00282 },
{ 9, 0x08808, 0x00465, 0x00282 },
{ 10, 0x08808, 0x00466, 0x00282 },
{ 11, 0x08808, 0x00467, 0x00282 },
{ 12, 0x08808, 0x00468, 0x00282 },
{ 13, 0x08808, 0x00469, 0x00282 },
{ 14, 0x08808, 0x0046b, 0x00286 },
{ 36, 0x08804, 0x06225, 0x00287 },
{ 40, 0x08804, 0x06226, 0x00287 },
{ 44, 0x08804, 0x06227, 0x00287 },
{ 48, 0x08804, 0x06228, 0x00287 },
{ 52, 0x08804, 0x06229, 0x00287 },
{ 56, 0x08804, 0x0622a, 0x00287 },
{ 60, 0x08804, 0x0622b, 0x00287 },
{ 64, 0x08804, 0x0622c, 0x00287 },
{ 100, 0x08804, 0x02200, 0x00283 },
{ 104, 0x08804, 0x02201, 0x00283 },
{ 108, 0x08804, 0x02202, 0x00283 },
{ 112, 0x08804, 0x02203, 0x00283 },
{ 116, 0x08804, 0x02204, 0x00283 },
{ 120, 0x08804, 0x02205, 0x00283 },
{ 124, 0x08804, 0x02206, 0x00283 },
{ 128, 0x08804, 0x02207, 0x00283 },
{ 132, 0x08804, 0x02208, 0x00283 },
{ 136, 0x08804, 0x02209, 0x00283 },
{ 140, 0x08804, 0x0220a, 0x00283 },
{ 149, 0x08808, 0x02429, 0x00281 },
{ 153, 0x08808, 0x0242b, 0x00281 },
{ 157, 0x08808, 0x0242d, 0x00281 },
{ 161, 0x08808, 0x0242f, 0x00281 }
};
static const struct usb_config ural_config[URAL_N_TRANSFER] = {
[URAL_BULK_WR] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.bufsize = (RAL_FRAME_SIZE + RAL_TX_DESC_SIZE + 4),
.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
.callback = ural_bulk_write_callback,
.timeout = 5000, /* ms */
},
[URAL_BULK_RD] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_IN,
.bufsize = (RAL_FRAME_SIZE + RAL_RX_DESC_SIZE),
.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
.callback = ural_bulk_read_callback,
},
};
static device_probe_t ural_match;
static device_attach_t ural_attach;
static device_detach_t ural_detach;
static device_method_t ural_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, ural_match),
DEVMETHOD(device_attach, ural_attach),
DEVMETHOD(device_detach, ural_detach),
DEVMETHOD_END
};
static driver_t ural_driver = {
.name = "ural",
.methods = ural_methods,
.size = sizeof(struct ural_softc),
};
static devclass_t ural_devclass;
DRIVER_MODULE(ural, uhub, ural_driver, ural_devclass, NULL, 0);
MODULE_DEPEND(ural, usb, 1, 1, 1);
MODULE_DEPEND(ural, wlan, 1, 1, 1);
MODULE_VERSION(ural, 1);
static int
ural_match(device_t self)
{
struct usb_attach_arg *uaa = device_get_ivars(self);
if (uaa->usb_mode != USB_MODE_HOST)
return (ENXIO);
if (uaa->info.bConfigIndex != 0)
return (ENXIO);
if (uaa->info.bIfaceIndex != RAL_IFACE_INDEX)
return (ENXIO);
return (usbd_lookup_id_by_uaa(ural_devs, sizeof(ural_devs), uaa));
}
static int
ural_attach(device_t self)
{
struct usb_attach_arg *uaa = device_get_ivars(self);
struct ural_softc *sc = device_get_softc(self);
- struct ifnet *ifp;
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
uint8_t iface_index, bands;
int error;
device_set_usb_desc(self);
sc->sc_udev = uaa->device;
sc->sc_dev = self;
mtx_init(&sc->sc_mtx, device_get_nameunit(self),
MTX_NETWORK_LOCK, MTX_DEF);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
iface_index = RAL_IFACE_INDEX;
error = usbd_transfer_setup(uaa->device,
&iface_index, sc->sc_xfer, ural_config,
URAL_N_TRANSFER, sc, &sc->sc_mtx);
if (error) {
device_printf(self, "could not allocate USB transfers, "
"err=%s\n", usbd_errstr(error));
goto detach;
}
RAL_LOCK(sc);
/* retrieve RT2570 rev. no */
sc->asic_rev = ural_read(sc, RAL_MAC_CSR0);
/* retrieve MAC address and various other things from EEPROM */
ural_read_eeprom(sc);
RAL_UNLOCK(sc);
device_printf(self, "MAC/BBP RT2570 (rev 0x%02x), RF %s\n",
sc->asic_rev, ural_get_rf(sc->rf_rev));
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "can not if_alloc()\n");
- goto detach;
- }
- ic = ifp->if_l2com;
-
- ifp->if_softc = sc;
- if_initname(ifp, "ural", device_get_unit(sc->sc_dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = ural_init;
- ifp->if_ioctl = ural_ioctl;
- ifp->if_start = ural_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(self);
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA /* station mode supported */
| IEEE80211_C_IBSS /* IBSS mode supported */
| IEEE80211_C_MONITOR /* monitor mode supported */
| IEEE80211_C_HOSTAP /* HostAp mode supported */
| IEEE80211_C_TXPMGT /* tx power management */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_BGSCAN /* bg scanning supported */
| IEEE80211_C_WPA /* 802.11i */
;
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
if (sc->rf_rev == RAL_RF_5222)
setbit(&bands, IEEE80211_MODE_11A);
ieee80211_init_channels(ic, NULL, &bands);
- ieee80211_ifattach(ic, sc->sc_bssid);
+ ieee80211_ifattach(ic);
ic->ic_update_promisc = ural_update_promisc;
ic->ic_raw_xmit = ural_raw_xmit;
ic->ic_scan_start = ural_scan_start;
ic->ic_scan_end = ural_scan_end;
ic->ic_set_channel = ural_set_channel;
-
+ ic->ic_parent = ural_parent;
+ ic->ic_transmit = ural_transmit;
ic->ic_vap_create = ural_vap_create;
ic->ic_vap_delete = ural_vap_delete;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
RAL_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
RAL_RX_RADIOTAP_PRESENT);
if (bootverbose)
ieee80211_announce(ic);
return (0);
detach:
ural_detach(self);
return (ENXIO); /* failure */
}
static int
ural_detach(device_t self)
{
struct ural_softc *sc = device_get_softc(self);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
/* prevent further ioctls */
RAL_LOCK(sc);
sc->sc_detached = 1;
RAL_UNLOCK(sc);
/* stop all USB transfers */
usbd_transfer_unsetup(sc->sc_xfer, URAL_N_TRANSFER);
/* free TX list, if any */
RAL_LOCK(sc);
ural_unsetup_tx_list(sc);
RAL_UNLOCK(sc);
- if (ifp) {
- ic = ifp->if_l2com;
+ if (ic->ic_softc == sc)
ieee80211_ifdetach(ic);
- if_free(ifp);
- }
+ mbufq_drain(&sc->sc_snd);
mtx_destroy(&sc->sc_mtx);
return (0);
}
static usb_error_t
ural_do_request(struct ural_softc *sc,
struct usb_device_request *req, void *data)
{
usb_error_t err;
int ntries = 10;
while (ntries--) {
err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx,
req, data, 0, NULL, 250 /* ms */);
if (err == 0)
break;
DPRINTFN(1, "Control request failed, %s (retrying)\n",
usbd_errstr(err));
if (ural_pause(sc, hz / 100))
break;
}
return (err);
}
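/*
 * Timing note (derived from the code above): each of the ten attempts
 * uses a 250 ms USB request timeout and, on failure, is followed by a
 * ural_pause() of hz / 100 ticks (about 10 ms with the default hz of
 * 1000), so an unresponsive device is given up on after roughly 2.6
 * seconds.
 */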
static struct ieee80211vap *
ural_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct ural_softc *sc = ic->ic_softc;
struct ural_vap *uvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return NULL;
uvp = (struct ural_vap *) malloc(sizeof(struct ural_vap),
M_80211_VAP, M_NOWAIT | M_ZERO);
if (uvp == NULL)
return NULL;
vap = &uvp->vap;
/* enable s/w bmiss handling for sta mode */
if (ieee80211_vap_setup(ic, vap, name, unit, opmode,
- flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) {
+ flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) {
/* out of memory */
free(uvp, M_80211_VAP);
return (NULL);
}
/* override state transition machine */
uvp->newstate = vap->iv_newstate;
vap->iv_newstate = ural_newstate;
usb_callout_init_mtx(&uvp->ratectl_ch, &sc->sc_mtx, 0);
TASK_INIT(&uvp->ratectl_task, 0, ural_ratectl_task, uvp);
ieee80211_ratectl_init(vap);
ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */);
/* complete setup */
- ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status);
+ ieee80211_vap_attach(vap, ieee80211_media_change,
+ ieee80211_media_status, mac);
ic->ic_opmode = opmode;
return vap;
}
static void
ural_vap_delete(struct ieee80211vap *vap)
{
struct ural_vap *uvp = URAL_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
usb_callout_drain(&uvp->ratectl_ch);
ieee80211_draintask(ic, &uvp->ratectl_task);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(uvp, M_80211_VAP);
}
static void
ural_tx_free(struct ural_tx_data *data, int txerr)
{
struct ural_softc *sc = data->sc;
if (data->m != NULL) {
- if (data->m->m_flags & M_TXCB)
- ieee80211_process_callback(data->ni, data->m,
- txerr ? ETIMEDOUT : 0);
- m_freem(data->m);
+ ieee80211_tx_complete(data->ni, data->m, txerr);
data->m = NULL;
-
- ieee80211_free_node(data->ni);
data->ni = NULL;
}
STAILQ_INSERT_TAIL(&sc->tx_free, data, next);
sc->tx_nfree++;
}
static void
ural_setup_tx_list(struct ural_softc *sc)
{
struct ural_tx_data *data;
int i;
sc->tx_nfree = 0;
STAILQ_INIT(&sc->tx_q);
STAILQ_INIT(&sc->tx_free);
for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
data = &sc->tx_data[i];
data->sc = sc;
STAILQ_INSERT_TAIL(&sc->tx_free, data, next);
sc->tx_nfree++;
}
}
static void
ural_unsetup_tx_list(struct ural_softc *sc)
{
struct ural_tx_data *data;
int i;
/* make sure any subsequent use of the queues will fail */
sc->tx_nfree = 0;
STAILQ_INIT(&sc->tx_q);
STAILQ_INIT(&sc->tx_free);
/* free up all node references and mbufs */
for (i = 0; i < RAL_TX_LIST_COUNT; i++) {
data = &sc->tx_data[i];
if (data->m != NULL) {
m_freem(data->m);
data->m = NULL;
}
if (data->ni != NULL) {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
}
}
static int
ural_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct ural_vap *uvp = URAL_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct ural_softc *sc = ic->ic_softc;
const struct ieee80211_txparam *tp;
struct ieee80211_node *ni;
struct mbuf *m;
DPRINTF("%s -> %s\n",
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate]);
IEEE80211_UNLOCK(ic);
RAL_LOCK(sc);
usb_callout_stop(&uvp->ratectl_ch);
switch (nstate) {
case IEEE80211_S_INIT:
if (vap->iv_state == IEEE80211_S_RUN) {
/* abort TSF synchronization */
ural_write(sc, RAL_TXRX_CSR19, 0);
/* force tx led to stop blinking */
ural_write(sc, RAL_MAC_CSR20, 0);
}
break;
case IEEE80211_S_RUN:
ni = ieee80211_ref_node(vap->iv_bss);
if (vap->iv_opmode != IEEE80211_M_MONITOR) {
if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) {
RAL_UNLOCK(sc);
IEEE80211_LOCK(ic);
ieee80211_free_node(ni);
return (-1);
}
- ural_update_slot(ic->ic_ifp);
+ ural_update_slot(sc);
ural_set_txpreamble(sc);
ural_set_basicrates(sc, ic->ic_bsschan);
- IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid);
- ural_set_bssid(sc, sc->sc_bssid);
+ IEEE80211_ADDR_COPY(ic->ic_macaddr, ni->ni_bssid);
+ ural_set_bssid(sc, ic->ic_macaddr);
}
if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_IBSS) {
m = ieee80211_beacon_alloc(ni, &uvp->bo);
if (m == NULL) {
device_printf(sc->sc_dev,
"could not allocate beacon\n");
RAL_UNLOCK(sc);
IEEE80211_LOCK(ic);
ieee80211_free_node(ni);
return (-1);
}
ieee80211_ref_node(ni);
if (ural_tx_bcn(sc, m, ni) != 0) {
device_printf(sc->sc_dev,
"could not send beacon\n");
RAL_UNLOCK(sc);
IEEE80211_LOCK(ic);
ieee80211_free_node(ni);
return (-1);
}
}
/* make tx led blink on tx (controlled by ASIC) */
ural_write(sc, RAL_MAC_CSR20, 1);
if (vap->iv_opmode != IEEE80211_M_MONITOR)
ural_enable_tsf_sync(sc);
else
ural_enable_tsf(sc);
/* enable automatic rate adaptation */
/* XXX should use ic_bsschan but not valid until after newstate call below */
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE)
ural_ratectl_start(sc, ni);
ieee80211_free_node(ni);
break;
default:
break;
}
RAL_UNLOCK(sc);
IEEE80211_LOCK(ic);
return (uvp->newstate(vap, nstate, arg));
}
static void
ural_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct ural_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
struct ieee80211vap *vap;
struct ural_tx_data *data;
struct mbuf *m;
struct usb_page_cache *pc;
int len;
usbd_xfer_status(xfer, &len, NULL, NULL, NULL);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
DPRINTFN(11, "transfer complete, %d bytes\n", len);
/* free resources */
data = usbd_xfer_get_priv(xfer);
ural_tx_free(data, 0);
usbd_xfer_set_priv(xfer, NULL);
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
data = STAILQ_FIRST(&sc->tx_q);
if (data) {
STAILQ_REMOVE_HEAD(&sc->tx_q, next);
m = data->m;
if (m->m_pkthdr.len > (int)(RAL_FRAME_SIZE + RAL_TX_DESC_SIZE)) {
DPRINTFN(0, "data overflow, %u bytes\n",
m->m_pkthdr.len);
m->m_pkthdr.len = (RAL_FRAME_SIZE + RAL_TX_DESC_SIZE);
}
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_in(pc, 0, &data->desc, RAL_TX_DESC_SIZE);
usbd_m_copy_in(pc, RAL_TX_DESC_SIZE, m, 0,
m->m_pkthdr.len);
vap = data->ni->ni_vap;
if (ieee80211_radiotap_active_vap(vap)) {
struct ural_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = data->rate;
tap->wt_antenna = sc->tx_ant;
ieee80211_radiotap_tx(vap, m);
}
/* xfer length needs to be a multiple of two! */
len = (RAL_TX_DESC_SIZE + m->m_pkthdr.len + 1) & ~1;
if ((len % 64) == 0)
len += 2;
DPRINTFN(11, "sending frame len=%u xferlen=%u\n",
m->m_pkthdr.len, len);
usbd_xfer_set_frame_len(xfer, 0, len);
usbd_xfer_set_priv(xfer, data);
usbd_transfer_submit(xfer);
}
- RAL_UNLOCK(sc);
- ural_start(ifp);
- RAL_LOCK(sc);
+ ural_start(sc);
break;
default: /* Error */
DPRINTFN(11, "transfer error, %s\n",
usbd_errstr(error));
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
data = usbd_xfer_get_priv(xfer);
if (data != NULL) {
ural_tx_free(data, error);
usbd_xfer_set_priv(xfer, NULL);
}
if (error == USB_ERR_STALLED) {
/* try to clear stall first */
usbd_xfer_set_stall(xfer);
goto tr_setup;
}
if (error == USB_ERR_TIMEOUT)
device_printf(sc->sc_dev, "device timeout\n");
break;
}
}
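/*
 * Standalone illustration of the transfer-length rounding used in the setup
 * path above: pad to an even number of bytes, then bump any length that lands
 * exactly on a 64-byte (full-speed bulk wMaxPacketSize) boundary, presumably
 * so the device can detect the end of the transfer without a zero-length
 * packet.  DESC_SIZE is a stand-in for RAL_TX_DESC_SIZE, whose real value is
 * not shown in this hunk; only the rounding rule matters here.  Compiles as
 * plain C.
 */
#include <stdio.h>

#define DESC_SIZE 48			/* placeholder value, assumption */

static int
xfer_len(int pktlen)
{
	int len;

	len = (DESC_SIZE + pktlen + 1) & ~1;	/* round up to even */
	if ((len % 64) == 0)			/* never end on a packet boundary */
		len += 2;
	return (len);
}

int
main(void)
{
	const int pkts[] = { 14, 15, 16, 80, 1536 };
	unsigned i;

	for (i = 0; i < sizeof(pkts) / sizeof(pkts[0]); i++)
		printf("pkthdr.len %4d -> xferlen %4d\n", pkts[i],
		    xfer_len(pkts[i]));
	return (0);
}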
static void
ural_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct ural_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni;
struct mbuf *m = NULL;
struct usb_page_cache *pc;
uint32_t flags;
int8_t rssi = 0, nf = 0;
int len;
usbd_xfer_status(xfer, &len, NULL, NULL, NULL);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
DPRINTFN(15, "rx done, actlen=%d\n", len);
if (len < (int)(RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN)) {
DPRINTF("%s: xfer too short %d\n",
device_get_nameunit(sc->sc_dev), len);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto tr_setup;
}
len -= RAL_RX_DESC_SIZE;
/* rx descriptor is located at the end */
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_out(pc, len, &sc->sc_rx_desc, RAL_RX_DESC_SIZE);
rssi = URAL_RSSI(sc->sc_rx_desc.rssi);
nf = RAL_NOISE_FLOOR;
flags = le32toh(sc->sc_rx_desc.flags);
if (flags & (RAL_RX_PHY_ERROR | RAL_RX_CRC_ERROR)) {
/*
* This should not happen since we did not
* request to receive those frames when we
* filled RAL_TXRX_CSR2:
*/
DPRINTFN(5, "PHY or CRC error\n");
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto tr_setup;
}
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
DPRINTF("could not allocate mbuf\n");
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto tr_setup;
}
usbd_copy_out(pc, 0, mtod(m, uint8_t *), len);
/* finalize mbuf */
- m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = (flags >> 16) & 0xfff;
if (ieee80211_radiotap_active(ic)) {
struct ural_rx_radiotap_header *tap = &sc->sc_rxtap;
/* XXX set once */
tap->wr_flags = 0;
tap->wr_rate = ieee80211_plcp2rate(sc->sc_rx_desc.rate,
(flags & RAL_RX_OFDM) ?
IEEE80211_T_OFDM : IEEE80211_T_CCK);
tap->wr_antenna = sc->rx_ant;
tap->wr_antsignal = nf + rssi;
tap->wr_antnoise = nf;
}
/* Strip trailing 802.11 MAC FCS. */
m_adj(m, -IEEE80211_CRC_LEN);
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
usbd_transfer_submit(xfer);
/*
* At the end of a USB callback it is always safe to unlock
* the private mutex of a device! That is why we do the
* "ieee80211_input" here, and not some lines up!
*/
RAL_UNLOCK(sc);
if (m) {
ni = ieee80211_find_rxnode(ic,
mtod(m, struct ieee80211_frame_min *));
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi, nf);
}
- if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
- !IFQ_IS_EMPTY(&ifp->if_snd))
- ural_start(ifp);
RAL_LOCK(sc);
+ ural_start(sc);
return;
default: /* Error */
if (error != USB_ERR_CANCELLED) {
/* try to clear stall first */
usbd_xfer_set_stall(xfer);
goto tr_setup;
}
return;
}
}
static uint8_t
ural_plcp_signal(int rate)
{
switch (rate) {
/* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
case 12: return 0xb;
case 18: return 0xf;
case 24: return 0xa;
case 36: return 0xe;
case 48: return 0x9;
case 72: return 0xd;
case 96: return 0x8;
case 108: return 0xc;
/* CCK rates (NB: not IEEE std, device-specific) */
case 2: return 0x0;
case 4: return 0x1;
case 11: return 0x2;
case 22: return 0x3;
}
return 0xff; /* XXX unsupported/unknown rate */
}
static void
ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc,
uint32_t flags, int len, int rate)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t plcp_length;
int remainder;
desc->flags = htole32(flags);
desc->flags |= htole32(RAL_TX_NEWSEQ);
desc->flags |= htole32(len << 16);
desc->wme = htole16(RAL_AIFSN(2) | RAL_LOGCWMIN(3) | RAL_LOGCWMAX(5));
desc->wme |= htole16(RAL_IVOFFSET(sizeof (struct ieee80211_frame)));
/* setup PLCP fields */
desc->plcp_signal = ural_plcp_signal(rate);
desc->plcp_service = 4;
len += IEEE80211_CRC_LEN;
if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) {
desc->flags |= htole32(RAL_TX_OFDM);
plcp_length = len & 0xfff;
desc->plcp_length_hi = plcp_length >> 6;
desc->plcp_length_lo = plcp_length & 0x3f;
} else {
if (rate == 0)
rate = 2; /* avoid division by zero */
plcp_length = (16 * len + rate - 1) / rate;
if (rate == 22) {
remainder = (16 * len) % 22;
if (remainder != 0 && remainder < 7)
desc->plcp_service |= RAL_PLCP_LENGEXT;
}
desc->plcp_length_hi = plcp_length >> 8;
desc->plcp_length_lo = plcp_length & 0xff;
if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
desc->plcp_signal |= 0x08;
}
desc->iv = 0;
desc->eiv = 0;
}
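/*
 * Standalone illustration of the CCK PLCP LENGTH math in
 * ural_setup_tx_desc() above: `rate` is in 500 kbit/s units, so
 * (16 * len) / rate is the frame airtime in microseconds, rounded up; at
 * 11 Mbit/s the length-extension bit disambiguates lengths whose remainder
 * falls below 7.  Compiles as plain C; the frame length is an example value.
 */
#include <stdio.h>

int
main(void)
{
	const int len = 1500 + 4;		/* example frame + 4-byte FCS */
	const int rates[] = { 2, 4, 11, 22 };	/* 1, 2, 5.5, 11 Mbit/s */
	unsigned i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		int rate = rates[i];
		int plcp = (16 * len + rate - 1) / rate;	/* ceil, in us */
		int lengext = 0;

		if (rate == 22) {
			int rem = (16 * len) % 22;
			if (rem != 0 && rem < 7)
				lengext = 1;	/* RAL_PLCP_LENGEXT */
		}
		printf("rate %2d x 500kb/s: PLCP length %5d us, lengext %d\n",
		    rate, plcp, lengext);
	}
	return (0);
}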
#define RAL_TX_TIMEOUT 5000
static int
ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = sc->sc_ifp;
const struct ieee80211_txparam *tp;
struct ural_tx_data *data;
if (sc->tx_nfree == 0) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
m_freem(m0);
ieee80211_free_node(ni);
return (EIO);
}
if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) {
m_freem(m0);
ieee80211_free_node(ni);
return (ENXIO);
}
data = STAILQ_FIRST(&sc->tx_free);
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)];
data->m = m0;
data->ni = ni;
data->rate = tp->mgmtrate;
ural_setup_tx_desc(sc, &data->desc,
RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP, m0->m_pkthdr.len,
tp->mgmtrate);
DPRINTFN(10, "sending beacon frame len=%u rate=%u\n",
m0->m_pkthdr.len, tp->mgmtrate);
STAILQ_INSERT_TAIL(&sc->tx_q, data, next);
usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]);
return (0);
}
static int
ural_tx_mgt(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
const struct ieee80211_txparam *tp;
struct ural_tx_data *data;
struct ieee80211_frame *wh;
struct ieee80211_key *k;
uint32_t flags;
uint16_t dur;
RAL_LOCK_ASSERT(sc, MA_OWNED);
data = STAILQ_FIRST(&sc->tx_free);
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
wh = mtod(m0, struct ieee80211_frame *);
}
data->m = m0;
data->ni = ni;
data->rate = tp->mgmtrate;
flags = 0;
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= RAL_TX_ACK;
dur = ieee80211_ack_duration(ic->ic_rt, tp->mgmtrate,
ic->ic_flags & IEEE80211_F_SHPREAMBLE);
USETW(wh->i_dur, dur);
/* tell hardware to add timestamp for probe responses */
if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
IEEE80211_FC0_TYPE_MGT &&
(wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
IEEE80211_FC0_SUBTYPE_PROBE_RESP)
flags |= RAL_TX_TIMESTAMP;
}
ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, tp->mgmtrate);
DPRINTFN(10, "sending mgt frame len=%u rate=%u\n",
m0->m_pkthdr.len, tp->mgmtrate);
STAILQ_INSERT_TAIL(&sc->tx_q, data, next);
usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]);
return 0;
}
static int
ural_sendprot(struct ural_softc *sc,
const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate)
{
struct ieee80211com *ic = ni->ni_ic;
const struct ieee80211_frame *wh;
struct ural_tx_data *data;
struct mbuf *mprot;
int protrate, ackrate, pktlen, flags, isshort;
uint16_t dur;
KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY,
("protection %d", prot));
wh = mtod(m, const struct ieee80211_frame *);
pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN;
protrate = ieee80211_ctl_rate(ic->ic_rt, rate);
ackrate = ieee80211_ack_rate(ic->ic_rt, rate);
isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0;
dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort)
+ ieee80211_ack_duration(ic->ic_rt, rate, isshort);
flags = RAL_TX_RETRY(7);
if (prot == IEEE80211_PROT_RTSCTS) {
/* NB: CTS is the same size as an ACK */
dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort);
flags |= RAL_TX_ACK;
mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur);
} else {
mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur);
}
if (mprot == NULL) {
/* XXX stat + msg */
return ENOBUFS;
}
data = STAILQ_FIRST(&sc->tx_free);
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
data->m = mprot;
data->ni = ieee80211_ref_node(ni);
data->rate = protrate;
ural_setup_tx_desc(sc, &data->desc, flags, mprot->m_pkthdr.len, protrate);
STAILQ_INSERT_TAIL(&sc->tx_q, data, next);
usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]);
return 0;
}
static int
ural_tx_raw(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
struct ural_tx_data *data;
uint32_t flags;
int error;
int rate;
RAL_LOCK_ASSERT(sc, MA_OWNED);
KASSERT(params != NULL, ("no raw xmit params"));
rate = params->ibp_rate0;
if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
m_freem(m0);
return EINVAL;
}
flags = 0;
if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
flags |= RAL_TX_ACK;
if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) {
error = ural_sendprot(sc, m0, ni,
params->ibp_flags & IEEE80211_BPF_RTS ?
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY,
rate);
if (error || sc->tx_nfree == 0) {
m_freem(m0);
return ENOBUFS;
}
flags |= RAL_TX_IFS_SIFS;
}
data = STAILQ_FIRST(&sc->tx_free);
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
data->m = m0;
data->ni = ni;
data->rate = rate;
/* XXX need to setup descriptor ourself */
ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, rate);
DPRINTFN(10, "sending raw frame len=%u rate=%u\n",
m0->m_pkthdr.len, rate);
STAILQ_INSERT_TAIL(&sc->tx_q, data, next);
usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]);
return 0;
}
static int
ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct ural_tx_data *data;
struct ieee80211_frame *wh;
const struct ieee80211_txparam *tp;
struct ieee80211_key *k;
uint32_t flags = 0;
uint16_t dur;
int error, rate;
RAL_LOCK_ASSERT(sc, MA_OWNED);
wh = mtod(m0, struct ieee80211_frame *);
tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
if (IEEE80211_IS_MULTICAST(wh->i_addr1))
rate = tp->mcastrate;
else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
rate = tp->ucastrate;
else
rate = ni->ni_txrate;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return ENOBUFS;
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
int prot = IEEE80211_PROT_NONE;
if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
prot = IEEE80211_PROT_RTSCTS;
else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM)
prot = ic->ic_protmode;
if (prot != IEEE80211_PROT_NONE) {
error = ural_sendprot(sc, m0, ni, prot, rate);
if (error || sc->tx_nfree == 0) {
m_freem(m0);
return ENOBUFS;
}
flags |= RAL_TX_IFS_SIFS;
}
}
data = STAILQ_FIRST(&sc->tx_free);
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
data->m = m0;
data->ni = ni;
data->rate = rate;
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= RAL_TX_ACK;
flags |= RAL_TX_RETRY(7);
dur = ieee80211_ack_duration(ic->ic_rt, rate,
ic->ic_flags & IEEE80211_F_SHPREAMBLE);
USETW(wh->i_dur, dur);
}
ural_setup_tx_desc(sc, &data->desc, flags, m0->m_pkthdr.len, rate);
DPRINTFN(10, "sending data frame len=%u rate=%u\n",
m0->m_pkthdr.len, rate);
STAILQ_INSERT_TAIL(&sc->tx_q, data, next);
usbd_transfer_start(sc->sc_xfer[URAL_BULK_WR]);
return 0;
}
+static int
+ural_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct ural_softc *sc = ic->ic_softc;
+ int error;
+
+ RAL_LOCK(sc);
+ if (!sc->sc_running) {
+ RAL_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ RAL_UNLOCK(sc);
+ return (error);
+ }
+ ural_start(sc);
+ RAL_UNLOCK(sc);
+
+ return (0);
+}
+
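/*
 * Sketch, not shown in this hunk: ural_transmit() and ural_parent() replace
 * the old if_start/if_ioctl entry points, so the attach path (outside this
 * excerpt) is expected to initialize the driver-private send queue and hook
 * the new ieee80211com methods roughly as below.  The exact attach code is an
 * assumption; only field and function names visible in this diff are reused.
 */
static void
example_attach_net80211_glue(struct ural_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	mbufq_init(&sc->sc_snd, ifqmaxlen);	/* feeds ural_start() */
	ic->ic_softc = sc;			/* back-pointer used throughout */
	ic->ic_transmit = ural_transmit;	/* net80211 hands us data frames */
	ic->ic_parent = ural_parent;		/* up/down and promiscuous changes */
}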
static void
-ural_start(struct ifnet *ifp)
+ural_start(struct ural_softc *sc)
{
- struct ural_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct mbuf *m;
- RAL_LOCK(sc);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
- RAL_UNLOCK(sc);
+ RAL_LOCK_ASSERT(sc, MA_OWNED);
+
+ if (sc->sc_running == 0)
return;
- }
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
- if (sc->tx_nfree < RAL_TX_MINFREE) {
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- break;
- }
+
+ while (sc->tx_nfree >= RAL_TX_MINFREE &&
+ (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
if (ural_tx_data(sc, m, ni) != 0) {
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
break;
}
}
- RAL_UNLOCK(sc);
}
-static int
-ural_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+ural_parent(struct ieee80211com *ic)
{
- struct ural_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *) data;
- int error;
+ struct ural_softc *sc = ic->ic_softc;
int startall = 0;
RAL_LOCK(sc);
- error = sc->sc_detached ? ENXIO : 0;
- RAL_UNLOCK(sc);
- if (error)
- return (error);
-
- switch (cmd) {
- case SIOCSIFFLAGS:
- RAL_LOCK(sc);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
- ural_init_locked(sc);
- startall = 1;
- } else
- ural_setpromisc(sc);
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ural_stop(sc);
- }
+ if (sc->sc_detached) {
RAL_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- case SIOCSIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- default:
- error = ether_ioctl(ifp, cmd, data);
- break;
+ return;
}
- return error;
+ if (ic->ic_nrunning > 0) {
+ if (sc->sc_running == 0) {
+ ural_init(sc);
+ startall = 1;
+ } else
+ ural_setpromisc(sc);
+ } else if (sc->sc_running)
+ ural_stop(sc);
+ RAL_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
}
static void
ural_set_testmode(struct ural_softc *sc)
{
struct usb_device_request req;
usb_error_t error;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = RAL_VENDOR_REQUEST;
USETW(req.wValue, 4);
USETW(req.wIndex, 1);
USETW(req.wLength, 0);
error = ural_do_request(sc, &req, NULL);
if (error != 0) {
device_printf(sc->sc_dev, "could not set test mode: %s\n",
usbd_errstr(error));
}
}
static void
ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len)
{
struct usb_device_request req;
usb_error_t error;
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = RAL_READ_EEPROM;
USETW(req.wValue, 0);
USETW(req.wIndex, addr);
USETW(req.wLength, len);
error = ural_do_request(sc, &req, buf);
if (error != 0) {
device_printf(sc->sc_dev, "could not read EEPROM: %s\n",
usbd_errstr(error));
}
}
static uint16_t
ural_read(struct ural_softc *sc, uint16_t reg)
{
struct usb_device_request req;
usb_error_t error;
uint16_t val;
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = RAL_READ_MAC;
USETW(req.wValue, 0);
USETW(req.wIndex, reg);
USETW(req.wLength, sizeof (uint16_t));
error = ural_do_request(sc, &req, &val);
if (error != 0) {
device_printf(sc->sc_dev, "could not read MAC register: %s\n",
usbd_errstr(error));
return 0;
}
return le16toh(val);
}
static void
ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
{
struct usb_device_request req;
usb_error_t error;
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = RAL_READ_MULTI_MAC;
USETW(req.wValue, 0);
USETW(req.wIndex, reg);
USETW(req.wLength, len);
error = ural_do_request(sc, &req, buf);
if (error != 0) {
device_printf(sc->sc_dev, "could not read MAC register: %s\n",
usbd_errstr(error));
}
}
static void
ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val)
{
struct usb_device_request req;
usb_error_t error;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = RAL_WRITE_MAC;
USETW(req.wValue, val);
USETW(req.wIndex, reg);
USETW(req.wLength, 0);
error = ural_do_request(sc, &req, NULL);
if (error != 0) {
device_printf(sc->sc_dev, "could not write MAC register: %s\n",
usbd_errstr(error));
}
}
static void
ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len)
{
struct usb_device_request req;
usb_error_t error;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = RAL_WRITE_MULTI_MAC;
USETW(req.wValue, 0);
USETW(req.wIndex, reg);
USETW(req.wLength, len);
error = ural_do_request(sc, &req, buf);
if (error != 0) {
device_printf(sc->sc_dev, "could not write MAC register: %s\n",
usbd_errstr(error));
}
}
static void
ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val)
{
uint16_t tmp;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
break;
if (ural_pause(sc, hz / 100))
break;
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not write to BBP\n");
return;
}
tmp = reg << 8 | val;
ural_write(sc, RAL_PHY_CSR7, tmp);
}
static uint8_t
ural_bbp_read(struct ural_softc *sc, uint8_t reg)
{
uint16_t val;
int ntries;
val = RAL_BBP_WRITE | reg << 8;
ural_write(sc, RAL_PHY_CSR7, val);
for (ntries = 0; ntries < 100; ntries++) {
if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY))
break;
if (ural_pause(sc, hz / 100))
break;
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not read BBP\n");
return 0;
}
return ural_read(sc, RAL_PHY_CSR7) & 0xff;
}
static void
ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val)
{
uint32_t tmp;
int ntries;
for (ntries = 0; ntries < 100; ntries++) {
if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY))
break;
if (ural_pause(sc, hz / 100))
break;
}
if (ntries == 100) {
device_printf(sc->sc_dev, "could not write to RF\n");
return;
}
tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3);
ural_write(sc, RAL_PHY_CSR9, tmp & 0xffff);
ural_write(sc, RAL_PHY_CSR10, tmp >> 16);
/* remember last written value in sc */
sc->rf_regs[reg] = val;
DPRINTFN(15, "RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff);
}
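/*
 * Standalone illustration of how ural_rf_write() serializes a 20-bit RF
 * register value: the value and the 2-bit register index are packed into one
 * 32-bit word, which is then written 16 bits at a time through PHY_CSR9
 * (low half) and PHY_CSR10 (high half).  The RAL_RF_BUSY and RAL_RF_20BIT
 * flag values live in if_uralreg.h and are not shown in this hunk, so they
 * are left out; only the packing and splitting are demonstrated.  Compiles as
 * plain C.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t reg = 3;		/* example: RF register 3 */
	uint32_t val = 0x18044;		/* example 20-bit value from the tables above */
	uint32_t tmp;

	tmp = (val & 0xfffff) << 2 | (reg & 0x3);
	printf("PHY_CSR9  <- 0x%04x\n", (unsigned)(tmp & 0xffff));
	printf("PHY_CSR10 <- 0x%04x\n", (unsigned)(tmp >> 16));
	printf("RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff);
	return (0);
}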
static void
ural_scan_start(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
struct ural_softc *sc = ic->ic_softc;
RAL_LOCK(sc);
ural_write(sc, RAL_TXRX_CSR19, 0);
- ural_set_bssid(sc, ifp->if_broadcastaddr);
+ ural_set_bssid(sc, ieee80211broadcastaddr);
RAL_UNLOCK(sc);
}
static void
ural_scan_end(struct ieee80211com *ic)
{
struct ural_softc *sc = ic->ic_softc;
RAL_LOCK(sc);
ural_enable_tsf_sync(sc);
- ural_set_bssid(sc, sc->sc_bssid);
+ ural_set_bssid(sc, ic->ic_macaddr);
RAL_UNLOCK(sc);
}
static void
ural_set_channel(struct ieee80211com *ic)
{
struct ural_softc *sc = ic->ic_softc;
RAL_LOCK(sc);
ural_set_chan(sc, ic->ic_curchan);
RAL_UNLOCK(sc);
}
static void
ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint8_t power, tmp;
int i, chan;
chan = ieee80211_chan2ieee(ic, c);
if (chan == 0 || chan == IEEE80211_CHAN_ANY)
return;
if (IEEE80211_IS_CHAN_2GHZ(c))
power = min(sc->txpow[chan - 1], 31);
else
power = 31;
/* adjust txpower using ifconfig settings */
power -= (100 - ic->ic_txpowlimit) / 8;
DPRINTFN(2, "setting channel to %u, txpower to %u\n", chan, power);
switch (sc->rf_rev) {
case RAL_RF_2522:
ural_rf_write(sc, RAL_RF1, 0x00814);
ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]);
ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
break;
case RAL_RF_2523:
ural_rf_write(sc, RAL_RF1, 0x08804);
ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]);
ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044);
ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
break;
case RAL_RF_2524:
ural_rf_write(sc, RAL_RF1, 0x0c808);
ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]);
ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
break;
case RAL_RF_2525:
ural_rf_write(sc, RAL_RF1, 0x08808);
ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]);
ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
ural_rf_write(sc, RAL_RF1, 0x08808);
ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]);
ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286);
break;
case RAL_RF_2525E:
ural_rf_write(sc, RAL_RF1, 0x08808);
ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]);
ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282);
break;
case RAL_RF_2526:
ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]);
ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
ural_rf_write(sc, RAL_RF1, 0x08804);
ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]);
ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044);
ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381);
break;
/* dual-band RF */
case RAL_RF_5222:
for (i = 0; ural_rf5222[i].chan != chan; i++);
ural_rf_write(sc, RAL_RF1, ural_rf5222[i].r1);
ural_rf_write(sc, RAL_RF2, ural_rf5222[i].r2);
ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040);
ural_rf_write(sc, RAL_RF4, ural_rf5222[i].r4);
break;
}
if (ic->ic_opmode != IEEE80211_M_MONITOR &&
(ic->ic_flags & IEEE80211_F_SCAN) == 0) {
/* set Japan filter bit for channel 14 */
tmp = ural_bbp_read(sc, 70);
tmp &= ~RAL_JAPAN_FILTER;
if (chan == 14)
tmp |= RAL_JAPAN_FILTER;
ural_bbp_write(sc, 70, tmp);
/* clear CRC errors */
ural_read(sc, RAL_STA_CSR0);
ural_pause(sc, hz / 100);
ural_disable_rf_tune(sc);
}
/* XXX doesn't belong here */
/* update basic rate set */
ural_set_basicrates(sc, c);
/* give the hardware some time to do the switchover */
ural_pause(sc, hz / 100);
}
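/*
 * Standalone illustration of the transmit-power computation in
 * ural_set_chan() above: the per-channel EEPROM value is clamped to the
 * 5-bit hardware range and then reduced according to net80211's
 * ic_txpowlimit setting (100 being the unrestricted maximum, in half-dBm
 * units).  Compiles as plain C; the EEPROM value is an example.
 */
#include <stdio.h>

static int
tx_power(int eeprom_pow, int txpowlimit)
{
	int power = eeprom_pow < 31 ? eeprom_pow : 31;	/* min(x, 31) */

	power -= (100 - txpowlimit) / 8;	/* ifconfig txpower limit */
	return (power);
}

int
main(void)
{
	const int limits[] = { 100, 60, 30 };
	unsigned i;

	for (i = 0; i < sizeof(limits) / sizeof(limits[0]); i++)
		printf("txpowlimit %3d -> power index %d\n", limits[i],
		    tx_power(28, limits[i]));
	return (0);
}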
/*
* Disable RF auto-tuning.
*/
static void
ural_disable_rf_tune(struct ural_softc *sc)
{
uint32_t tmp;
if (sc->rf_rev != RAL_RF_2523) {
tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE;
ural_rf_write(sc, RAL_RF1, tmp);
}
tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE;
ural_rf_write(sc, RAL_RF3, tmp);
DPRINTFN(2, "disabling RF autotune\n");
}
/*
* Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF
* synchronization.
*/
static void
ural_enable_tsf_sync(struct ural_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint16_t logcwmin, preload, tmp;
/* first, disable TSF synchronization */
ural_write(sc, RAL_TXRX_CSR19, 0);
tmp = (16 * vap->iv_bss->ni_intval) << 4;
ural_write(sc, RAL_TXRX_CSR18, tmp);
logcwmin = (ic->ic_opmode == IEEE80211_M_IBSS) ? 2 : 0;
preload = (ic->ic_opmode == IEEE80211_M_IBSS) ? 320 : 6;
tmp = logcwmin << 12 | preload;
ural_write(sc, RAL_TXRX_CSR20, tmp);
/* finally, enable TSF synchronization */
tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN;
if (ic->ic_opmode == IEEE80211_M_STA)
tmp |= RAL_ENABLE_TSF_SYNC(1);
else
tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR;
ural_write(sc, RAL_TXRX_CSR19, tmp);
DPRINTF("enabling TSF synchronization\n");
}
static void
ural_enable_tsf(struct ural_softc *sc)
{
/* first, disable TSF synchronization */
ural_write(sc, RAL_TXRX_CSR19, 0);
ural_write(sc, RAL_TXRX_CSR19, RAL_ENABLE_TSF | RAL_ENABLE_TSF_SYNC(2));
}
#define RAL_RXTX_TURNAROUND 5 /* us */
static void
-ural_update_slot(struct ifnet *ifp)
+ural_update_slot(struct ural_softc *sc)
{
- struct ieee80211com *ic = ifp->if_l2com;
- struct ural_softc *sc = ic->ic_softc;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t slottime, sifs, eifs;
slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20;
/*
* These settings may sound a bit inconsistent but this is what the
* reference driver does.
*/
if (ic->ic_curmode == IEEE80211_MODE_11B) {
sifs = 16 - RAL_RXTX_TURNAROUND;
eifs = 364;
} else {
sifs = 10 - RAL_RXTX_TURNAROUND;
eifs = 64;
}
ural_write(sc, RAL_MAC_CSR10, slottime);
ural_write(sc, RAL_MAC_CSR11, sifs);
ural_write(sc, RAL_MAC_CSR12, eifs);
}
static void
ural_set_txpreamble(struct ural_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t tmp;
tmp = ural_read(sc, RAL_TXRX_CSR10);
tmp &= ~RAL_SHORT_PREAMBLE;
if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
tmp |= RAL_SHORT_PREAMBLE;
ural_write(sc, RAL_TXRX_CSR10, tmp);
}
static void
ural_set_basicrates(struct ural_softc *sc, const struct ieee80211_channel *c)
{
/* XXX wrong, take from rate set */
/* update basic rate set */
if (IEEE80211_IS_CHAN_5GHZ(c)) {
/* 11a basic rates: 6, 12, 24Mbps */
ural_write(sc, RAL_TXRX_CSR11, 0x150);
} else if (IEEE80211_IS_CHAN_ANYG(c)) {
/* 11g basic rates: 1, 2, 5.5, 11, 6, 12, 24Mbps */
ural_write(sc, RAL_TXRX_CSR11, 0x15f);
} else {
/* 11b basic rates: 1, 2Mbps */
ural_write(sc, RAL_TXRX_CSR11, 0x3);
}
}
static void
ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid)
{
uint16_t tmp;
tmp = bssid[0] | bssid[1] << 8;
ural_write(sc, RAL_MAC_CSR5, tmp);
tmp = bssid[2] | bssid[3] << 8;
ural_write(sc, RAL_MAC_CSR6, tmp);
tmp = bssid[4] | bssid[5] << 8;
ural_write(sc, RAL_MAC_CSR7, tmp);
DPRINTF("setting BSSID to %6D\n", bssid, ":");
}
static void
-ural_set_macaddr(struct ural_softc *sc, uint8_t *addr)
+ural_set_macaddr(struct ural_softc *sc, const uint8_t *addr)
{
uint16_t tmp;
tmp = addr[0] | addr[1] << 8;
ural_write(sc, RAL_MAC_CSR2, tmp);
tmp = addr[2] | addr[3] << 8;
ural_write(sc, RAL_MAC_CSR3, tmp);
tmp = addr[4] | addr[5] << 8;
ural_write(sc, RAL_MAC_CSR4, tmp);
DPRINTF("setting MAC address to %6D\n", addr, ":");
}
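/*
 * Standalone illustration of how ural_set_macaddr() (MAC_CSR2..4) and
 * ural_set_bssid() (MAC_CSR5..7) load a 48-bit address: consecutive byte
 * pairs are packed little-endian into three 16-bit registers.  Compiles as
 * plain C; the address is an example.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint8_t addr[6] = { 0x00, 0x12, 0x17, 0xaa, 0xbb, 0xcc };
	int i;

	for (i = 0; i < 3; i++) {
		uint16_t tmp = addr[2 * i] | addr[2 * i + 1] << 8;
		printf("MAC_CSR%d <- 0x%04x\n", 2 + i, tmp);
	}
	return (0);
}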
static void
ural_setpromisc(struct ural_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
uint32_t tmp;
tmp = ural_read(sc, RAL_TXRX_CSR2);
tmp &= ~RAL_DROP_NOT_TO_ME;
- if (!(ifp->if_flags & IFF_PROMISC))
+ if (sc->sc_ic.ic_promisc == 0)
tmp |= RAL_DROP_NOT_TO_ME;
ural_write(sc, RAL_TXRX_CSR2, tmp);
- DPRINTF("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ?
+ DPRINTF("%s promiscuous mode\n", sc->sc_ic.ic_promisc ?
"entering" : "leaving");
}
static void
ural_update_promisc(struct ieee80211com *ic)
{
struct ural_softc *sc = ic->ic_softc;
- if ((ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
-
RAL_LOCK(sc);
- ural_setpromisc(sc);
+ if (sc->sc_running)
+ ural_setpromisc(sc);
RAL_UNLOCK(sc);
}
static const char *
ural_get_rf(int rev)
{
switch (rev) {
case RAL_RF_2522: return "RT2522";
case RAL_RF_2523: return "RT2523";
case RAL_RF_2524: return "RT2524";
case RAL_RF_2525: return "RT2525";
case RAL_RF_2525E: return "RT2525e";
case RAL_RF_2526: return "RT2526";
case RAL_RF_5222: return "RT5222";
default: return "unknown";
}
}
static void
ural_read_eeprom(struct ural_softc *sc)
{
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t val;
ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2);
val = le16toh(val);
sc->rf_rev = (val >> 11) & 0x7;
sc->hw_radio = (val >> 10) & 0x1;
sc->led_mode = (val >> 6) & 0x7;
sc->rx_ant = (val >> 4) & 0x3;
sc->tx_ant = (val >> 2) & 0x3;
sc->nb_ant = val & 0x3;
/* read MAC address */
- ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, sc->sc_bssid, 6);
+ ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, ic->ic_macaddr, 6);
/* read default values for BBP registers */
ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16);
/* read Tx power for all b/g channels */
ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14);
}
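/*
 * Standalone illustration of the RAL_EEPROM_CONFIG0 bit layout decoded in
 * ural_read_eeprom() above (after le16toh): bits 11-13 RF revision, bit 10
 * hardware radio switch, bits 6-8 LED mode, bits 4-5 RX antenna, bits 2-3 TX
 * antenna, bits 0-1 antenna count.  Compiles as plain C; the word is an
 * example.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t val = 0x1a55;		/* example CONFIG0 word */

	printf("rf_rev   %d\n", (val >> 11) & 0x7);
	printf("hw_radio %d\n", (val >> 10) & 0x1);
	printf("led_mode %d\n", (val >> 6) & 0x7);
	printf("rx_ant   %d\n", (val >> 4) & 0x3);
	printf("tx_ant   %d\n", (val >> 2) & 0x3);
	printf("nb_ant   %d\n", val & 0x3);
	return (0);
}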
static int
ural_bbp_init(struct ural_softc *sc)
{
#define N(a) ((int)(sizeof (a) / sizeof ((a)[0])))
int i, ntries;
/* wait for BBP to be ready */
for (ntries = 0; ntries < 100; ntries++) {
if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0)
break;
if (ural_pause(sc, hz / 100))
break;
}
if (ntries == 100) {
device_printf(sc->sc_dev, "timeout waiting for BBP\n");
return EIO;
}
/* initialize BBP registers to default values */
for (i = 0; i < N(ural_def_bbp); i++)
ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val);
#if 0
/* initialize BBP registers to values stored in EEPROM */
for (i = 0; i < 16; i++) {
if (sc->bbp_prom[i].reg == 0xff)
continue;
ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val);
}
#endif
return 0;
#undef N
}
static void
ural_set_txantenna(struct ural_softc *sc, int antenna)
{
uint16_t tmp;
uint8_t tx;
tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK;
if (antenna == 1)
tx |= RAL_BBP_ANTA;
else if (antenna == 2)
tx |= RAL_BBP_ANTB;
else
tx |= RAL_BBP_DIVERSITY;
/* need to force I/Q flip for RF 2525e, 2526 and 5222 */
if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 ||
sc->rf_rev == RAL_RF_5222)
tx |= RAL_BBP_FLIPIQ;
ural_bbp_write(sc, RAL_BBP_TX, tx);
/* update values in PHY_CSR5 and PHY_CSR6 */
tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7;
ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7));
tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7;
ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7));
}
static void
ural_set_rxantenna(struct ural_softc *sc, int antenna)
{
uint8_t rx;
rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK;
if (antenna == 1)
rx |= RAL_BBP_ANTA;
else if (antenna == 2)
rx |= RAL_BBP_ANTB;
else
rx |= RAL_BBP_DIVERSITY;
/* need to force no I/Q flip for RF 2525e and 2526 */
if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526)
rx &= ~RAL_BBP_FLIPIQ;
ural_bbp_write(sc, RAL_BBP_RX, rx);
}
static void
-ural_init_locked(struct ural_softc *sc)
+ural_init(struct ural_softc *sc)
{
#define N(a) ((int)(sizeof (a) / sizeof ((a)[0])))
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint16_t tmp;
int i, ntries;
RAL_LOCK_ASSERT(sc, MA_OWNED);
ural_set_testmode(sc);
ural_write(sc, 0x308, 0x00f0); /* XXX magic */
ural_stop(sc);
/* initialize MAC registers to default values */
for (i = 0; i < N(ural_def_mac); i++)
ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val);
/* wait for BBP and RF to wake up (this can take a long time!) */
for (ntries = 0; ntries < 100; ntries++) {
tmp = ural_read(sc, RAL_MAC_CSR17);
if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) ==
(RAL_BBP_AWAKE | RAL_RF_AWAKE))
break;
if (ural_pause(sc, hz / 100))
break;
}
if (ntries == 100) {
device_printf(sc->sc_dev,
"timeout waiting for BBP/RF to wakeup\n");
goto fail;
}
/* we're ready! */
ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY);
/* set basic rate set (will be updated later) */
ural_write(sc, RAL_TXRX_CSR11, 0x15f);
if (ural_bbp_init(sc) != 0)
goto fail;
ural_set_chan(sc, ic->ic_curchan);
/* clear statistic registers (STA_CSR0 to STA_CSR10) */
ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
ural_set_txantenna(sc, sc->tx_ant);
ural_set_rxantenna(sc, sc->rx_ant);
- ural_set_macaddr(sc, IF_LLADDR(ifp));
+ ural_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);
/*
* Allocate Tx and Rx xfer queues.
*/
ural_setup_tx_list(sc);
/* kick Rx */
tmp = RAL_DROP_PHY | RAL_DROP_CRC;
if (ic->ic_opmode != IEEE80211_M_MONITOR) {
tmp |= RAL_DROP_CTL | RAL_DROP_BAD_VERSION;
if (ic->ic_opmode != IEEE80211_M_HOSTAP)
tmp |= RAL_DROP_TODS;
- if (!(ifp->if_flags & IFF_PROMISC))
+ if (ic->ic_promisc == 0)
tmp |= RAL_DROP_NOT_TO_ME;
}
ural_write(sc, RAL_TXRX_CSR2, tmp);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_running = 1;
usbd_xfer_set_stall(sc->sc_xfer[URAL_BULK_WR]);
usbd_transfer_start(sc->sc_xfer[URAL_BULK_RD]);
return;
fail: ural_stop(sc);
#undef N
}
static void
-ural_init(void *priv)
-{
- struct ural_softc *sc = priv;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
-
- RAL_LOCK(sc);
- ural_init_locked(sc);
- RAL_UNLOCK(sc);
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ieee80211_start_all(ic); /* start all vap's */
-}
-
-static void
ural_stop(struct ural_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
RAL_LOCK_ASSERT(sc, MA_OWNED);
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->sc_running = 0;
/*
* Drain all the transfers, if not already drained:
*/
RAL_UNLOCK(sc);
usbd_transfer_drain(sc->sc_xfer[URAL_BULK_WR]);
usbd_transfer_drain(sc->sc_xfer[URAL_BULK_RD]);
RAL_LOCK(sc);
ural_unsetup_tx_list(sc);
/* disable Rx */
ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX);
/* reset ASIC and BBP (but won't reset MAC registers!) */
ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP);
/* wait a little */
ural_pause(sc, hz / 10);
ural_write(sc, RAL_MAC_CSR1, 0);
/* wait a little */
ural_pause(sc, hz / 10);
}
static int
ural_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct ural_softc *sc = ic->ic_softc;
RAL_LOCK(sc);
/* prevent management frames from being sent if we're not ready */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!sc->sc_running) {
RAL_UNLOCK(sc);
m_freem(m);
ieee80211_free_node(ni);
return ENETDOWN;
}
if (sc->tx_nfree < RAL_TX_MINFREE) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
RAL_UNLOCK(sc);
m_freem(m);
ieee80211_free_node(ni);
return EIO;
}
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
-
if (params == NULL) {
/*
* Legacy path; interpret frame contents to decide
* precisely how to send the frame.
*/
if (ural_tx_mgt(sc, m, ni) != 0)
goto bad;
} else {
/*
* Caller supplied explicit parameters to use in
* sending the frame.
*/
if (ural_tx_raw(sc, m, ni, params) != 0)
goto bad;
}
RAL_UNLOCK(sc);
return 0;
bad:
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
RAL_UNLOCK(sc);
ieee80211_free_node(ni);
return EIO; /* XXX */
}
static void
ural_ratectl_start(struct ural_softc *sc, struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ural_vap *uvp = URAL_VAP(vap);
/* clear statistic registers (STA_CSR0 to STA_CSR10) */
ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta);
usb_callout_reset(&uvp->ratectl_ch, hz, ural_ratectl_timeout, uvp);
}
static void
ural_ratectl_timeout(void *arg)
{
struct ural_vap *uvp = arg;
struct ieee80211vap *vap = &uvp->vap;
struct ieee80211com *ic = vap->iv_ic;
ieee80211_runtask(ic, &uvp->ratectl_task);
}
static void
ural_ratectl_task(void *arg, int pending)
{
struct ural_vap *uvp = arg;
struct ieee80211vap *vap = &uvp->vap;
struct ieee80211com *ic = vap->iv_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct ural_softc *sc = ic->ic_softc;
struct ieee80211_node *ni;
int ok, fail;
int sum, retrycnt;
ni = ieee80211_ref_node(vap->iv_bss);
RAL_LOCK(sc);
/* read and clear statistic registers (STA_CSR0 to STA_CSR10) */
ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof(sc->sta));
ok = sc->sta[7] + /* TX ok w/o retry */
sc->sta[8]; /* TX ok w/ retry */
fail = sc->sta[9]; /* TX retry-fail count */
sum = ok+fail;
retrycnt = sc->sta[8] + fail;
ieee80211_ratectl_tx_update(vap, ni, &sum, &ok, &retrycnt);
(void) ieee80211_ratectl_rate(ni, NULL, 0);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, fail); /* count TX retry-fail as Tx errors */
+ /* count TX retry-fail as Tx errors */
+ if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, fail);
usb_callout_reset(&uvp->ratectl_ch, hz, ural_ratectl_timeout, uvp);
RAL_UNLOCK(sc);
ieee80211_free_node(ni);
}
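/*
 * Standalone illustration of how ural_ratectl_task() turns the STA_CSR
 * counters into the inputs of ieee80211_ratectl_tx_update(): sta[7] is
 * frames sent without retry, sta[8] frames sent with retry, sta[9] retry
 * failures, so retrycnt counts every frame that needed at least one retry.
 * Compiles as plain C; the counter values are examples.
 */
#include <stdio.h>

int
main(void)
{
	int sta7 = 120, sta8 = 14, sta9 = 3;	/* example register snapshot */
	int ok = sta7 + sta8;			/* TX ok w/o retry + w/ retry */
	int fail = sta9;			/* TX retry-fail count */
	int sum = ok + fail;			/* total attempts reported */
	int retrycnt = sta8 + fail;		/* frames that needed retries */

	printf("sum %d ok %d retrycnt %d (oerrors += %d)\n",
	    sum, ok, retrycnt, fail);
	return (0);
}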
static int
ural_pause(struct ural_softc *sc, int timeout)
{
usb_pause_mtx(&sc->sc_mtx, timeout);
return (0);
}
Index: head/sys/dev/usb/wlan/if_uralvar.h
===================================================================
--- head/sys/dev/usb/wlan/if_uralvar.h (revision 287196)
+++ head/sys/dev/usb/wlan/if_uralvar.h (revision 287197)
@@ -1,135 +1,136 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2005
* Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define RAL_TX_LIST_COUNT 8
#define RAL_TX_MINFREE 2
#define URAL_SCAN_START 1
#define URAL_SCAN_END 2
#define URAL_SET_CHANNEL 3
struct ural_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_antsignal;
int8_t wr_antnoise;
uint8_t wr_antenna;
} __packed __aligned(8);
#define RAL_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_ANTENNA) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE))
struct ural_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint8_t wt_rate;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
uint8_t wt_antenna;
} __packed __aligned(8);
#define RAL_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_ANTENNA))
struct ural_softc;
struct ural_tx_data {
STAILQ_ENTRY(ural_tx_data) next;
struct ural_softc *sc;
struct ural_tx_desc desc;
struct mbuf *m;
struct ieee80211_node *ni;
int rate;
};
typedef STAILQ_HEAD(, ural_tx_data) ural_txdhead;
struct ural_vap {
struct ieee80211vap vap;
struct ieee80211_beacon_offsets bo;
struct usb_callout ratectl_ch;
struct task ratectl_task;
int (*newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define URAL_VAP(vap) ((struct ural_vap *)(vap))
enum {
URAL_BULK_WR,
URAL_BULK_RD,
URAL_N_TRANSFER = 2,
};
struct ural_softc {
- struct ifnet *sc_ifp;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
device_t sc_dev;
struct usb_device *sc_udev;
uint32_t asic_rev;
uint8_t rf_rev;
struct usb_xfer *sc_xfer[URAL_N_TRANSFER];
struct ural_tx_data tx_data[RAL_TX_LIST_COUNT];
ural_txdhead tx_q;
ural_txdhead tx_free;
int tx_nfree;
struct ural_rx_desc sc_rx_desc;
struct mtx sc_mtx;
uint16_t sta[11];
uint32_t rf_regs[4];
uint8_t txpow[14];
- uint8_t sc_bssid[6];
- uint8_t sc_detached;
+ u_int sc_detached:1,
+ sc_running:1;
struct {
uint8_t val;
uint8_t reg;
} __packed bbp_prom[16];
int led_mode;
int hw_radio;
int rx_ant;
int tx_ant;
int nb_ant;
struct ural_rx_radiotap_header sc_rxtap;
int sc_rxtap_len;
struct ural_tx_radiotap_header sc_txtap;
int sc_txtap_len;
};
#define RAL_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define RAL_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
#define RAL_LOCK_ASSERT(sc, t) mtx_assert(&(sc)->sc_mtx, t)
Index: head/sys/dev/usb/wlan/if_urtw.c
===================================================================
--- head/sys/dev/usb/wlan/if_urtw.c (revision 287196)
+++ head/sys/dev/usb/wlan/if_urtw.c (revision 287197)
@@ -1,4482 +1,4398 @@
/*-
* Copyright (c) 2008 Weongyo Jeong <weongyo@FreeBSD.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kdb.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#endif
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include "usbdevs.h"
#include <dev/usb/wlan/if_urtwreg.h>
#include <dev/usb/wlan/if_urtwvar.h>
static SYSCTL_NODE(_hw_usb, OID_AUTO, urtw, CTLFLAG_RW, 0, "USB Realtek 8187L");
#ifdef URTW_DEBUG
int urtw_debug = 0;
SYSCTL_INT(_hw_usb_urtw, OID_AUTO, debug, CTLFLAG_RWTUN, &urtw_debug, 0,
"control debugging printfs");
enum {
URTW_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
URTW_DEBUG_RECV = 0x00000002, /* basic recv operation */
URTW_DEBUG_RESET = 0x00000004, /* reset processing */
URTW_DEBUG_TX_PROC = 0x00000008, /* tx ISR proc */
URTW_DEBUG_RX_PROC = 0x00000010, /* rx ISR proc */
URTW_DEBUG_STATE = 0x00000020, /* 802.11 state transitions */
URTW_DEBUG_STAT = 0x00000040, /* statistic */
URTW_DEBUG_INIT = 0x00000080, /* initialization of dev */
URTW_DEBUG_TXSTATUS = 0x00000100, /* tx status */
URTW_DEBUG_ANY = 0xffffffff
};
#define DPRINTF(sc, m, fmt, ...) do { \
if (sc->sc_debug & (m)) \
printf(fmt, __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(sc, m, fmt, ...) do { \
(void) sc; \
} while (0)
#endif
static int urtw_preamble_mode = URTW_PREAMBLE_MODE_LONG;
SYSCTL_INT(_hw_usb_urtw, OID_AUTO, preamble_mode, CTLFLAG_RWTUN,
&urtw_preamble_mode, 0, "set the preamble mode (long or short)");
/* recognized device vendors/products */
#define urtw_lookup(v, p) \
((const struct urtw_type *)usb_lookup(urtw_devs, v, p))
#define URTW_DEV_B(v,p) \
{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, URTW_REV_RTL8187B) }
#define URTW_DEV_L(v,p) \
{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, URTW_REV_RTL8187L) }
#define URTW_REV_RTL8187B 0
#define URTW_REV_RTL8187L 1
static const STRUCT_USB_HOST_ID urtw_devs[] = {
URTW_DEV_B(NETGEAR, WG111V3),
URTW_DEV_B(REALTEK, RTL8187B_0),
URTW_DEV_B(REALTEK, RTL8187B_1),
URTW_DEV_B(REALTEK, RTL8187B_2),
URTW_DEV_B(SITECOMEU, WL168V4),
URTW_DEV_L(ASUS, P5B_WIFI),
URTW_DEV_L(BELKIN, F5D7050E),
URTW_DEV_L(LINKSYS4, WUSB54GCV2),
URTW_DEV_L(NETGEAR, WG111V2),
URTW_DEV_L(REALTEK, RTL8187),
URTW_DEV_L(SITECOMEU, WL168V1),
URTW_DEV_L(SURECOM, EP9001G2A),
{ USB_VPI(USB_VENDOR_OVISLINK, 0x8187, URTW_REV_RTL8187L) },
{ USB_VPI(USB_VENDOR_DICKSMITH, 0x9401, URTW_REV_RTL8187L) },
{ USB_VPI(USB_VENDOR_HP, 0xca02, URTW_REV_RTL8187L) },
{ USB_VPI(USB_VENDOR_LOGITEC, 0x010c, URTW_REV_RTL8187L) },
{ USB_VPI(USB_VENDOR_NETGEAR, 0x6100, URTW_REV_RTL8187L) },
{ USB_VPI(USB_VENDOR_SPHAIRON, 0x0150, URTW_REV_RTL8187L) },
{ USB_VPI(USB_VENDOR_QCOM, 0x6232, URTW_REV_RTL8187L) },
#undef URTW_DEV_L
#undef URTW_DEV_B
};
#define urtw_read8_m(sc, val, data) do { \
error = urtw_read8_c(sc, val, data); \
if (error != 0) \
goto fail; \
} while (0)
#define urtw_write8_m(sc, val, data) do { \
error = urtw_write8_c(sc, val, data); \
if (error != 0) \
goto fail; \
} while (0)
#define urtw_read16_m(sc, val, data) do { \
error = urtw_read16_c(sc, val, data); \
if (error != 0) \
goto fail; \
} while (0)
#define urtw_write16_m(sc, val, data) do { \
error = urtw_write16_c(sc, val, data); \
if (error != 0) \
goto fail; \
} while (0)
#define urtw_read32_m(sc, val, data) do { \
error = urtw_read32_c(sc, val, data); \
if (error != 0) \
goto fail; \
} while (0)
#define urtw_write32_m(sc, val, data) do { \
error = urtw_write32_c(sc, val, data); \
if (error != 0) \
goto fail; \
} while (0)
#define urtw_8187_write_phy_ofdm(sc, val, data) do { \
error = urtw_8187_write_phy_ofdm_c(sc, val, data); \
if (error != 0) \
goto fail; \
} while (0)
#define urtw_8187_write_phy_cck(sc, val, data) do { \
error = urtw_8187_write_phy_cck_c(sc, val, data); \
if (error != 0) \
goto fail; \
} while (0)
#define urtw_8225_write(sc, val, data) do { \
error = urtw_8225_write_c(sc, val, data); \
if (error != 0) \
goto fail; \
} while (0)
struct urtw_pair {
uint32_t reg;
uint32_t val;
};
static uint8_t urtw_8225_agc[] = {
0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9d, 0x9c, 0x9b,
0x9a, 0x99, 0x98, 0x97, 0x96, 0x95, 0x94, 0x93, 0x92, 0x91, 0x90,
0x8f, 0x8e, 0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86, 0x85,
0x84, 0x83, 0x82, 0x81, 0x80, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b, 0x3a,
0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2f,
0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24,
0x23, 0x22, 0x21, 0x20, 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19,
0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e,
0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03,
0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01
};
static uint8_t urtw_8225z2_agc[] = {
0x5e, 0x5e, 0x5e, 0x5e, 0x5d, 0x5b, 0x59, 0x57, 0x55, 0x53, 0x51,
0x4f, 0x4d, 0x4b, 0x49, 0x47, 0x45, 0x43, 0x41, 0x3f, 0x3d, 0x3b,
0x39, 0x37, 0x35, 0x33, 0x31, 0x2f, 0x2d, 0x2b, 0x29, 0x27, 0x25,
0x23, 0x21, 0x1f, 0x1d, 0x1b, 0x19, 0x17, 0x15, 0x13, 0x11, 0x0f,
0x0d, 0x0b, 0x09, 0x07, 0x05, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x19, 0x19,
0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x20, 0x21, 0x22, 0x23,
0x24, 0x25, 0x26, 0x26, 0x27, 0x27, 0x28, 0x28, 0x29, 0x2a, 0x2a,
0x2a, 0x2b, 0x2b, 0x2b, 0x2c, 0x2c, 0x2c, 0x2d, 0x2d, 0x2d, 0x2d,
0x2e, 0x2e, 0x2e, 0x2e, 0x2f, 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31,
0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31,
0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31
};
static uint32_t urtw_8225_channel[] = {
0x0000, /* dummy channel 0 */
0x085c, /* 1 */
0x08dc, /* 2 */
0x095c, /* 3 */
0x09dc, /* 4 */
0x0a5c, /* 5 */
0x0adc, /* 6 */
0x0b5c, /* 7 */
0x0bdc, /* 8 */
0x0c5c, /* 9 */
0x0cdc, /* 10 */
0x0d5c, /* 11 */
0x0ddc, /* 12 */
0x0e5c, /* 13 */
0x0f72, /* 14 */
};
static uint8_t urtw_8225_gain[] = {
0x23, 0x88, 0x7c, 0xa5, /* -82dbm */
0x23, 0x88, 0x7c, 0xb5, /* -82dbm */
0x23, 0x88, 0x7c, 0xc5, /* -82dbm */
0x33, 0x80, 0x79, 0xc5, /* -78dbm */
0x43, 0x78, 0x76, 0xc5, /* -74dbm */
0x53, 0x60, 0x73, 0xc5, /* -70dbm */
0x63, 0x58, 0x70, 0xc5, /* -66dbm */
};
static struct urtw_pair urtw_8225_rf_part1[] = {
{ 0x00, 0x0067 }, { 0x01, 0x0fe0 }, { 0x02, 0x044d }, { 0x03, 0x0441 },
{ 0x04, 0x0486 }, { 0x05, 0x0bc0 }, { 0x06, 0x0ae6 }, { 0x07, 0x082a },
{ 0x08, 0x001f }, { 0x09, 0x0334 }, { 0x0a, 0x0fd4 }, { 0x0b, 0x0391 },
{ 0x0c, 0x0050 }, { 0x0d, 0x06db }, { 0x0e, 0x0029 }, { 0x0f, 0x0914 },
};
static struct urtw_pair urtw_8225_rf_part2[] = {
{ 0x00, 0x01 }, { 0x01, 0x02 }, { 0x02, 0x42 }, { 0x03, 0x00 },
{ 0x04, 0x00 }, { 0x05, 0x00 }, { 0x06, 0x40 }, { 0x07, 0x00 },
{ 0x08, 0x40 }, { 0x09, 0xfe }, { 0x0a, 0x09 }, { 0x0b, 0x80 },
{ 0x0c, 0x01 }, { 0x0e, 0xd3 }, { 0x0f, 0x38 }, { 0x10, 0x84 },
{ 0x11, 0x06 }, { 0x12, 0x20 }, { 0x13, 0x20 }, { 0x14, 0x00 },
{ 0x15, 0x40 }, { 0x16, 0x00 }, { 0x17, 0x40 }, { 0x18, 0xef },
{ 0x19, 0x19 }, { 0x1a, 0x20 }, { 0x1b, 0x76 }, { 0x1c, 0x04 },
{ 0x1e, 0x95 }, { 0x1f, 0x75 }, { 0x20, 0x1f }, { 0x21, 0x27 },
{ 0x22, 0x16 }, { 0x24, 0x46 }, { 0x25, 0x20 }, { 0x26, 0x90 },
{ 0x27, 0x88 }
};
static struct urtw_pair urtw_8225_rf_part3[] = {
{ 0x00, 0x98 }, { 0x03, 0x20 }, { 0x04, 0x7e }, { 0x05, 0x12 },
{ 0x06, 0xfc }, { 0x07, 0x78 }, { 0x08, 0x2e }, { 0x10, 0x9b },
{ 0x11, 0x88 }, { 0x12, 0x47 }, { 0x13, 0xd0 }, { 0x19, 0x00 },
{ 0x1a, 0xa0 }, { 0x1b, 0x08 }, { 0x40, 0x86 }, { 0x41, 0x8d },
{ 0x42, 0x15 }, { 0x43, 0x18 }, { 0x44, 0x1f }, { 0x45, 0x1e },
{ 0x46, 0x1a }, { 0x47, 0x15 }, { 0x48, 0x10 }, { 0x49, 0x0a },
{ 0x4a, 0x05 }, { 0x4b, 0x02 }, { 0x4c, 0x05 }
};
static uint16_t urtw_8225_rxgain[] = {
0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409,
0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541,
0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583,
0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644,
0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688,
0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745,
0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789,
0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793,
0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d,
0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9,
0x07aa, 0x07ab, 0x07ac, 0x07ad, 0x07b0, 0x07b1, 0x07b2, 0x07b3,
0x07b4, 0x07b5, 0x07b8, 0x07b9, 0x07ba, 0x07bb, 0x07bb
};
static uint8_t urtw_8225_threshold[] = {
0x8d, 0x8d, 0x8d, 0x8d, 0x9d, 0xad, 0xbd,
};
static uint8_t urtw_8225_tx_gain_cck_ofdm[] = {
0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e
};
static uint8_t urtw_8225_txpwr_cck[] = {
0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02,
0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02,
0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02,
0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02,
0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03,
0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03
};
static uint8_t urtw_8225_txpwr_cck_ch14[] = {
0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00,
0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00,
0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00,
0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00,
0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00,
0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00
};
static uint8_t urtw_8225_txpwr_ofdm[]={
0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4
};
static uint8_t urtw_8225v2_gain_bg[]={
0x23, 0x15, 0xa5, /* -82-1dbm */
0x23, 0x15, 0xb5, /* -82-2dbm */
0x23, 0x15, 0xc5, /* -82-3dbm */
0x33, 0x15, 0xc5, /* -78dbm */
0x43, 0x15, 0xc5, /* -74dbm */
0x53, 0x15, 0xc5, /* -70dbm */
0x63, 0x15, 0xc5, /* -66dbm */
};
static struct urtw_pair urtw_8225v2_rf_part1[] = {
{ 0x00, 0x02bf }, { 0x01, 0x0ee0 }, { 0x02, 0x044d }, { 0x03, 0x0441 },
{ 0x04, 0x08c3 }, { 0x05, 0x0c72 }, { 0x06, 0x00e6 }, { 0x07, 0x082a },
{ 0x08, 0x003f }, { 0x09, 0x0335 }, { 0x0a, 0x09d4 }, { 0x0b, 0x07bb },
{ 0x0c, 0x0850 }, { 0x0d, 0x0cdf }, { 0x0e, 0x002b }, { 0x0f, 0x0114 }
};
static struct urtw_pair urtw_8225v2b_rf_part0[] = {
{ 0x00, 0x00b7 }, { 0x01, 0x0ee0 }, { 0x02, 0x044d }, { 0x03, 0x0441 },
{ 0x04, 0x08c3 }, { 0x05, 0x0c72 }, { 0x06, 0x00e6 }, { 0x07, 0x082a },
{ 0x08, 0x003f }, { 0x09, 0x0335 }, { 0x0a, 0x09d4 }, { 0x0b, 0x07bb },
{ 0x0c, 0x0850 }, { 0x0d, 0x0cdf }, { 0x0e, 0x002b }, { 0x0f, 0x0114 }
};
static struct urtw_pair urtw_8225v2b_rf_part1[] = {
{0x0f0, 0x32}, {0x0f1, 0x32}, {0x0f2, 0x00},
{0x0f3, 0x00}, {0x0f4, 0x32}, {0x0f5, 0x43},
{0x0f6, 0x00}, {0x0f7, 0x00}, {0x0f8, 0x46},
{0x0f9, 0xa4}, {0x0fa, 0x00}, {0x0fb, 0x00},
{0x0fc, 0x96}, {0x0fd, 0xa4}, {0x0fe, 0x00},
{0x0ff, 0x00}, {0x158, 0x4b}, {0x159, 0x00},
{0x15a, 0x4b}, {0x15b, 0x00}, {0x160, 0x4b},
{0x161, 0x09}, {0x162, 0x4b}, {0x163, 0x09},
{0x1ce, 0x0f}, {0x1cf, 0x00}, {0x1e0, 0xff},
{0x1e1, 0x0f}, {0x1e2, 0x00}, {0x1f0, 0x4e},
{0x1f1, 0x01}, {0x1f2, 0x02}, {0x1f3, 0x03},
{0x1f4, 0x04}, {0x1f5, 0x05}, {0x1f6, 0x06},
{0x1f7, 0x07}, {0x1f8, 0x08}, {0x24e, 0x00},
{0x20c, 0x04}, {0x221, 0x61}, {0x222, 0x68},
{0x223, 0x6f}, {0x224, 0x76}, {0x225, 0x7d},
{0x226, 0x84}, {0x227, 0x8d}, {0x24d, 0x08},
{0x250, 0x05}, {0x251, 0xf5}, {0x252, 0x04},
{0x253, 0xa0}, {0x254, 0x1f}, {0x255, 0x23},
{0x256, 0x45}, {0x257, 0x67}, {0x258, 0x08},
{0x259, 0x08}, {0x25a, 0x08}, {0x25b, 0x08},
{0x260, 0x08}, {0x261, 0x08}, {0x262, 0x08},
{0x263, 0x08}, {0x264, 0xcf}, {0x272, 0x56},
{0x273, 0x9a}, {0x034, 0xf0}, {0x035, 0x0f},
{0x05b, 0x40}, {0x084, 0x88}, {0x085, 0x24},
{0x088, 0x54}, {0x08b, 0xb8}, {0x08c, 0x07},
{0x08d, 0x00}, {0x094, 0x1b}, {0x095, 0x12},
{0x096, 0x00}, {0x097, 0x06}, {0x09d, 0x1a},
{0x09f, 0x10}, {0x0b4, 0x22}, {0x0be, 0x80},
{0x0db, 0x00}, {0x0ee, 0x00}, {0x091, 0x03},
{0x24c, 0x00}, {0x39f, 0x00}, {0x08c, 0x01},
{0x08d, 0x10}, {0x08e, 0x08}, {0x08f, 0x00}
};
static struct urtw_pair urtw_8225v2_rf_part2[] = {
{ 0x00, 0x01 }, { 0x01, 0x02 }, { 0x02, 0x42 }, { 0x03, 0x00 },
{ 0x04, 0x00 }, { 0x05, 0x00 }, { 0x06, 0x40 }, { 0x07, 0x00 },
{ 0x08, 0x40 }, { 0x09, 0xfe }, { 0x0a, 0x08 }, { 0x0b, 0x80 },
{ 0x0c, 0x01 }, { 0x0d, 0x43 }, { 0x0e, 0xd3 }, { 0x0f, 0x38 },
{ 0x10, 0x84 }, { 0x11, 0x07 }, { 0x12, 0x20 }, { 0x13, 0x20 },
{ 0x14, 0x00 }, { 0x15, 0x40 }, { 0x16, 0x00 }, { 0x17, 0x40 },
{ 0x18, 0xef }, { 0x19, 0x19 }, { 0x1a, 0x20 }, { 0x1b, 0x15 },
{ 0x1c, 0x04 }, { 0x1d, 0xc5 }, { 0x1e, 0x95 }, { 0x1f, 0x75 },
{ 0x20, 0x1f }, { 0x21, 0x17 }, { 0x22, 0x16 }, { 0x23, 0x80 },
{ 0x24, 0x46 }, { 0x25, 0x00 }, { 0x26, 0x90 }, { 0x27, 0x88 }
};
static struct urtw_pair urtw_8225v2b_rf_part2[] = {
{ 0x00, 0x10 }, { 0x01, 0x0d }, { 0x02, 0x01 }, { 0x03, 0x00 },
{ 0x04, 0x14 }, { 0x05, 0xfb }, { 0x06, 0xfb }, { 0x07, 0x60 },
{ 0x08, 0x00 }, { 0x09, 0x60 }, { 0x0a, 0x00 }, { 0x0b, 0x00 },
{ 0x0c, 0x00 }, { 0x0d, 0x5c }, { 0x0e, 0x00 }, { 0x0f, 0x00 },
{ 0x10, 0x40 }, { 0x11, 0x00 }, { 0x12, 0x40 }, { 0x13, 0x00 },
{ 0x14, 0x00 }, { 0x15, 0x00 }, { 0x16, 0xa8 }, { 0x17, 0x26 },
{ 0x18, 0x32 }, { 0x19, 0x33 }, { 0x1a, 0x07 }, { 0x1b, 0xa5 },
{ 0x1c, 0x6f }, { 0x1d, 0x55 }, { 0x1e, 0xc8 }, { 0x1f, 0xb3 },
{ 0x20, 0x0a }, { 0x21, 0xe1 }, { 0x22, 0x2C }, { 0x23, 0x8a },
{ 0x24, 0x86 }, { 0x25, 0x83 }, { 0x26, 0x34 }, { 0x27, 0x0f },
{ 0x28, 0x4f }, { 0x29, 0x24 }, { 0x2a, 0x6f }, { 0x2b, 0xc2 },
{ 0x2c, 0x6b }, { 0x2d, 0x40 }, { 0x2e, 0x80 }, { 0x2f, 0x00 },
{ 0x30, 0xc0 }, { 0x31, 0xc1 }, { 0x32, 0x58 }, { 0x33, 0xf1 },
{ 0x34, 0x00 }, { 0x35, 0xe4 }, { 0x36, 0x90 }, { 0x37, 0x3e },
{ 0x38, 0x6d }, { 0x39, 0x3c }, { 0x3a, 0xfb }, { 0x3b, 0x07 }
};
static struct urtw_pair urtw_8225v2_rf_part3[] = {
{ 0x00, 0x98 }, { 0x03, 0x20 }, { 0x04, 0x7e }, { 0x05, 0x12 },
{ 0x06, 0xfc }, { 0x07, 0x78 }, { 0x08, 0x2e }, { 0x09, 0x11 },
{ 0x0a, 0x17 }, { 0x0b, 0x11 }, { 0x10, 0x9b }, { 0x11, 0x88 },
{ 0x12, 0x47 }, { 0x13, 0xd0 }, { 0x19, 0x00 }, { 0x1a, 0xa0 },
{ 0x1b, 0x08 }, { 0x1d, 0x00 }, { 0x40, 0x86 }, { 0x41, 0x9d },
{ 0x42, 0x15 }, { 0x43, 0x18 }, { 0x44, 0x36 }, { 0x45, 0x35 },
{ 0x46, 0x2e }, { 0x47, 0x25 }, { 0x48, 0x1c }, { 0x49, 0x12 },
{ 0x4a, 0x09 }, { 0x4b, 0x04 }, { 0x4c, 0x05 }
};
static uint16_t urtw_8225v2_rxgain[] = {
0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0008, 0x0009,
0x000a, 0x000b, 0x0102, 0x0103, 0x0104, 0x0105, 0x0140, 0x0141,
0x0142, 0x0143, 0x0144, 0x0145, 0x0180, 0x0181, 0x0182, 0x0183,
0x0184, 0x0185, 0x0188, 0x0189, 0x018a, 0x018b, 0x0243, 0x0244,
0x0245, 0x0280, 0x0281, 0x0282, 0x0283, 0x0284, 0x0285, 0x0288,
0x0289, 0x028a, 0x028b, 0x028c, 0x0342, 0x0343, 0x0344, 0x0345,
0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0388, 0x0389,
0x038a, 0x038b, 0x038c, 0x038d, 0x0390, 0x0391, 0x0392, 0x0393,
0x0394, 0x0395, 0x0398, 0x0399, 0x039a, 0x039b, 0x039c, 0x039d,
0x03a0, 0x03a1, 0x03a2, 0x03a3, 0x03a4, 0x03a5, 0x03a8, 0x03a9,
0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03b0, 0x03b1, 0x03b2, 0x03b3,
0x03b4, 0x03b5, 0x03b8, 0x03b9, 0x03ba, 0x03bb, 0x03bb
};
static uint16_t urtw_8225v2b_rxgain[] = {
0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409,
0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541,
0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583,
0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644,
0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688,
0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745,
0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789,
0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793,
0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d,
0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9,
0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03b0, 0x03b1, 0x03b2, 0x03b3,
0x03b4, 0x03b5, 0x03b8, 0x03b9, 0x03ba, 0x03bb, 0x03bb
};
static uint8_t urtw_8225v2_tx_gain_cck_ofdm[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
};
static uint8_t urtw_8225v2_txpwr_cck[] = {
0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04
};
static uint8_t urtw_8225v2_txpwr_cck_ch14[] = {
0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00
};
static uint8_t urtw_8225v2b_txpwr_cck[] = {
0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04,
0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03,
0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03,
0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03
};
static uint8_t urtw_8225v2b_txpwr_cck_ch14[] = {
0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00,
0x30, 0x2f, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00,
0x30, 0x2f, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00,
0x30, 0x2f, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00
};
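/*
 * Map net80211 rates (in 0.5 Mbit/s units, e.g. 2 = 1 Mbit/s,
 * 108 = 54 Mbit/s) to RTL8187 hardware rate codes; used by
 * urtw_rate2rtl() and urtw_rtl2rate() below.
 */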
static struct urtw_pair urtw_ratetable[] = {
{ 2, 0 }, { 4, 1 }, { 11, 2 }, { 12, 4 }, { 18, 5 },
{ 22, 3 }, { 24, 6 }, { 36, 7 }, { 48, 8 }, { 72, 9 },
{ 96, 10 }, { 108, 11 }
};
#if 0
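/*
 * Unused reference table: each { reg, val, page } triple appears to
 * correspond to an urtw_8225v2b_rf_part1 entry with the page folded
 * into the offset (page * 0x100 + reg).
 */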
static const uint8_t urtw_8187b_reg_table[][3] = {
{ 0xf0, 0x32, 0 }, { 0xf1, 0x32, 0 }, { 0xf2, 0x00, 0 },
{ 0xf3, 0x00, 0 }, { 0xf4, 0x32, 0 }, { 0xf5, 0x43, 0 },
{ 0xf6, 0x00, 0 }, { 0xf7, 0x00, 0 }, { 0xf8, 0x46, 0 },
{ 0xf9, 0xa4, 0 }, { 0xfa, 0x00, 0 }, { 0xfb, 0x00, 0 },
{ 0xfc, 0x96, 0 }, { 0xfd, 0xa4, 0 }, { 0xfe, 0x00, 0 },
{ 0xff, 0x00, 0 }, { 0x58, 0x4b, 1 }, { 0x59, 0x00, 1 },
{ 0x5a, 0x4b, 1 }, { 0x5b, 0x00, 1 }, { 0x60, 0x4b, 1 },
{ 0x61, 0x09, 1 }, { 0x62, 0x4b, 1 }, { 0x63, 0x09, 1 },
{ 0xce, 0x0f, 1 }, { 0xcf, 0x00, 1 }, { 0xe0, 0xff, 1 },
{ 0xe1, 0x0f, 1 }, { 0xe2, 0x00, 1 }, { 0xf0, 0x4e, 1 },
{ 0xf1, 0x01, 1 }, { 0xf2, 0x02, 1 }, { 0xf3, 0x03, 1 },
{ 0xf4, 0x04, 1 }, { 0xf5, 0x05, 1 }, { 0xf6, 0x06, 1 },
{ 0xf7, 0x07, 1 }, { 0xf8, 0x08, 1 }, { 0x4e, 0x00, 2 },
{ 0x0c, 0x04, 2 }, { 0x21, 0x61, 2 }, { 0x22, 0x68, 2 },
{ 0x23, 0x6f, 2 }, { 0x24, 0x76, 2 }, { 0x25, 0x7d, 2 },
{ 0x26, 0x84, 2 }, { 0x27, 0x8d, 2 }, { 0x4d, 0x08, 2 },
{ 0x50, 0x05, 2 }, { 0x51, 0xf5, 2 }, { 0x52, 0x04, 2 },
{ 0x53, 0xa0, 2 }, { 0x54, 0x1f, 2 }, { 0x55, 0x23, 2 },
{ 0x56, 0x45, 2 }, { 0x57, 0x67, 2 }, { 0x58, 0x08, 2 },
{ 0x59, 0x08, 2 }, { 0x5a, 0x08, 2 }, { 0x5b, 0x08, 2 },
{ 0x60, 0x08, 2 }, { 0x61, 0x08, 2 }, { 0x62, 0x08, 2 },
{ 0x63, 0x08, 2 }, { 0x64, 0xcf, 2 }, { 0x72, 0x56, 2 },
{ 0x73, 0x9a, 2 }, { 0x34, 0xf0, 0 }, { 0x35, 0x0f, 0 },
{ 0x5b, 0x40, 0 }, { 0x84, 0x88, 0 }, { 0x85, 0x24, 0 },
{ 0x88, 0x54, 0 }, { 0x8b, 0xb8, 0 }, { 0x8c, 0x07, 0 },
{ 0x8d, 0x00, 0 }, { 0x94, 0x1b, 0 }, { 0x95, 0x12, 0 },
{ 0x96, 0x00, 0 }, { 0x97, 0x06, 0 }, { 0x9d, 0x1a, 0 },
{ 0x9f, 0x10, 0 }, { 0xb4, 0x22, 0 }, { 0xbe, 0x80, 0 },
{ 0xdb, 0x00, 0 }, { 0xee, 0x00, 0 }, { 0x91, 0x03, 0 },
{ 0x4c, 0x00, 2 }, { 0x9f, 0x00, 3 }, { 0x8c, 0x01, 0 },
{ 0x8d, 0x10, 0 }, { 0x8e, 0x08, 0 }, { 0x8f, 0x00, 0 }
};
#endif
static usb_callback_t urtw_bulk_rx_callback;
static usb_callback_t urtw_bulk_tx_callback;
static usb_callback_t urtw_bulk_tx_status_callback;
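/*
 * RTL8187B transfer layout: one bulk-in data pipe, one bulk-in TX
 * status pipe, four WME bulk-out pipes (BE/BK/VI/VO) and endpoint 12
 * (0xc), which urtw_tx_start() uses for management and control frames.
 */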
static const struct usb_config urtw_8187b_usbconfig[URTW_8187B_N_XFERS] = {
[URTW_8187B_BULK_RX] = {
.type = UE_BULK,
.endpoint = 0x83,
.direction = UE_DIR_IN,
.bufsize = MCLBYTES,
.flags = {
.ext_buffer = 1,
.pipe_bof = 1,
.short_xfer_ok = 1
},
.callback = urtw_bulk_rx_callback
},
[URTW_8187B_BULK_TX_STATUS] = {
.type = UE_BULK,
.endpoint = 0x89,
.direction = UE_DIR_IN,
.bufsize = sizeof(uint64_t),
.flags = {
.pipe_bof = 1,
.short_xfer_ok = 1
},
.callback = urtw_bulk_tx_status_callback
},
[URTW_8187B_BULK_TX_BE] = {
.type = UE_BULK,
.endpoint = URTW_8187B_TXPIPE_BE,
.direction = UE_DIR_OUT,
.bufsize = URTW_TX_MAXSIZE * URTW_TX_DATA_LIST_COUNT,
.flags = {
.force_short_xfer = 1,
.pipe_bof = 1,
},
.callback = urtw_bulk_tx_callback,
.timeout = URTW_DATA_TIMEOUT
},
[URTW_8187B_BULK_TX_BK] = {
.type = UE_BULK,
.endpoint = URTW_8187B_TXPIPE_BK,
.direction = UE_DIR_OUT,
.bufsize = URTW_TX_MAXSIZE,
.flags = {
.ext_buffer = 1,
.force_short_xfer = 1,
.pipe_bof = 1,
},
.callback = urtw_bulk_tx_callback,
.timeout = URTW_DATA_TIMEOUT
},
[URTW_8187B_BULK_TX_VI] = {
.type = UE_BULK,
.endpoint = URTW_8187B_TXPIPE_VI,
.direction = UE_DIR_OUT,
.bufsize = URTW_TX_MAXSIZE,
.flags = {
.ext_buffer = 1,
.force_short_xfer = 1,
.pipe_bof = 1,
},
.callback = urtw_bulk_tx_callback,
.timeout = URTW_DATA_TIMEOUT
},
[URTW_8187B_BULK_TX_VO] = {
.type = UE_BULK,
.endpoint = URTW_8187B_TXPIPE_VO,
.direction = UE_DIR_OUT,
.bufsize = URTW_TX_MAXSIZE,
.flags = {
.ext_buffer = 1,
.force_short_xfer = 1,
.pipe_bof = 1,
},
.callback = urtw_bulk_tx_callback,
.timeout = URTW_DATA_TIMEOUT
},
[URTW_8187B_BULK_TX_EP12] = {
.type = UE_BULK,
.endpoint = 0xc,
.direction = UE_DIR_OUT,
.bufsize = URTW_TX_MAXSIZE,
.flags = {
.ext_buffer = 1,
.force_short_xfer = 1,
.pipe_bof = 1,
},
.callback = urtw_bulk_tx_callback,
.timeout = URTW_DATA_TIMEOUT
}
};
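/*
 * RTL8187L transfer layout: a single bulk-in data pipe and two
 * bulk-out pipes (low and normal priority).
 */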
static const struct usb_config urtw_8187l_usbconfig[URTW_8187L_N_XFERS] = {
[URTW_8187L_BULK_RX] = {
.type = UE_BULK,
.endpoint = 0x81,
.direction = UE_DIR_IN,
.bufsize = MCLBYTES,
.flags = {
.ext_buffer = 1,
.pipe_bof = 1,
.short_xfer_ok = 1
},
.callback = urtw_bulk_rx_callback
},
[URTW_8187L_BULK_TX_LOW] = {
.type = UE_BULK,
.endpoint = 0x2,
.direction = UE_DIR_OUT,
.bufsize = URTW_TX_MAXSIZE * URTW_TX_DATA_LIST_COUNT,
.flags = {
.force_short_xfer = 1,
.pipe_bof = 1,
},
.callback = urtw_bulk_tx_callback,
.timeout = URTW_DATA_TIMEOUT
},
[URTW_8187L_BULK_TX_NORMAL] = {
.type = UE_BULK,
.endpoint = 0x3,
.direction = UE_DIR_OUT,
.bufsize = URTW_TX_MAXSIZE,
.flags = {
.ext_buffer = 1,
.force_short_xfer = 1,
.pipe_bof = 1,
},
.callback = urtw_bulk_tx_callback,
.timeout = URTW_DATA_TIMEOUT
},
};
static struct ieee80211vap *urtw_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode,
int, const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void urtw_vap_delete(struct ieee80211vap *);
-static void urtw_init(void *);
-static void urtw_stop(struct ifnet *);
-static void urtw_stop_locked(struct ifnet *);
-static int urtw_ioctl(struct ifnet *, u_long, caddr_t);
-static void urtw_start(struct ifnet *);
+static void urtw_init(struct urtw_softc *);
+static void urtw_stop(struct urtw_softc *);
+static void urtw_parent(struct ieee80211com *);
+static int urtw_transmit(struct ieee80211com *, struct mbuf *);
+static void urtw_start(struct urtw_softc *);
static int urtw_alloc_rx_data_list(struct urtw_softc *);
static int urtw_alloc_tx_data_list(struct urtw_softc *);
static int urtw_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void urtw_scan_start(struct ieee80211com *);
static void urtw_scan_end(struct ieee80211com *);
static void urtw_set_channel(struct ieee80211com *);
static void urtw_update_mcast(struct ieee80211com *);
static int urtw_tx_start(struct urtw_softc *,
struct ieee80211_node *, struct mbuf *,
struct urtw_data *, int);
static int urtw_newstate(struct ieee80211vap *,
enum ieee80211_state, int);
static void urtw_led_ch(void *);
static void urtw_ledtask(void *, int);
static void urtw_watchdog(void *);
static void urtw_set_multi(void *);
static int urtw_isbmode(uint16_t);
static uint16_t urtw_rate2rtl(uint32_t);
static uint16_t urtw_rtl2rate(uint32_t);
static usb_error_t urtw_set_rate(struct urtw_softc *);
static usb_error_t urtw_update_msr(struct urtw_softc *);
static usb_error_t urtw_read8_c(struct urtw_softc *, int, uint8_t *);
static usb_error_t urtw_read16_c(struct urtw_softc *, int, uint16_t *);
static usb_error_t urtw_read32_c(struct urtw_softc *, int, uint32_t *);
static usb_error_t urtw_write8_c(struct urtw_softc *, int, uint8_t);
static usb_error_t urtw_write16_c(struct urtw_softc *, int, uint16_t);
static usb_error_t urtw_write32_c(struct urtw_softc *, int, uint32_t);
static usb_error_t urtw_eprom_cs(struct urtw_softc *, int);
static usb_error_t urtw_eprom_ck(struct urtw_softc *);
static usb_error_t urtw_eprom_sendbits(struct urtw_softc *, int16_t *,
int);
static usb_error_t urtw_eprom_read32(struct urtw_softc *, uint32_t,
uint32_t *);
static usb_error_t urtw_eprom_readbit(struct urtw_softc *, int16_t *);
static usb_error_t urtw_eprom_writebit(struct urtw_softc *, int16_t);
static usb_error_t urtw_get_macaddr(struct urtw_softc *);
static usb_error_t urtw_get_txpwr(struct urtw_softc *);
static usb_error_t urtw_get_rfchip(struct urtw_softc *);
static usb_error_t urtw_led_init(struct urtw_softc *);
static usb_error_t urtw_8185_rf_pins_enable(struct urtw_softc *);
static usb_error_t urtw_8185_tx_antenna(struct urtw_softc *, uint8_t);
static usb_error_t urtw_8187_write_phy(struct urtw_softc *, uint8_t,
uint32_t);
static usb_error_t urtw_8187_write_phy_ofdm_c(struct urtw_softc *,
uint8_t, uint32_t);
static usb_error_t urtw_8187_write_phy_cck_c(struct urtw_softc *, uint8_t,
uint32_t);
static usb_error_t urtw_8225_setgain(struct urtw_softc *, int16_t);
static usb_error_t urtw_8225_usb_init(struct urtw_softc *);
static usb_error_t urtw_8225_write_c(struct urtw_softc *, uint8_t,
uint16_t);
static usb_error_t urtw_8225_write_s16(struct urtw_softc *, uint8_t, int,
uint16_t *);
static usb_error_t urtw_8225_read(struct urtw_softc *, uint8_t,
uint32_t *);
static usb_error_t urtw_8225_rf_init(struct urtw_softc *);
static usb_error_t urtw_8225_rf_set_chan(struct urtw_softc *, int);
static usb_error_t urtw_8225_rf_set_sens(struct urtw_softc *, int);
static usb_error_t urtw_8225_set_txpwrlvl(struct urtw_softc *, int);
static usb_error_t urtw_8225_rf_stop(struct urtw_softc *);
static usb_error_t urtw_8225v2_rf_init(struct urtw_softc *);
static usb_error_t urtw_8225v2_rf_set_chan(struct urtw_softc *, int);
static usb_error_t urtw_8225v2_set_txpwrlvl(struct urtw_softc *, int);
static usb_error_t urtw_8225v2_setgain(struct urtw_softc *, int16_t);
static usb_error_t urtw_8225_isv2(struct urtw_softc *, int *);
static usb_error_t urtw_8225v2b_rf_init(struct urtw_softc *);
static usb_error_t urtw_8225v2b_rf_set_chan(struct urtw_softc *, int);
static usb_error_t urtw_read8e(struct urtw_softc *, int, uint8_t *);
static usb_error_t urtw_write8e(struct urtw_softc *, int, uint8_t);
static usb_error_t urtw_8180_set_anaparam(struct urtw_softc *, uint32_t);
static usb_error_t urtw_8185_set_anaparam2(struct urtw_softc *, uint32_t);
static usb_error_t urtw_intr_enable(struct urtw_softc *);
static usb_error_t urtw_intr_disable(struct urtw_softc *);
static usb_error_t urtw_reset(struct urtw_softc *);
static usb_error_t urtw_led_on(struct urtw_softc *, int);
static usb_error_t urtw_led_ctl(struct urtw_softc *, int);
static usb_error_t urtw_led_blink(struct urtw_softc *);
static usb_error_t urtw_led_mode0(struct urtw_softc *, int);
static usb_error_t urtw_led_mode1(struct urtw_softc *, int);
static usb_error_t urtw_led_mode2(struct urtw_softc *, int);
static usb_error_t urtw_led_mode3(struct urtw_softc *, int);
static usb_error_t urtw_rx_setconf(struct urtw_softc *);
static usb_error_t urtw_rx_enable(struct urtw_softc *);
static usb_error_t urtw_tx_enable(struct urtw_softc *sc);
static void urtw_free_tx_data_list(struct urtw_softc *);
static void urtw_free_rx_data_list(struct urtw_softc *);
static void urtw_free_data_list(struct urtw_softc *,
struct urtw_data data[], int, int);
static usb_error_t urtw_adapter_start(struct urtw_softc *);
static usb_error_t urtw_adapter_start_b(struct urtw_softc *);
static usb_error_t urtw_set_mode(struct urtw_softc *, uint32_t);
static usb_error_t urtw_8187b_cmd_reset(struct urtw_softc *);
static usb_error_t urtw_do_request(struct urtw_softc *,
struct usb_device_request *, void *);
static usb_error_t urtw_8225v2b_set_txpwrlvl(struct urtw_softc *, int);
static usb_error_t urtw_led_off(struct urtw_softc *, int);
static void urtw_abort_xfers(struct urtw_softc *);
static struct urtw_data *
urtw_getbuf(struct urtw_softc *sc);
static int urtw_compute_txtime(uint16_t, uint16_t, uint8_t,
uint8_t);
static void urtw_updateslot(struct ieee80211com *);
static void urtw_updateslottask(void *, int);
static void urtw_sysctl_node(struct urtw_softc *);
static int
urtw_match(device_t dev)
{
struct usb_attach_arg *uaa = device_get_ivars(dev);
if (uaa->usb_mode != USB_MODE_HOST)
return (ENXIO);
if (uaa->info.bConfigIndex != URTW_CONFIG_INDEX)
return (ENXIO);
if (uaa->info.bIfaceIndex != URTW_IFACE_INDEX)
return (ENXIO);
return (usbd_lookup_id_by_uaa(urtw_devs, sizeof(urtw_devs), uaa));
}
static int
urtw_attach(device_t dev)
{
const struct usb_config *setup_start;
int ret = ENXIO;
struct urtw_softc *sc = device_get_softc(dev);
struct usb_attach_arg *uaa = device_get_ivars(dev);
- struct ieee80211com *ic;
- struct ifnet *ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
uint8_t bands, iface_index = URTW_IFACE_INDEX; /* XXX */
uint16_t n_setup;
uint32_t data;
usb_error_t error;
device_set_usb_desc(dev);
sc->sc_dev = dev;
sc->sc_udev = uaa->device;
if (USB_GET_DRIVER_INFO(uaa) == URTW_REV_RTL8187B)
sc->sc_flags |= URTW_RTL8187B;
#ifdef URTW_DEBUG
sc->sc_debug = urtw_debug;
#endif
mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), MTX_NETWORK_LOCK,
MTX_DEF);
usb_callout_init_mtx(&sc->sc_led_ch, &sc->sc_mtx, 0);
TASK_INIT(&sc->sc_led_task, 0, urtw_ledtask, sc);
TASK_INIT(&sc->sc_updateslot_task, 0, urtw_updateslottask, sc);
callout_init(&sc->sc_watchdog_ch, 0);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
if (sc->sc_flags & URTW_RTL8187B) {
setup_start = urtw_8187b_usbconfig;
n_setup = URTW_8187B_N_XFERS;
} else {
setup_start = urtw_8187l_usbconfig;
n_setup = URTW_8187L_N_XFERS;
}
error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
setup_start, n_setup, sc, &sc->sc_mtx);
if (error) {
device_printf(dev, "could not allocate USB transfers, "
"err=%s\n", usbd_errstr(error));
ret = ENXIO;
goto fail0;
}
if (sc->sc_flags & URTW_RTL8187B) {
sc->sc_tx_dma_buf =
usbd_xfer_get_frame_buffer(sc->sc_xfer[
URTW_8187B_BULK_TX_BE], 0);
} else {
sc->sc_tx_dma_buf =
usbd_xfer_get_frame_buffer(sc->sc_xfer[
URTW_8187L_BULK_TX_LOW], 0);
}
URTW_LOCK(sc);
urtw_read32_m(sc, URTW_RX, &data);
sc->sc_epromtype = (data & URTW_RX_9356SEL) ? URTW_EEPROM_93C56 :
URTW_EEPROM_93C46;
error = urtw_get_rfchip(sc);
if (error != 0)
goto fail;
error = urtw_get_macaddr(sc);
if (error != 0)
goto fail;
error = urtw_get_txpwr(sc);
if (error != 0)
goto fail;
error = urtw_led_init(sc);
if (error != 0)
goto fail;
URTW_UNLOCK(sc);
sc->sc_rts_retry = URTW_DEFAULT_RTS_RETRY;
sc->sc_tx_retry = URTW_DEFAULT_TX_RETRY;
sc->sc_currate = 3;
sc->sc_preamble_mode = urtw_preamble_mode;
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "can not allocate ifnet\n");
- ret = ENOMEM;
- goto fail1;
- }
-
- ifp->if_softc = sc;
- if_initname(ifp, "urtw", device_get_unit(sc->sc_dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = urtw_init;
- ifp->if_ioctl = urtw_ioctl;
- ifp->if_start = urtw_start;
- /* XXX URTW_TX_DATA_LIST_COUNT */
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic = ifp->if_l2com;
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA | /* station mode */
IEEE80211_C_MONITOR | /* monitor mode supported */
IEEE80211_C_TXPMGT | /* tx power management */
IEEE80211_C_SHPREAMBLE | /* short preamble supported */
IEEE80211_C_SHSLOT | /* short slot time supported */
IEEE80211_C_BGSCAN | /* capable of bg scanning */
IEEE80211_C_WPA; /* 802.11i */
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
ieee80211_init_channels(ic, NULL, &bands);
- ieee80211_ifattach(ic, sc->sc_bssid);
+ ieee80211_ifattach(ic);
ic->ic_raw_xmit = urtw_raw_xmit;
ic->ic_scan_start = urtw_scan_start;
ic->ic_scan_end = urtw_scan_end;
ic->ic_set_channel = urtw_set_channel;
ic->ic_updateslot = urtw_updateslot;
ic->ic_vap_create = urtw_vap_create;
ic->ic_vap_delete = urtw_vap_delete;
ic->ic_update_mcast = urtw_update_mcast;
+ ic->ic_parent = urtw_parent;
+ ic->ic_transmit = urtw_transmit;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
URTW_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
URTW_RX_RADIOTAP_PRESENT);
urtw_sysctl_node(sc);
if (bootverbose)
ieee80211_announce(ic);
return (0);
-fail: URTW_UNLOCK(sc);
-fail1: usbd_transfer_unsetup(sc->sc_xfer, (sc->sc_flags & URTW_RTL8187B) ?
+fail:
+ URTW_UNLOCK(sc);
+ usbd_transfer_unsetup(sc->sc_xfer, (sc->sc_flags & URTW_RTL8187B) ?
URTW_8187B_N_XFERS : URTW_8187L_N_XFERS);
fail0:
return (ret);
}
static int
urtw_detach(device_t dev)
{
struct urtw_softc *sc = device_get_softc(dev);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
unsigned int x;
unsigned int n_xfers;
/* Prevent further ioctls */
URTW_LOCK(sc);
sc->sc_flags |= URTW_DETACHED;
+ urtw_stop(sc);
URTW_UNLOCK(sc);
- urtw_stop(ifp);
-
ieee80211_draintask(ic, &sc->sc_updateslot_task);
ieee80211_draintask(ic, &sc->sc_led_task);
usb_callout_drain(&sc->sc_led_ch);
callout_drain(&sc->sc_watchdog_ch);
n_xfers = (sc->sc_flags & URTW_RTL8187B) ?
URTW_8187B_N_XFERS : URTW_8187L_N_XFERS;
/* prevent further allocations from RX/TX data lists */
URTW_LOCK(sc);
STAILQ_INIT(&sc->sc_tx_active);
STAILQ_INIT(&sc->sc_tx_inactive);
STAILQ_INIT(&sc->sc_tx_pending);
STAILQ_INIT(&sc->sc_rx_active);
STAILQ_INIT(&sc->sc_rx_inactive);
URTW_UNLOCK(sc);
/* drain USB transfers */
for (x = 0; x != n_xfers; x++)
usbd_transfer_drain(sc->sc_xfer[x]);
/* free data buffers */
URTW_LOCK(sc);
urtw_free_tx_data_list(sc);
urtw_free_rx_data_list(sc);
URTW_UNLOCK(sc);
/* free USB transfers and some data buffers */
usbd_transfer_unsetup(sc->sc_xfer, n_xfers);
ieee80211_ifdetach(ic);
- if_free(ifp);
+ mbufq_drain(&sc->sc_snd);
mtx_destroy(&sc->sc_mtx);
return (0);
}
static void
urtw_free_tx_data_list(struct urtw_softc *sc)
{
urtw_free_data_list(sc, sc->sc_tx, URTW_TX_DATA_LIST_COUNT, 0);
}
static void
urtw_free_rx_data_list(struct urtw_softc *sc)
{
urtw_free_data_list(sc, sc->sc_rx, URTW_RX_DATA_LIST_COUNT, 1);
}
static void
urtw_free_data_list(struct urtw_softc *sc, struct urtw_data data[], int ndata,
int fillmbuf)
{
int i;
for (i = 0; i < ndata; i++) {
struct urtw_data *dp = &data[i];
if (fillmbuf == 1) {
if (dp->m != NULL) {
m_freem(dp->m);
dp->m = NULL;
dp->buf = NULL;
}
} else {
dp->buf = NULL;
}
if (dp->ni != NULL) {
ieee80211_free_node(dp->ni);
dp->ni = NULL;
}
}
}
static struct ieee80211vap *
urtw_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct urtw_vap *uvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return (NULL);
- uvp = (struct urtw_vap *) malloc(sizeof(struct urtw_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (uvp == NULL)
- return (NULL);
+ uvp = malloc(sizeof(struct urtw_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &uvp->vap;
/* enable s/w bmiss handling for sta mode */
if (ieee80211_vap_setup(ic, vap, name, unit, opmode,
- flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) {
+ flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) {
/* out of memory */
free(uvp, M_80211_VAP);
return (NULL);
}
/* override state transition machine */
uvp->newstate = vap->iv_newstate;
vap->iv_newstate = urtw_newstate;
/* complete setup */
ieee80211_vap_attach(vap, ieee80211_media_change,
- ieee80211_media_status);
+ ieee80211_media_status, mac);
ic->ic_opmode = opmode;
return (vap);
}
static void
urtw_vap_delete(struct ieee80211vap *vap)
{
struct urtw_vap *uvp = URTW_VAP(vap);
ieee80211_vap_detach(vap);
free(uvp, M_80211_VAP);
}
static void
-urtw_init_locked(void *arg)
+urtw_init(struct urtw_softc *sc)
{
- int ret;
- struct urtw_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
usb_error_t error;
+ int ret;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- urtw_stop_locked(ifp);
+ URTW_ASSERT_LOCKED(sc);
+ if (sc->sc_flags & URTW_RUNNING)
+ urtw_stop(sc);
+
error = (sc->sc_flags & URTW_RTL8187B) ? urtw_adapter_start_b(sc) :
urtw_adapter_start(sc);
if (error != 0)
goto fail;
/* reset softc variables */
sc->sc_txtimer = 0;
if (!(sc->sc_flags & URTW_INIT_ONCE)) {
ret = urtw_alloc_rx_data_list(sc);
if (ret != 0)
goto fail;
ret = urtw_alloc_tx_data_list(sc);
if (ret != 0)
goto fail;
sc->sc_flags |= URTW_INIT_ONCE;
}
error = urtw_rx_enable(sc);
if (error != 0)
goto fail;
error = urtw_tx_enable(sc);
if (error != 0)
goto fail;
if (sc->sc_flags & URTW_RTL8187B)
usbd_transfer_start(sc->sc_xfer[URTW_8187B_BULK_TX_STATUS]);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_flags |= URTW_RUNNING;
callout_reset(&sc->sc_watchdog_ch, hz, urtw_watchdog, sc);
fail:
return;
}
-static void
-urtw_init(void *arg)
-{
- struct urtw_softc *sc = arg;
-
- URTW_LOCK(sc);
- urtw_init_locked(arg);
- URTW_UNLOCK(sc);
-}
-
static usb_error_t
urtw_adapter_start_b(struct urtw_softc *sc)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
uint8_t data8;
usb_error_t error;
error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG);
if (error)
goto fail;
urtw_read8_m(sc, URTW_CONFIG3, &data8);
urtw_write8_m(sc, URTW_CONFIG3,
data8 | URTW_CONFIG3_ANAPARAM_WRITE | URTW_CONFIG3_GNT_SELECT);
urtw_write32_m(sc, URTW_ANAPARAM2, URTW_8187B_8225_ANAPARAM2_ON);
urtw_write32_m(sc, URTW_ANAPARAM, URTW_8187B_8225_ANAPARAM_ON);
urtw_write8_m(sc, URTW_ANAPARAM3, URTW_8187B_8225_ANAPARAM3_ON);
urtw_write8_m(sc, 0x61, 0x10);
urtw_read8_m(sc, 0x62, &data8);
urtw_write8_m(sc, 0x62, data8 & ~(1 << 5));
urtw_write8_m(sc, 0x62, data8 | (1 << 5));
urtw_read8_m(sc, URTW_CONFIG3, &data8);
data8 &= ~URTW_CONFIG3_ANAPARAM_WRITE;
urtw_write8_m(sc, URTW_CONFIG3, data8);
error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL);
if (error)
goto fail;
error = urtw_8187b_cmd_reset(sc);
if (error)
goto fail;
error = sc->sc_rf_init(sc);
if (error != 0)
goto fail;
urtw_write8_m(sc, URTW_CMD, URTW_CMD_RX_ENABLE | URTW_CMD_TX_ENABLE);
/* fix RTL8187B RX stall */
error = urtw_intr_enable(sc);
if (error)
goto fail;
error = urtw_write8e(sc, 0x41, 0xf4);
if (error)
goto fail;
error = urtw_write8e(sc, 0x40, 0x00);
if (error)
goto fail;
error = urtw_write8e(sc, 0x42, 0x00);
if (error)
goto fail;
error = urtw_write8e(sc, 0x42, 0x01);
if (error)
goto fail;
error = urtw_write8e(sc, 0x40, 0x0f);
if (error)
goto fail;
error = urtw_write8e(sc, 0x42, 0x00);
if (error)
goto fail;
error = urtw_write8e(sc, 0x42, 0x01);
if (error)
goto fail;
urtw_read8_m(sc, 0xdb, &data8);
urtw_write8_m(sc, 0xdb, data8 | (1 << 2));
urtw_write16_m(sc, 0x372, 0x59fa);
urtw_write16_m(sc, 0x374, 0x59d2);
urtw_write16_m(sc, 0x376, 0x59d2);
urtw_write16_m(sc, 0x378, 0x19fa);
urtw_write16_m(sc, 0x37a, 0x19fa);
urtw_write16_m(sc, 0x37c, 0x00d0);
urtw_write8_m(sc, 0x61, 0);
urtw_write8_m(sc, 0x180, 0x0f);
urtw_write8_m(sc, 0x183, 0x03);
urtw_write8_m(sc, 0xda, 0x10);
urtw_write8_m(sc, 0x24d, 0x08);
urtw_write32_m(sc, URTW_HSSI_PARA, 0x0600321b);
urtw_write16_m(sc, 0x1ec, 0x800); /* RX MAX SIZE */
fail:
return (error);
#undef N
}
static usb_error_t
urtw_adapter_start(struct urtw_softc *sc)
{
+ struct ieee80211com *ic = &sc->sc_ic;
usb_error_t error;
error = urtw_reset(sc);
if (error)
goto fail;
urtw_write8_m(sc, URTW_ADDR_MAGIC1, 0);
urtw_write8_m(sc, URTW_GPIO, 0);
/* for led */
urtw_write8_m(sc, URTW_ADDR_MAGIC1, 4);
error = urtw_led_ctl(sc, URTW_LED_CTL_POWER_ON);
if (error != 0)
goto fail;
error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG);
if (error)
goto fail;
/* apply the MAC address again. */
- urtw_write32_m(sc, URTW_MAC0, ((uint32_t *)sc->sc_bssid)[0]);
- urtw_write16_m(sc, URTW_MAC4, ((uint32_t *)sc->sc_bssid)[1] & 0xffff);
+ urtw_write32_m(sc, URTW_MAC0, ((uint32_t *)ic->ic_macaddr)[0]);
+ urtw_write16_m(sc, URTW_MAC4, ((uint32_t *)ic->ic_macaddr)[1] & 0xffff);
error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL);
if (error)
goto fail;
error = urtw_update_msr(sc);
if (error)
goto fail;
urtw_write32_m(sc, URTW_INT_TIMEOUT, 0);
urtw_write8_m(sc, URTW_WPA_CONFIG, 0);
urtw_write8_m(sc, URTW_RATE_FALLBACK, URTW_RATE_FALLBACK_ENABLE | 0x1);
error = urtw_set_rate(sc);
if (error != 0)
goto fail;
error = sc->sc_rf_init(sc);
if (error != 0)
goto fail;
if (sc->sc_rf_set_sens != NULL)
sc->sc_rf_set_sens(sc, sc->sc_sens);
/* XXX is write16 the correct access width here? */
urtw_write16_m(sc, URTW_PSR, 1);
urtw_write16_m(sc, URTW_ADDR_MAGIC2, 0x10);
urtw_write8_m(sc, URTW_TALLY_SEL, 0x80);
urtw_write8_m(sc, URTW_ADDR_MAGIC3, 0x60);
/* XXX is write16 the correct access width here? */
urtw_write16_m(sc, URTW_PSR, 0);
urtw_write8_m(sc, URTW_ADDR_MAGIC1, 4);
error = urtw_intr_enable(sc);
if (error != 0)
goto fail;
fail:
return (error);
}
static usb_error_t
urtw_set_mode(struct urtw_softc *sc, uint32_t mode)
{
uint8_t data;
usb_error_t error;
urtw_read8_m(sc, URTW_EPROM_CMD, &data);
data = (data & ~URTW_EPROM_CMD_MASK) | (mode << URTW_EPROM_CMD_SHIFT);
data = data & ~(URTW_EPROM_CS | URTW_EPROM_CK);
urtw_write8_m(sc, URTW_EPROM_CMD, data);
fail:
return (error);
}
static usb_error_t
urtw_8187b_cmd_reset(struct urtw_softc *sc)
{
int i;
uint8_t data8;
usb_error_t error;
/* XXX this code duplicates part of urtw_reset(). */
urtw_read8_m(sc, URTW_CMD, &data8);
data8 = (data8 & 0x2) | URTW_CMD_RST;
urtw_write8_m(sc, URTW_CMD, data8);
for (i = 0; i < 20; i++) {
usb_pause_mtx(&sc->sc_mtx, 2);
urtw_read8_m(sc, URTW_CMD, &data8);
if (!(data8 & URTW_CMD_RST))
break;
}
if (i >= 20) {
device_printf(sc->sc_dev, "reset timeout\n");
goto fail;
}
fail:
return (error);
}
static usb_error_t
urtw_do_request(struct urtw_softc *sc,
struct usb_device_request *req, void *data)
{
usb_error_t err;
int ntries = 10;
URTW_ASSERT_LOCKED(sc);
while (ntries--) {
err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx,
req, data, 0, NULL, 250 /* ms */);
if (err == 0)
break;
DPRINTF(sc, URTW_DEBUG_INIT,
"Control request failed, %s (retrying)\n",
usbd_errstr(err));
usb_pause_mtx(&sc->sc_mtx, hz / 100);
}
return (err);
}
static void
-urtw_stop_locked(struct ifnet *ifp)
+urtw_stop(struct urtw_softc *sc)
{
- struct urtw_softc *sc = ifp->if_softc;
uint8_t data8;
usb_error_t error;
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ URTW_ASSERT_LOCKED(sc);
+ sc->sc_flags &= ~URTW_RUNNING;
+
error = urtw_intr_disable(sc);
if (error)
goto fail;
urtw_read8_m(sc, URTW_CMD, &data8);
data8 &= ~(URTW_CMD_RX_ENABLE | URTW_CMD_TX_ENABLE);
urtw_write8_m(sc, URTW_CMD, data8);
error = sc->sc_rf_stop(sc);
if (error != 0)
goto fail;
error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG);
if (error)
goto fail;
urtw_read8_m(sc, URTW_CONFIG4, &data8);
urtw_write8_m(sc, URTW_CONFIG4, data8 | URTW_CONFIG4_VCOOFF);
error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL);
if (error)
goto fail;
fail:
if (error)
device_printf(sc->sc_dev, "failed to stop (%s)\n",
usbd_errstr(error));
usb_callout_stop(&sc->sc_led_ch);
callout_stop(&sc->sc_watchdog_ch);
urtw_abort_xfers(sc);
}
static void
-urtw_stop(struct ifnet *ifp)
-{
- struct urtw_softc *sc = ifp->if_softc;
-
- URTW_LOCK(sc);
- urtw_stop_locked(ifp);
- URTW_UNLOCK(sc);
-}
-
-static void
urtw_abort_xfers(struct urtw_softc *sc)
{
int i, max;
URTW_ASSERT_LOCKED(sc);
max = (sc->sc_flags & URTW_RTL8187B) ? URTW_8187B_N_XFERS :
URTW_8187L_N_XFERS;
/* abort any pending transfers */
for (i = 0; i < max; i++)
usbd_transfer_stop(sc->sc_xfer[i]);
}
-static int
-urtw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+urtw_parent(struct ieee80211com *ic)
{
- struct ieee80211com *ic = ifp->if_l2com;
struct urtw_softc *sc = ic->ic_softc;
- struct ifreq *ifr = (struct ifreq *) data;
- int error;
int startall = 0;
URTW_LOCK(sc);
- error = (sc->sc_flags & URTW_DETACHED) ? ENXIO : 0;
- URTW_UNLOCK(sc);
- if (error)
- return (error);
+ if (sc->sc_flags & URTW_DETACHED) {
+ URTW_UNLOCK(sc);
+ return;
+ }
- switch (cmd) {
- case SIOCSIFFLAGS:
- if (ifp->if_flags & IFF_UP) {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- if ((ifp->if_flags ^ sc->sc_if_flags) &
- (IFF_ALLMULTI | IFF_PROMISC))
- urtw_set_multi(sc);
- } else {
- urtw_init(sc);
- startall = 1;
- }
+ if (ic->ic_nrunning > 0) {
+ if (sc->sc_flags & URTW_RUNNING) {
+ if (ic->ic_promisc > 0 || ic->ic_allmulti > 0)
+ urtw_set_multi(sc);
} else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- urtw_stop(ifp);
+ urtw_init(sc);
+ startall = 1;
}
- sc->sc_if_flags = ifp->if_flags;
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
+ } else if (sc->sc_flags & URTW_RUNNING)
+ urtw_stop(sc);
+ URTW_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
+}
+
+static int
+urtw_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct urtw_softc *sc = ic->ic_softc;
+ int error;
+
+ URTW_LOCK(sc);
+ if ((sc->sc_flags & URTW_RUNNING) == 0) {
+ URTW_UNLOCK(sc);
+ return (ENXIO);
}
- return (error);
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ URTW_UNLOCK(sc);
+ return (error);
+ }
+ urtw_start(sc);
+ URTW_UNLOCK(sc);
+
+ return (0);
}
static void
-urtw_start(struct ifnet *ifp)
+urtw_start(struct urtw_softc *sc)
{
struct urtw_data *bf;
- struct urtw_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct mbuf *m;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ URTW_ASSERT_LOCKED(sc);
+
+ if ((sc->sc_flags & URTW_RUNNING) == 0)
return;
- URTW_LOCK(sc);
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
+ while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
bf = urtw_getbuf(sc);
if (bf == NULL) {
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
+ mbufq_prepend(&sc->sc_snd, m);
break;
}
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
m->m_pkthdr.rcvif = NULL;
if (urtw_tx_start(sc, ni, m, bf, URTW_PRIORITY_NORMAL) != 0) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next);
ieee80211_free_node(ni);
break;
}
sc->sc_txtimer = 5;
callout_reset(&sc->sc_watchdog_ch, hz, urtw_watchdog, sc);
}
- URTW_UNLOCK(sc);
}
static int
urtw_alloc_data_list(struct urtw_softc *sc, struct urtw_data data[],
int ndata, int maxsz, void *dma_buf)
{
int i, error;
for (i = 0; i < ndata; i++) {
struct urtw_data *dp = &data[i];
dp->sc = sc;
if (dma_buf == NULL) {
dp->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (dp->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
error = ENOMEM;
goto fail;
}
dp->buf = mtod(dp->m, uint8_t *);
} else {
dp->m = NULL;
dp->buf = ((uint8_t *)dma_buf) +
(i * maxsz);
}
dp->ni = NULL;
}
return (0);
fail: urtw_free_data_list(sc, data, ndata, 1);
return (error);
}
static int
urtw_alloc_rx_data_list(struct urtw_softc *sc)
{
int error, i;
error = urtw_alloc_data_list(sc,
sc->sc_rx, URTW_RX_DATA_LIST_COUNT,
MCLBYTES, NULL /* mbufs */);
if (error != 0)
return (error);
STAILQ_INIT(&sc->sc_rx_active);
STAILQ_INIT(&sc->sc_rx_inactive);
for (i = 0; i < URTW_RX_DATA_LIST_COUNT; i++)
STAILQ_INSERT_HEAD(&sc->sc_rx_inactive, &sc->sc_rx[i], next);
return (0);
}
static int
urtw_alloc_tx_data_list(struct urtw_softc *sc)
{
int error, i;
error = urtw_alloc_data_list(sc,
sc->sc_tx, URTW_TX_DATA_LIST_COUNT, URTW_TX_MAXSIZE,
sc->sc_tx_dma_buf /* no mbufs */);
if (error != 0)
return (error);
STAILQ_INIT(&sc->sc_tx_active);
STAILQ_INIT(&sc->sc_tx_inactive);
STAILQ_INIT(&sc->sc_tx_pending);
for (i = 0; i < URTW_TX_DATA_LIST_COUNT; i++)
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, &sc->sc_tx[i],
next);
return (0);
}
static int
urtw_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
- struct urtw_data *bf;
struct urtw_softc *sc = ic->ic_softc;
+ struct urtw_data *bf;
/* prevent management frames from being sent if we're not ready */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!(sc->sc_flags & URTW_RUNNING)) {
m_freem(m);
ieee80211_free_node(ni);
return ENETDOWN;
}
URTW_LOCK(sc);
bf = urtw_getbuf(sc);
if (bf == NULL) {
ieee80211_free_node(ni);
m_freem(m);
URTW_UNLOCK(sc);
return (ENOBUFS); /* XXX */
}
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
if (urtw_tx_start(sc, ni, m, bf, URTW_PRIORITY_LOW) != 0) {
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next);
URTW_UNLOCK(sc);
return (EIO);
}
URTW_UNLOCK(sc);
sc->sc_txtimer = 5;
return (0);
}
static void
urtw_scan_start(struct ieee80211com *ic)
{
/* XXX do nothing? */
}
static void
urtw_scan_end(struct ieee80211com *ic)
{
/* XXX do nothing? */
}
static void
urtw_set_channel(struct ieee80211com *ic)
{
- struct urtw_softc *sc = ic->ic_softc;
- struct ifnet *ifp = sc->sc_ifp;
+ struct urtw_softc *sc = ic->ic_softc;
uint32_t data, orig;
usb_error_t error;
/*
* If the user sets a channel explicitly using ifconfig(8), this
* function can be called earlier than expected; in some cases the
* initialization would fail if the channel is set before init has
* completed.
*/
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ if (!(sc->sc_flags & URTW_RUNNING))
return;
if (sc->sc_curchan != NULL && sc->sc_curchan == ic->ic_curchan)
return;
URTW_LOCK(sc);
/*
* While changing the channel we need to temporarily disable
* TX.
*/
urtw_read32_m(sc, URTW_TX_CONF, &orig);
data = orig & ~URTW_TX_LOOPBACK_MASK;
urtw_write32_m(sc, URTW_TX_CONF, data | URTW_TX_LOOPBACK_MAC);
error = sc->sc_rf_set_chan(sc, ieee80211_chan2ieee(ic, ic->ic_curchan));
if (error != 0)
goto fail;
usb_pause_mtx(&sc->sc_mtx, 10);
urtw_write32_m(sc, URTW_TX_CONF, orig);
urtw_write16_m(sc, URTW_ATIM_WND, 2);
urtw_write16_m(sc, URTW_ATIM_TR_ITV, 100);
urtw_write16_m(sc, URTW_BEACON_INTERVAL, 100);
urtw_write16_m(sc, URTW_BEACON_INTERVAL_TIME, 100);
fail:
URTW_UNLOCK(sc);
sc->sc_curchan = ic->ic_curchan;
if (error != 0)
device_printf(sc->sc_dev, "could not change the channel\n");
}
static void
urtw_update_mcast(struct ieee80211com *ic)
{
/* XXX do nothing? */
}
static int
urtw_tx_start(struct urtw_softc *sc, struct ieee80211_node *ni, struct mbuf *m0,
struct urtw_data *data, int prior)
{
- struct ifnet *ifp = sc->sc_ifp;
struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
struct ieee80211_key *k;
const struct ieee80211_txparam *tp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = ni->ni_vap;
struct usb_xfer *rtl8187b_pipes[URTW_8187B_TXPIPE_MAX] = {
sc->sc_xfer[URTW_8187B_BULK_TX_BE],
sc->sc_xfer[URTW_8187B_BULK_TX_BK],
sc->sc_xfer[URTW_8187B_BULK_TX_VI],
sc->sc_xfer[URTW_8187B_BULK_TX_VO]
};
struct usb_xfer *xfer;
int dur = 0, rtsdur = 0, rtsenable = 0, ctsenable = 0, rate,
pkttime = 0, txdur = 0, isshort = 0, xferlen;
uint16_t acktime, rtstime, ctstime;
uint32_t flags;
usb_error_t error;
URTW_ASSERT_LOCKED(sc);
/*
* Software crypto.
*/
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
device_printf(sc->sc_dev,
"ieee80211_crypto_encap returns NULL.\n");
/* XXX we don't expect fragmented frames */
m_freem(m0);
return (ENOBUFS);
}
/* in case packet header moved, reset pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
if (ieee80211_radiotap_active_vap(vap)) {
struct urtw_tx_radiotap_header *tap = &sc->sc_txtap;
/* XXX Are variables correct? */
tap->wt_flags = 0;
tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq);
tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags);
ieee80211_radiotap_tx(vap, m0);
}
if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT ||
(wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
rate = tp->mgmtrate;
} else {
tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
/* for data frames */
if (IEEE80211_IS_MULTICAST(wh->i_addr1))
rate = tp->mcastrate;
else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
rate = tp->ucastrate;
else
rate = urtw_rtl2rate(sc->sc_currate);
}
sc->sc_stats.txrates[sc->sc_currate]++;
if (IEEE80211_IS_MULTICAST(wh->i_addr1))
txdur = pkttime = urtw_compute_txtime(m0->m_pkthdr.len +
IEEE80211_CRC_LEN, rate, 0, 0);
else {
acktime = urtw_compute_txtime(14, 2, 0, 0);
if ((m0->m_pkthdr.len + 4) > vap->iv_rtsthreshold) {
rtsenable = 1;
ctsenable = 0;
rtstime = urtw_compute_txtime(URTW_ACKCTS_LEN, 2, 0, 0);
ctstime = urtw_compute_txtime(14, 2, 0, 0);
pkttime = urtw_compute_txtime(m0->m_pkthdr.len +
IEEE80211_CRC_LEN, rate, 0, isshort);
rtsdur = ctstime + pkttime + acktime +
3 * URTW_ASIFS_TIME;
txdur = rtstime + rtsdur;
} else {
rtsenable = ctsenable = rtsdur = 0;
pkttime = urtw_compute_txtime(m0->m_pkthdr.len +
IEEE80211_CRC_LEN, rate, 0, isshort);
txdur = pkttime + URTW_ASIFS_TIME + acktime;
}
if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
dur = urtw_compute_txtime(m0->m_pkthdr.len +
IEEE80211_CRC_LEN, rate, 0, isshort) +
3 * URTW_ASIFS_TIME +
2 * acktime;
else
dur = URTW_ASIFS_TIME + acktime;
}
USETW(wh->i_dur, dur);
xferlen = m0->m_pkthdr.len;
xferlen += (sc->sc_flags & URTW_RTL8187B) ? (4 * 8) : (4 * 3);
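/*
 * If the length lands exactly on a 64- or 512-byte USB packet
 * boundary, pad by one byte so the bulk transfer is terminated by a
 * short packet (presumably to avoid a separate zero-length packet).
 */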
if ((0 == xferlen % 64) || (0 == xferlen % 512))
xferlen += 1;
memset(data->buf, 0, URTW_TX_MAXSIZE);
flags = m0->m_pkthdr.len & 0xfff;
flags |= URTW_TX_FLAG_NO_ENC;
if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
(ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE) &&
(sc->sc_preamble_mode == URTW_PREAMBLE_MODE_SHORT) &&
(sc->sc_currate != 0))
flags |= URTW_TX_FLAG_SPLCP;
if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
flags |= URTW_TX_FLAG_MOREFRAG;
flags |= (sc->sc_currate & 0xf) << URTW_TX_FLAG_TXRATE_SHIFT;
if (sc->sc_flags & URTW_RTL8187B) {
struct urtw_8187b_txhdr *tx;
tx = (struct urtw_8187b_txhdr *)data->buf;
if (ctsenable)
flags |= URTW_TX_FLAG_CTS;
if (rtsenable) {
flags |= URTW_TX_FLAG_RTS;
flags |= (urtw_rate2rtl(11) & 0xf) <<
URTW_TX_FLAG_RTSRATE_SHIFT;
tx->rtsdur = rtsdur;
}
tx->flag = htole32(flags);
tx->txdur = txdur;
if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
IEEE80211_FC0_TYPE_MGT &&
(wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
IEEE80211_FC0_SUBTYPE_PROBE_RESP)
tx->retry = 1;
else
tx->retry = URTW_TX_MAXRETRY;
m_copydata(m0, 0, m0->m_pkthdr.len, (uint8_t *)(tx + 1));
} else {
struct urtw_8187l_txhdr *tx;
tx = (struct urtw_8187l_txhdr *)data->buf;
if (rtsenable) {
flags |= URTW_TX_FLAG_RTS;
tx->rtsdur = rtsdur;
}
flags |= (urtw_rate2rtl(11) & 0xf) << URTW_TX_FLAG_RTSRATE_SHIFT;
tx->flag = htole32(flags);
tx->retry = 3; /* CW minimum */
tx->retry |= 7 << 4; /* CW maximum */
tx->retry |= URTW_TX_MAXRETRY << 8; /* retry limitation */
m_copydata(m0, 0, m0->m_pkthdr.len, (uint8_t *)(tx + 1));
}
data->buflen = xferlen;
data->ni = ni;
data->m = m0;
if (sc->sc_flags & URTW_RTL8187B) {
switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
case IEEE80211_FC0_TYPE_CTL:
case IEEE80211_FC0_TYPE_MGT:
xfer = sc->sc_xfer[URTW_8187B_BULK_TX_EP12];
break;
default:
KASSERT(M_WME_GETAC(m0) < URTW_8187B_TXPIPE_MAX,
("unsupported WME pipe %d", M_WME_GETAC(m0)));
xfer = rtl8187b_pipes[M_WME_GETAC(m0)];
break;
}
} else
xfer = (prior == URTW_PRIORITY_LOW) ?
sc->sc_xfer[URTW_8187L_BULK_TX_LOW] :
sc->sc_xfer[URTW_8187L_BULK_TX_NORMAL];
STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next);
usbd_transfer_start(xfer);
error = urtw_led_ctl(sc, URTW_LED_CTL_TX);
if (error != 0)
device_printf(sc->sc_dev, "could not control LED (%d)\n",
error);
return (0);
}
static int
urtw_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct ieee80211com *ic = vap->iv_ic;
struct urtw_softc *sc = ic->ic_softc;
struct urtw_vap *uvp = URTW_VAP(vap);
struct ieee80211_node *ni;
usb_error_t error = 0;
DPRINTF(sc, URTW_DEBUG_STATE, "%s: %s -> %s\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate]);
sc->sc_state = nstate;
IEEE80211_UNLOCK(ic);
URTW_LOCK(sc);
usb_callout_stop(&sc->sc_led_ch);
callout_stop(&sc->sc_watchdog_ch);
switch (nstate) {
case IEEE80211_S_INIT:
case IEEE80211_S_SCAN:
case IEEE80211_S_AUTH:
case IEEE80211_S_ASSOC:
break;
case IEEE80211_S_RUN:
ni = ieee80211_ref_node(vap->iv_bss);
/* setting bssid. */
urtw_write32_m(sc, URTW_BSSID, ((uint32_t *)ni->ni_bssid)[0]);
urtw_write16_m(sc, URTW_BSSID + 4,
((uint16_t *)ni->ni_bssid)[2]);
urtw_update_msr(sc);
/* XXX the values below may be incorrect. */
urtw_write16_m(sc, URTW_ATIM_WND, 2);
urtw_write16_m(sc, URTW_ATIM_TR_ITV, 100);
urtw_write16_m(sc, URTW_BEACON_INTERVAL, 0x64);
urtw_write16_m(sc, URTW_BEACON_INTERVAL_TIME, 100);
error = urtw_led_ctl(sc, URTW_LED_CTL_LINK);
if (error != 0)
device_printf(sc->sc_dev,
"could not control LED (%d)\n", error);
ieee80211_free_node(ni);
break;
default:
break;
}
fail:
URTW_UNLOCK(sc);
IEEE80211_LOCK(ic);
return (uvp->newstate(vap, nstate, arg));
}
static void
urtw_watchdog(void *arg)
{
struct urtw_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
if (sc->sc_txtimer > 0) {
if (--sc->sc_txtimer == 0) {
device_printf(sc->sc_dev, "device timeout\n");
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
return;
}
callout_reset(&sc->sc_watchdog_ch, hz, urtw_watchdog, sc);
}
}
static void
urtw_set_multi(void *arg)
{
- struct urtw_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
-
- if (!(ifp->if_flags & IFF_UP))
- return;
-
- /*
- * XXX don't know how to set a device. Lack of docs. Just try to set
- * IFF_ALLMULTI flag here.
- */
- ifp->if_flags |= IFF_ALLMULTI;
+ /* XXX no documentation on how to program the multicast filter on this device. */
}
static usb_error_t
urtw_set_rate(struct urtw_softc *sc)
{
int i, basic_rate, min_rr_rate, max_rr_rate;
uint16_t data;
usb_error_t error;
basic_rate = urtw_rate2rtl(48);
min_rr_rate = urtw_rate2rtl(12);
max_rr_rate = urtw_rate2rtl(48);
urtw_write8_m(sc, URTW_RESP_RATE,
max_rr_rate << URTW_RESP_MAX_RATE_SHIFT |
min_rr_rate << URTW_RESP_MIN_RATE_SHIFT);
urtw_read16_m(sc, URTW_BRSR, &data);
data &= ~URTW_BRSR_MBR_8185;
for (i = 0; i <= basic_rate; i++)
data |= (1 << i);
urtw_write16_m(sc, URTW_BRSR, data);
fail:
return (error);
}
static uint16_t
urtw_rate2rtl(uint32_t rate)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
int i;
for (i = 0; i < N(urtw_ratetable); i++) {
if (rate == urtw_ratetable[i].reg)
return urtw_ratetable[i].val;
}
return (3);
#undef N
}
static uint16_t
urtw_rtl2rate(uint32_t rate)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
int i;
for (i = 0; i < N(urtw_ratetable); i++) {
if (rate == urtw_ratetable[i].val)
return urtw_ratetable[i].reg;
}
return (0);
#undef N
}
static usb_error_t
urtw_update_msr(struct urtw_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint8_t data;
usb_error_t error;
urtw_read8_m(sc, URTW_MSR, &data);
data &= ~URTW_MSR_LINK_MASK;
if (sc->sc_state == IEEE80211_S_RUN) {
switch (ic->ic_opmode) {
case IEEE80211_M_STA:
case IEEE80211_M_MONITOR:
data |= URTW_MSR_LINK_STA;
if (sc->sc_flags & URTW_RTL8187B)
data |= URTW_MSR_LINK_ENEDCA;
break;
case IEEE80211_M_IBSS:
data |= URTW_MSR_LINK_ADHOC;
break;
case IEEE80211_M_HOSTAP:
data |= URTW_MSR_LINK_HOSTAP;
break;
default:
DPRINTF(sc, URTW_DEBUG_STATE,
"unsupported operation mode 0x%x\n",
ic->ic_opmode);
error = USB_ERR_INVAL;
goto fail;
}
} else
data |= URTW_MSR_LINK_NONE;
urtw_write8_m(sc, URTW_MSR, data);
fail:
return (error);
}
static usb_error_t
urtw_read8_c(struct urtw_softc *sc, int val, uint8_t *data)
{
struct usb_device_request req;
usb_error_t error;
URTW_ASSERT_LOCKED(sc);
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = URTW_8187_GETREGS_REQ;
USETW(req.wValue, (val & 0xff) | 0xff00);
USETW(req.wIndex, (val >> 8) & 0x3);
USETW(req.wLength, sizeof(uint8_t));
error = urtw_do_request(sc, &req, data);
return (error);
}
static usb_error_t
urtw_read16_c(struct urtw_softc *sc, int val, uint16_t *data)
{
struct usb_device_request req;
usb_error_t error;
URTW_ASSERT_LOCKED(sc);
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = URTW_8187_GETREGS_REQ;
USETW(req.wValue, (val & 0xff) | 0xff00);
USETW(req.wIndex, (val >> 8) & 0x3);
USETW(req.wLength, sizeof(uint16_t));
error = urtw_do_request(sc, &req, data);
return (error);
}
static usb_error_t
urtw_read32_c(struct urtw_softc *sc, int val, uint32_t *data)
{
struct usb_device_request req;
usb_error_t error;
URTW_ASSERT_LOCKED(sc);
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = URTW_8187_GETREGS_REQ;
USETW(req.wValue, (val & 0xff) | 0xff00);
USETW(req.wIndex, (val >> 8) & 0x3);
USETW(req.wLength, sizeof(uint32_t));
error = urtw_do_request(sc, &req, data);
return (error);
}
static usb_error_t
urtw_write8_c(struct urtw_softc *sc, int val, uint8_t data)
{
struct usb_device_request req;
URTW_ASSERT_LOCKED(sc);
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = URTW_8187_SETREGS_REQ;
USETW(req.wValue, (val & 0xff) | 0xff00);
USETW(req.wIndex, (val >> 8) & 0x3);
USETW(req.wLength, sizeof(uint8_t));
return (urtw_do_request(sc, &req, &data));
}
static usb_error_t
urtw_write16_c(struct urtw_softc *sc, int val, uint16_t data)
{
struct usb_device_request req;
URTW_ASSERT_LOCKED(sc);
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = URTW_8187_SETREGS_REQ;
USETW(req.wValue, (val & 0xff) | 0xff00);
USETW(req.wIndex, (val >> 8) & 0x3);
USETW(req.wLength, sizeof(uint16_t));
return (urtw_do_request(sc, &req, &data));
}
static usb_error_t
urtw_write32_c(struct urtw_softc *sc, int val, uint32_t data)
{
struct usb_device_request req;
URTW_ASSERT_LOCKED(sc);
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = URTW_8187_SETREGS_REQ;
USETW(req.wValue, (val & 0xff) | 0xff00);
USETW(req.wIndex, (val >> 8) & 0x3);
USETW(req.wLength, sizeof(uint32_t));
return (urtw_do_request(sc, &req, &data));
}
static usb_error_t
urtw_get_macaddr(struct urtw_softc *sc)
{
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t data;
usb_error_t error;
error = urtw_eprom_read32(sc, URTW_EPROM_MACADDR, &data);
if (error != 0)
goto fail;
- sc->sc_bssid[0] = data & 0xff;
- sc->sc_bssid[1] = (data & 0xff00) >> 8;
+ ic->ic_macaddr[0] = data & 0xff;
+ ic->ic_macaddr[1] = (data & 0xff00) >> 8;
error = urtw_eprom_read32(sc, URTW_EPROM_MACADDR + 1, &data);
if (error != 0)
goto fail;
- sc->sc_bssid[2] = data & 0xff;
- sc->sc_bssid[3] = (data & 0xff00) >> 8;
+ ic->ic_macaddr[2] = data & 0xff;
+ ic->ic_macaddr[3] = (data & 0xff00) >> 8;
error = urtw_eprom_read32(sc, URTW_EPROM_MACADDR + 2, &data);
if (error != 0)
goto fail;
- sc->sc_bssid[4] = data & 0xff;
- sc->sc_bssid[5] = (data & 0xff00) >> 8;
+ ic->ic_macaddr[4] = data & 0xff;
+ ic->ic_macaddr[5] = (data & 0xff00) >> 8;
fail:
return (error);
}
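/*
 * Bit-bang a read command to the 93C46/93C56 serial EEPROM: send the
 * start/read opcode bits, the 6- or 8-bit word address, then clock in
 * the 16 data bits.
 */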
static usb_error_t
urtw_eprom_read32(struct urtw_softc *sc, uint32_t addr, uint32_t *data)
{
#define URTW_READCMD_LEN 3
int addrlen, i;
int16_t addrstr[8], data16, readcmd[] = { 1, 1, 0 };
usb_error_t error;
/* NB: make sure the buffer is initialized */
*data = 0;
/* enable EPROM programming */
urtw_write8_m(sc, URTW_EPROM_CMD, URTW_EPROM_CMD_PROGRAM_MODE);
DELAY(URTW_EPROM_DELAY);
error = urtw_eprom_cs(sc, URTW_EPROM_ENABLE);
if (error != 0)
goto fail;
error = urtw_eprom_ck(sc);
if (error != 0)
goto fail;
error = urtw_eprom_sendbits(sc, readcmd, URTW_READCMD_LEN);
if (error != 0)
goto fail;
if (sc->sc_epromtype == URTW_EEPROM_93C56) {
addrlen = 8;
addrstr[0] = addr & (1 << 7);
addrstr[1] = addr & (1 << 6);
addrstr[2] = addr & (1 << 5);
addrstr[3] = addr & (1 << 4);
addrstr[4] = addr & (1 << 3);
addrstr[5] = addr & (1 << 2);
addrstr[6] = addr & (1 << 1);
addrstr[7] = addr & (1 << 0);
} else {
addrlen = 6;
addrstr[0] = addr & (1 << 5);
addrstr[1] = addr & (1 << 4);
addrstr[2] = addr & (1 << 3);
addrstr[3] = addr & (1 << 2);
addrstr[4] = addr & (1 << 1);
addrstr[5] = addr & (1 << 0);
}
error = urtw_eprom_sendbits(sc, addrstr, addrlen);
if (error != 0)
goto fail;
error = urtw_eprom_writebit(sc, 0);
if (error != 0)
goto fail;
for (i = 0; i < 16; i++) {
error = urtw_eprom_ck(sc);
if (error != 0)
goto fail;
error = urtw_eprom_readbit(sc, &data16);
if (error != 0)
goto fail;
(*data) |= (data16 << (15 - i));
}
error = urtw_eprom_cs(sc, URTW_EPROM_DISABLE);
if (error != 0)
goto fail;
error = urtw_eprom_ck(sc);
if (error != 0)
goto fail;
/* now disable EPROM programming */
urtw_write8_m(sc, URTW_EPROM_CMD, URTW_EPROM_CMD_NORMAL_MODE);
fail:
return (error);
#undef URTW_READCMD_LEN
}
static usb_error_t
urtw_eprom_cs(struct urtw_softc *sc, int able)
{
uint8_t data;
usb_error_t error;
urtw_read8_m(sc, URTW_EPROM_CMD, &data);
if (able == URTW_EPROM_ENABLE)
urtw_write8_m(sc, URTW_EPROM_CMD, data | URTW_EPROM_CS);
else
urtw_write8_m(sc, URTW_EPROM_CMD, data & ~URTW_EPROM_CS);
DELAY(URTW_EPROM_DELAY);
fail:
return (error);
}
static usb_error_t
urtw_eprom_ck(struct urtw_softc *sc)
{
uint8_t data;
usb_error_t error;
/* masking */
urtw_read8_m(sc, URTW_EPROM_CMD, &data);
urtw_write8_m(sc, URTW_EPROM_CMD, data | URTW_EPROM_CK);
DELAY(URTW_EPROM_DELAY);
/* unmasking */
urtw_read8_m(sc, URTW_EPROM_CMD, &data);
urtw_write8_m(sc, URTW_EPROM_CMD, data & ~URTW_EPROM_CK);
DELAY(URTW_EPROM_DELAY);
fail:
return (error);
}
static usb_error_t
urtw_eprom_readbit(struct urtw_softc *sc, int16_t *data)
{
uint8_t data8;
usb_error_t error;
urtw_read8_m(sc, URTW_EPROM_CMD, &data8);
*data = (data8 & URTW_EPROM_READBIT) ? 1 : 0;
DELAY(URTW_EPROM_DELAY);
fail:
return (error);
}
static usb_error_t
urtw_eprom_writebit(struct urtw_softc *sc, int16_t bit)
{
uint8_t data;
usb_error_t error;
urtw_read8_m(sc, URTW_EPROM_CMD, &data);
if (bit != 0)
urtw_write8_m(sc, URTW_EPROM_CMD, data | URTW_EPROM_WRITEBIT);
else
urtw_write8_m(sc, URTW_EPROM_CMD, data & ~URTW_EPROM_WRITEBIT);
DELAY(URTW_EPROM_DELAY);
fail:
return (error);
}
static usb_error_t
urtw_eprom_sendbits(struct urtw_softc *sc, int16_t *buf, int buflen)
{
int i = 0;
usb_error_t error = 0;
for (i = 0; i < buflen; i++) {
error = urtw_eprom_writebit(sc, buf[i]);
if (error != 0)
goto fail;
error = urtw_eprom_ck(sc);
if (error != 0)
goto fail;
}
fail:
return (error);
}
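/*
 * Each TX power word read from the EEPROM packs the CCK and OFDM
 * power indexes for two consecutive channels into four nibbles.
 */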
static usb_error_t
urtw_get_txpwr(struct urtw_softc *sc)
{
int i, j;
uint32_t data;
usb_error_t error;
error = urtw_eprom_read32(sc, URTW_EPROM_TXPW_BASE, &data);
if (error != 0)
goto fail;
sc->sc_txpwr_cck_base = data & 0xf;
sc->sc_txpwr_ofdm_base = (data >> 4) & 0xf;
for (i = 1, j = 0; i < 6; i += 2, j++) {
error = urtw_eprom_read32(sc, URTW_EPROM_TXPW0 + j, &data);
if (error != 0)
goto fail;
sc->sc_txpwr_cck[i] = data & 0xf;
sc->sc_txpwr_cck[i + 1] = (data & 0xf00) >> 8;
sc->sc_txpwr_ofdm[i] = (data & 0xf0) >> 4;
sc->sc_txpwr_ofdm[i + 1] = (data & 0xf000) >> 12;
}
for (i = 1, j = 0; i < 4; i += 2, j++) {
error = urtw_eprom_read32(sc, URTW_EPROM_TXPW1 + j, &data);
if (error != 0)
goto fail;
sc->sc_txpwr_cck[i + 6] = data & 0xf;
sc->sc_txpwr_cck[i + 6 + 1] = (data & 0xf00) >> 8;
sc->sc_txpwr_ofdm[i + 6] = (data & 0xf0) >> 4;
sc->sc_txpwr_ofdm[i + 6 + 1] = (data & 0xf000) >> 12;
}
if (sc->sc_flags & URTW_RTL8187B) {
error = urtw_eprom_read32(sc, URTW_EPROM_TXPW2, &data);
if (error != 0)
goto fail;
sc->sc_txpwr_cck[1 + 6 + 4] = data & 0xf;
sc->sc_txpwr_ofdm[1 + 6 + 4] = (data & 0xf0) >> 4;
error = urtw_eprom_read32(sc, 0x0a, &data);
if (error != 0)
goto fail;
sc->sc_txpwr_cck[2 + 6 + 4] = data & 0xf;
sc->sc_txpwr_ofdm[2 + 6 + 4] = (data & 0xf0) >> 4;
error = urtw_eprom_read32(sc, 0x1c, &data);
if (error != 0)
goto fail;
sc->sc_txpwr_cck[3 + 6 + 4] = data & 0xf;
sc->sc_txpwr_cck[3 + 6 + 4 + 1] = (data & 0xf00) >> 8;
sc->sc_txpwr_ofdm[3 + 6 + 4] = (data & 0xf0) >> 4;
sc->sc_txpwr_ofdm[3 + 6 + 4 + 1] = (data & 0xf000) >> 12;
} else {
for (i = 1, j = 0; i < 4; i += 2, j++) {
error = urtw_eprom_read32(sc, URTW_EPROM_TXPW2 + j,
&data);
if (error != 0)
goto fail;
sc->sc_txpwr_cck[i + 6 + 4] = data & 0xf;
sc->sc_txpwr_cck[i + 6 + 4 + 1] = (data & 0xf00) >> 8;
sc->sc_txpwr_ofdm[i + 6 + 4] = (data & 0xf0) >> 4;
sc->sc_txpwr_ofdm[i + 6 + 4 + 1] = (data & 0xf000) >> 12;
}
}
fail:
return (error);
}
static usb_error_t
urtw_get_rfchip(struct urtw_softc *sc)
{
int ret;
uint8_t data8;
uint32_t data;
usb_error_t error;
if (sc->sc_flags & URTW_RTL8187B) {
urtw_read8_m(sc, 0xe1, &data8);
switch (data8) {
case 0:
sc->sc_flags |= URTW_RTL8187B_REV_B;
break;
case 1:
sc->sc_flags |= URTW_RTL8187B_REV_D;
break;
case 2:
sc->sc_flags |= URTW_RTL8187B_REV_E;
break;
default:
device_printf(sc->sc_dev, "unknown type: %#x\n", data8);
sc->sc_flags |= URTW_RTL8187B_REV_B;
break;
}
} else {
urtw_read32_m(sc, URTW_TX_CONF, &data);
switch (data & URTW_TX_HWMASK) {
case URTW_TX_R8187vD_B:
sc->sc_flags |= URTW_RTL8187B;
break;
case URTW_TX_R8187vD:
break;
default:
device_printf(sc->sc_dev, "unknown RTL8187L type: %#x\n",
data & URTW_TX_HWMASK);
break;
}
}
error = urtw_eprom_read32(sc, URTW_EPROM_RFCHIPID, &data);
if (error != 0)
goto fail;
switch (data & 0xff) {
case URTW_EPROM_RFCHIPID_RTL8225U:
error = urtw_8225_isv2(sc, &ret);
if (error != 0)
goto fail;
if (ret == 0) {
sc->sc_rf_init = urtw_8225_rf_init;
sc->sc_rf_set_sens = urtw_8225_rf_set_sens;
sc->sc_rf_set_chan = urtw_8225_rf_set_chan;
sc->sc_rf_stop = urtw_8225_rf_stop;
} else {
sc->sc_rf_init = urtw_8225v2_rf_init;
sc->sc_rf_set_chan = urtw_8225v2_rf_set_chan;
sc->sc_rf_stop = urtw_8225_rf_stop;
}
sc->sc_max_sens = URTW_8225_RF_MAX_SENS;
sc->sc_sens = URTW_8225_RF_DEF_SENS;
break;
case URTW_EPROM_RFCHIPID_RTL8225Z2:
sc->sc_rf_init = urtw_8225v2b_rf_init;
sc->sc_rf_set_chan = urtw_8225v2b_rf_set_chan;
sc->sc_max_sens = URTW_8225_RF_MAX_SENS;
sc->sc_sens = URTW_8225_RF_DEF_SENS;
sc->sc_rf_stop = urtw_8225_rf_stop;
break;
default:
DPRINTF(sc, URTW_DEBUG_STATE,
"unsupported RF chip %d\n", data & 0xff);
error = USB_ERR_INVAL;
goto fail;
}
device_printf(sc->sc_dev, "%s rf %s hwrev %s\n",
(sc->sc_flags & URTW_RTL8187B) ? "rtl8187b" : "rtl8187l",
((data & 0xff) == URTW_EPROM_RFCHIPID_RTL8225U) ? "rtl8225u" :
"rtl8225z2",
(sc->sc_flags & URTW_RTL8187B) ? ((data8 == 0) ? "b" :
(data8 == 1) ? "d" : "e") : "none");
fail:
return (error);
}
static usb_error_t
urtw_led_init(struct urtw_softc *sc)
{
uint32_t rev;
usb_error_t error;
urtw_read8_m(sc, URTW_PSR, &sc->sc_psr);
error = urtw_eprom_read32(sc, URTW_EPROM_SWREV, &rev);
if (error != 0)
goto fail;
switch (rev & URTW_EPROM_CID_MASK) {
case URTW_EPROM_CID_ALPHA0:
sc->sc_strategy = URTW_SW_LED_MODE1;
break;
case URTW_EPROM_CID_SERCOMM_PS:
sc->sc_strategy = URTW_SW_LED_MODE3;
break;
case URTW_EPROM_CID_HW_LED:
sc->sc_strategy = URTW_HW_LED;
break;
case URTW_EPROM_CID_RSVD0:
case URTW_EPROM_CID_RSVD1:
default:
sc->sc_strategy = URTW_SW_LED_MODE0;
break;
}
sc->sc_gpio_ledpin = URTW_LED_PIN_GPIO0;
fail:
return (error);
}
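/*
 * Bring up the RTL8225 (v1) radio: program the analog parameters and
 * RF timing, load the part1/part2/part3 register tables along with the
 * RX gain and AGC tables, set the TX power level and tune to channel 1.
 */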
static usb_error_t
urtw_8225_rf_init(struct urtw_softc *sc)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
int i;
uint16_t data;
usb_error_t error;
error = urtw_8180_set_anaparam(sc, URTW_8225_ANAPARAM_ON);
if (error)
goto fail;
error = urtw_8225_usb_init(sc);
if (error)
goto fail;
urtw_write32_m(sc, URTW_RF_TIMING, 0x000a8008);
urtw_read16_m(sc, URTW_BRSR, &data); /* XXX ??? */
urtw_write16_m(sc, URTW_BRSR, 0xffff);
urtw_write32_m(sc, URTW_RF_PARA, 0x100044);
error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG);
if (error)
goto fail;
urtw_write8_m(sc, URTW_CONFIG3, 0x44);
error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL);
if (error)
goto fail;
error = urtw_8185_rf_pins_enable(sc);
if (error)
goto fail;
usb_pause_mtx(&sc->sc_mtx, 1000);
for (i = 0; i < N(urtw_8225_rf_part1); i++) {
urtw_8225_write(sc, urtw_8225_rf_part1[i].reg,
urtw_8225_rf_part1[i].val);
usb_pause_mtx(&sc->sc_mtx, 1);
}
usb_pause_mtx(&sc->sc_mtx, 100);
urtw_8225_write(sc,
URTW_8225_ADDR_2_MAGIC, URTW_8225_ADDR_2_DATA_MAGIC1);
usb_pause_mtx(&sc->sc_mtx, 200);
urtw_8225_write(sc,
URTW_8225_ADDR_2_MAGIC, URTW_8225_ADDR_2_DATA_MAGIC2);
usb_pause_mtx(&sc->sc_mtx, 200);
urtw_8225_write(sc,
URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC3);
for (i = 0; i < 95; i++) {
urtw_8225_write(sc, URTW_8225_ADDR_1_MAGIC, (uint8_t)(i + 1));
urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, urtw_8225_rxgain[i]);
}
urtw_8225_write(sc,
URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC4);
urtw_8225_write(sc,
URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC5);
for (i = 0; i < 128; i++) {
urtw_8187_write_phy_ofdm(sc, 0xb, urtw_8225_agc[i]);
usb_pause_mtx(&sc->sc_mtx, 1);
urtw_8187_write_phy_ofdm(sc, 0xa, (uint8_t)i + 0x80);
usb_pause_mtx(&sc->sc_mtx, 1);
}
for (i = 0; i < N(urtw_8225_rf_part2); i++) {
urtw_8187_write_phy_ofdm(sc, urtw_8225_rf_part2[i].reg,
urtw_8225_rf_part2[i].val);
usb_pause_mtx(&sc->sc_mtx, 1);
}
error = urtw_8225_setgain(sc, 4);
if (error)
goto fail;
for (i = 0; i < N(urtw_8225_rf_part3); i++) {
urtw_8187_write_phy_cck(sc, urtw_8225_rf_part3[i].reg,
urtw_8225_rf_part3[i].val);
usb_pause_mtx(&sc->sc_mtx, 1);
}
urtw_write8_m(sc, URTW_TESTR, 0x0d);
error = urtw_8225_set_txpwrlvl(sc, 1);
if (error)
goto fail;
urtw_8187_write_phy_cck(sc, 0x10, 0x9b);
usb_pause_mtx(&sc->sc_mtx, 1);
urtw_8187_write_phy_ofdm(sc, 0x26, 0x90);
usb_pause_mtx(&sc->sc_mtx, 1);
/* TX ant A, 0x0 for B */
error = urtw_8185_tx_antenna(sc, 0x3);
if (error)
goto fail;
urtw_write32_m(sc, URTW_HSSI_PARA, 0x3dc00002);
error = urtw_8225_rf_set_chan(sc, 1);
fail:
return (error);
#undef N
}
static usb_error_t
urtw_8185_rf_pins_enable(struct urtw_softc *sc)
{
usb_error_t error = 0;
urtw_write16_m(sc, URTW_RF_PINS_ENABLE, 0x1ff7);
fail:
return (error);
}
static usb_error_t
urtw_8185_tx_antenna(struct urtw_softc *sc, uint8_t ant)
{
usb_error_t error;
urtw_write8_m(sc, URTW_TX_ANTENNA, ant);
usb_pause_mtx(&sc->sc_mtx, 1);
fail:
return (error);
}
static usb_error_t
urtw_8187_write_phy_ofdm_c(struct urtw_softc *sc, uint8_t addr, uint32_t data)
{
data = data & 0xff;
return urtw_8187_write_phy(sc, addr, data);
}
static usb_error_t
urtw_8187_write_phy_cck_c(struct urtw_softc *sc, uint8_t addr, uint32_t data)
{
data = data & 0xff;
return urtw_8187_write_phy(sc, addr, data | 0x10000);
}
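/*
 * Write a baseband (PHY) register: compose the PHY command word
 * ((data << 8) | addr | 0x80) and load it byte by byte into the four
 * PHY_MAGIC registers.
 */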
static usb_error_t
urtw_8187_write_phy(struct urtw_softc *sc, uint8_t addr, uint32_t data)
{
uint32_t phyw;
usb_error_t error;
phyw = ((data << 8) | (addr | 0x80));
urtw_write8_m(sc, URTW_PHY_MAGIC4, ((phyw & 0xff000000) >> 24));
urtw_write8_m(sc, URTW_PHY_MAGIC3, ((phyw & 0x00ff0000) >> 16));
urtw_write8_m(sc, URTW_PHY_MAGIC2, ((phyw & 0x0000ff00) >> 8));
urtw_write8_m(sc, URTW_PHY_MAGIC1, ((phyw & 0x000000ff)));
usb_pause_mtx(&sc->sc_mtx, 1);
fail:
return (error);
}
static usb_error_t
urtw_8225_setgain(struct urtw_softc *sc, int16_t gain)
{
usb_error_t error;
urtw_8187_write_phy_ofdm(sc, 0x0d, urtw_8225_gain[gain * 4]);
urtw_8187_write_phy_ofdm(sc, 0x1b, urtw_8225_gain[gain * 4 + 2]);
urtw_8187_write_phy_ofdm(sc, 0x1d, urtw_8225_gain[gain * 4 + 3]);
urtw_8187_write_phy_ofdm(sc, 0x23, urtw_8225_gain[gain * 4 + 1]);
fail:
return (error);
}
static usb_error_t
urtw_8225_usb_init(struct urtw_softc *sc)
{
uint8_t data;
usb_error_t error;
urtw_write8_m(sc, URTW_RF_PINS_SELECT + 1, 0);
urtw_write8_m(sc, URTW_GPIO, 0);
error = urtw_read8e(sc, 0x53, &data);
if (error)
goto fail;
error = urtw_write8e(sc, 0x53, data | (1 << 7));
if (error)
goto fail;
urtw_write8_m(sc, URTW_RF_PINS_SELECT + 1, 4);
urtw_write8_m(sc, URTW_GPIO, 0x20);
urtw_write8_m(sc, URTW_GP_ENABLE, 0);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, 0x80);
urtw_write16_m(sc, URTW_RF_PINS_SELECT, 0x80);
urtw_write16_m(sc, URTW_RF_PINS_ENABLE, 0x80);
usb_pause_mtx(&sc->sc_mtx, 500);
fail:
return (error);
}
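/*
 * Write a 16-bit value to an RTL8225 RF register: strobe the RF pin
 * lines to address the chip, then issue the vendor SETREGS request
 * with wIndex 0x8225.
 */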
static usb_error_t
urtw_8225_write_c(struct urtw_softc *sc, uint8_t addr, uint16_t data)
{
uint16_t d80, d82, d84;
usb_error_t error;
urtw_read16_m(sc, URTW_RF_PINS_OUTPUT, &d80);
d80 &= URTW_RF_PINS_MAGIC1;
urtw_read16_m(sc, URTW_RF_PINS_ENABLE, &d82);
urtw_read16_m(sc, URTW_RF_PINS_SELECT, &d84);
d84 &= URTW_RF_PINS_MAGIC2;
urtw_write16_m(sc, URTW_RF_PINS_ENABLE, d82 | URTW_RF_PINS_MAGIC3);
urtw_write16_m(sc, URTW_RF_PINS_SELECT, d84 | URTW_RF_PINS_MAGIC3);
DELAY(10);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, d80 | URTW_BB_HOST_BANG_EN);
DELAY(2);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, d80);
DELAY(10);
error = urtw_8225_write_s16(sc, addr, 0x8225, &data);
if (error != 0)
goto fail;
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, d80 | URTW_BB_HOST_BANG_EN);
DELAY(10);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, d80 | URTW_BB_HOST_BANG_EN);
urtw_write16_m(sc, URTW_RF_PINS_SELECT, d84);
usb_pause_mtx(&sc->sc_mtx, 2);
fail:
return (error);
}
static usb_error_t
urtw_8225_write_s16(struct urtw_softc *sc, uint8_t addr, int index,
uint16_t *data)
{
uint8_t buf[2];
uint16_t data16;
struct usb_device_request req;
usb_error_t error = 0;
data16 = *data;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = URTW_8187_SETREGS_REQ;
USETW(req.wValue, addr);
USETW(req.wIndex, index);
USETW(req.wLength, sizeof(uint16_t));
buf[0] = (data16 & 0x00ff);
buf[1] = (data16 & 0xff00) >> 8;
error = urtw_do_request(sc, &req, buf);
return (error);
}
static usb_error_t
urtw_8225_rf_set_chan(struct urtw_softc *sc, int chan)
{
usb_error_t error;
error = urtw_8225_set_txpwrlvl(sc, chan);
if (error)
goto fail;
urtw_8225_write(sc, URTW_8225_ADDR_7_MAGIC, urtw_8225_channel[chan]);
usb_pause_mtx(&sc->sc_mtx, 10);
fail:
return (error);
}
static usb_error_t
urtw_8225_rf_set_sens(struct urtw_softc *sc, int sens)
{
usb_error_t error;
if (sens < 0 || sens > 6)
return -1;
if (sens > 4)
urtw_8225_write(sc,
URTW_8225_ADDR_C_MAGIC, URTW_8225_ADDR_C_DATA_MAGIC1);
else
urtw_8225_write(sc,
URTW_8225_ADDR_C_MAGIC, URTW_8225_ADDR_C_DATA_MAGIC2);
sens = 6 - sens;
error = urtw_8225_setgain(sc, sens);
if (error)
goto fail;
urtw_8187_write_phy_cck(sc, 0x41, urtw_8225_threshold[sens]);
fail:
return (error);
}
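/*
 * Program the per-channel TX power for the RTL8225 (v1): clamp the
 * EEPROM CCK/OFDM levels, select the regular or channel-14 CCK gain
 * table and write the CCK and OFDM gain registers.
 */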
static usb_error_t
urtw_8225_set_txpwrlvl(struct urtw_softc *sc, int chan)
{
int i, idx, set;
uint8_t *cck_pwltable;
uint8_t cck_pwrlvl_max, ofdm_pwrlvl_min, ofdm_pwrlvl_max;
uint8_t cck_pwrlvl = sc->sc_txpwr_cck[chan] & 0xff;
uint8_t ofdm_pwrlvl = sc->sc_txpwr_ofdm[chan] & 0xff;
usb_error_t error;
cck_pwrlvl_max = 11;
ofdm_pwrlvl_max = 25; /* 12 -> 25 */
ofdm_pwrlvl_min = 10;
/* CCK power setting */
cck_pwrlvl = (cck_pwrlvl > cck_pwrlvl_max) ? cck_pwrlvl_max : cck_pwrlvl;
idx = cck_pwrlvl % 6;
set = cck_pwrlvl / 6;
cck_pwltable = (chan == 14) ? urtw_8225_txpwr_cck_ch14 :
urtw_8225_txpwr_cck;
urtw_write8_m(sc, URTW_TX_GAIN_CCK,
urtw_8225_tx_gain_cck_ofdm[set] >> 1);
for (i = 0; i < 8; i++) {
urtw_8187_write_phy_cck(sc, 0x44 + i,
cck_pwltable[idx * 8 + i]);
}
usb_pause_mtx(&sc->sc_mtx, 1);
/* OFDM power setting */
ofdm_pwrlvl = (ofdm_pwrlvl > (ofdm_pwrlvl_max - ofdm_pwrlvl_min)) ?
ofdm_pwrlvl_max : ofdm_pwrlvl + ofdm_pwrlvl_min;
ofdm_pwrlvl = (ofdm_pwrlvl > 35) ? 35 : ofdm_pwrlvl;
idx = ofdm_pwrlvl % 6;
set = ofdm_pwrlvl / 6;
error = urtw_8185_set_anaparam2(sc, URTW_8225_ANAPARAM2_ON);
if (error)
goto fail;
urtw_8187_write_phy_ofdm(sc, 2, 0x42);
urtw_8187_write_phy_ofdm(sc, 6, 0);
urtw_8187_write_phy_ofdm(sc, 8, 0);
urtw_write8_m(sc, URTW_TX_GAIN_OFDM,
urtw_8225_tx_gain_cck_ofdm[set] >> 1);
urtw_8187_write_phy_ofdm(sc, 0x5, urtw_8225_txpwr_ofdm[idx]);
urtw_8187_write_phy_ofdm(sc, 0x7, urtw_8225_txpwr_ofdm[idx]);
usb_pause_mtx(&sc->sc_mtx, 1);
fail:
return (error);
}
static usb_error_t
urtw_8225_rf_stop(struct urtw_softc *sc)
{
uint8_t data;
usb_error_t error;
urtw_8225_write(sc, 0x4, 0x1f);
error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG);
if (error)
goto fail;
urtw_read8_m(sc, URTW_CONFIG3, &data);
urtw_write8_m(sc, URTW_CONFIG3, data | URTW_CONFIG3_ANAPARAM_WRITE);
if (sc->sc_flags & URTW_RTL8187B) {
urtw_write32_m(sc, URTW_ANAPARAM2,
URTW_8187B_8225_ANAPARAM2_OFF);
urtw_write32_m(sc, URTW_ANAPARAM, URTW_8187B_8225_ANAPARAM_OFF);
urtw_write32_m(sc, URTW_ANAPARAM3,
URTW_8187B_8225_ANAPARAM3_OFF);
} else {
urtw_write32_m(sc, URTW_ANAPARAM2, URTW_8225_ANAPARAM2_OFF);
urtw_write32_m(sc, URTW_ANAPARAM, URTW_8225_ANAPARAM_OFF);
}
urtw_write8_m(sc, URTW_CONFIG3, data & ~URTW_CONFIG3_ANAPARAM_WRITE);
error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL);
if (error)
goto fail;
fail:
return (error);
}
static usb_error_t
urtw_8225v2_rf_init(struct urtw_softc *sc)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
int i;
uint16_t data;
uint32_t data32;
usb_error_t error;
error = urtw_8180_set_anaparam(sc, URTW_8225_ANAPARAM_ON);
if (error)
goto fail;
error = urtw_8225_usb_init(sc);
if (error)
goto fail;
urtw_write32_m(sc, URTW_RF_TIMING, 0x000a8008);
urtw_read16_m(sc, URTW_BRSR, &data); /* XXX ??? */
urtw_write16_m(sc, URTW_BRSR, 0xffff);
urtw_write32_m(sc, URTW_RF_PARA, 0x100044);
error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG);
if (error)
goto fail;
urtw_write8_m(sc, URTW_CONFIG3, 0x44);
error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL);
if (error)
goto fail;
error = urtw_8185_rf_pins_enable(sc);
if (error)
goto fail;
usb_pause_mtx(&sc->sc_mtx, 500);
for (i = 0; i < N(urtw_8225v2_rf_part1); i++) {
urtw_8225_write(sc, urtw_8225v2_rf_part1[i].reg,
urtw_8225v2_rf_part1[i].val);
}
usb_pause_mtx(&sc->sc_mtx, 50);
urtw_8225_write(sc,
URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC1);
for (i = 0; i < 95; i++) {
urtw_8225_write(sc, URTW_8225_ADDR_1_MAGIC, (uint8_t)(i + 1));
urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC,
urtw_8225v2_rxgain[i]);
}
urtw_8225_write(sc,
URTW_8225_ADDR_3_MAGIC, URTW_8225_ADDR_3_DATA_MAGIC1);
urtw_8225_write(sc,
URTW_8225_ADDR_5_MAGIC, URTW_8225_ADDR_5_DATA_MAGIC1);
urtw_8225_write(sc,
URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC2);
urtw_8225_write(sc,
URTW_8225_ADDR_2_MAGIC, URTW_8225_ADDR_2_DATA_MAGIC1);
usb_pause_mtx(&sc->sc_mtx, 100);
urtw_8225_write(sc,
URTW_8225_ADDR_2_MAGIC, URTW_8225_ADDR_2_DATA_MAGIC2);
usb_pause_mtx(&sc->sc_mtx, 100);
error = urtw_8225_read(sc, URTW_8225_ADDR_6_MAGIC, &data32);
if (error != 0)
goto fail;
if (data32 != URTW_8225_ADDR_6_DATA_MAGIC1)
device_printf(sc->sc_dev, "expect 0xe6!! (0x%x)\n", data32);
if (!(data32 & URTW_8225_ADDR_6_DATA_MAGIC2)) {
urtw_8225_write(sc,
URTW_8225_ADDR_2_MAGIC, URTW_8225_ADDR_2_DATA_MAGIC1);
usb_pause_mtx(&sc->sc_mtx, 100);
urtw_8225_write(sc,
URTW_8225_ADDR_2_MAGIC, URTW_8225_ADDR_2_DATA_MAGIC2);
usb_pause_mtx(&sc->sc_mtx, 50);
error = urtw_8225_read(sc, URTW_8225_ADDR_6_MAGIC, &data32);
if (error != 0)
goto fail;
if (!(data32 & URTW_8225_ADDR_6_DATA_MAGIC2))
device_printf(sc->sc_dev, "RF calibration failed\n");
}
usb_pause_mtx(&sc->sc_mtx, 100);
urtw_8225_write(sc,
URTW_8225_ADDR_0_MAGIC, URTW_8225_ADDR_0_DATA_MAGIC6);
for (i = 0; i < 128; i++) {
urtw_8187_write_phy_ofdm(sc, 0xb, urtw_8225_agc[i]);
urtw_8187_write_phy_ofdm(sc, 0xa, (uint8_t)i + 0x80);
}
for (i = 0; i < N(urtw_8225v2_rf_part2); i++) {
urtw_8187_write_phy_ofdm(sc, urtw_8225v2_rf_part2[i].reg,
urtw_8225v2_rf_part2[i].val);
}
error = urtw_8225v2_setgain(sc, 4);
if (error)
goto fail;
for (i = 0; i < N(urtw_8225v2_rf_part3); i++) {
urtw_8187_write_phy_cck(sc, urtw_8225v2_rf_part3[i].reg,
urtw_8225v2_rf_part3[i].val);
}
urtw_write8_m(sc, URTW_TESTR, 0x0d);
error = urtw_8225v2_set_txpwrlvl(sc, 1);
if (error)
goto fail;
urtw_8187_write_phy_cck(sc, 0x10, 0x9b);
urtw_8187_write_phy_ofdm(sc, 0x26, 0x90);
/* TX ant A, 0x0 for B */
error = urtw_8185_tx_antenna(sc, 0x3);
if (error)
goto fail;
urtw_write32_m(sc, URTW_HSSI_PARA, 0x3dc00002);
error = urtw_8225_rf_set_chan(sc, 1);
fail:
return (error);
#undef N
}
static usb_error_t
urtw_8225v2_rf_set_chan(struct urtw_softc *sc, int chan)
{
usb_error_t error;
error = urtw_8225v2_set_txpwrlvl(sc, chan);
if (error)
goto fail;
urtw_8225_write(sc, URTW_8225_ADDR_7_MAGIC, urtw_8225_channel[chan]);
usb_pause_mtx(&sc->sc_mtx, 10);
fail:
return (error);
}
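/*
 * Read a 12-bit value from an RTL8225 RF register by bit-banging the
 * register address out and clocking the data in over the RF_PINS
 * GPIO lines.
 */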
static usb_error_t
urtw_8225_read(struct urtw_softc *sc, uint8_t addr, uint32_t *data)
{
int i;
int16_t bit;
uint8_t rlen = 12, wlen = 6;
uint16_t o1, o2, o3, tmp;
uint32_t d2w = ((uint32_t)(addr & 0x1f)) << 27;
uint32_t mask = 0x80000000, value = 0;
usb_error_t error;
urtw_read16_m(sc, URTW_RF_PINS_OUTPUT, &o1);
urtw_read16_m(sc, URTW_RF_PINS_ENABLE, &o2);
urtw_read16_m(sc, URTW_RF_PINS_SELECT, &o3);
urtw_write16_m(sc, URTW_RF_PINS_ENABLE, o2 | URTW_RF_PINS_MAGIC4);
urtw_write16_m(sc, URTW_RF_PINS_SELECT, o3 | URTW_RF_PINS_MAGIC4);
o1 &= ~URTW_RF_PINS_MAGIC4;
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, o1 | URTW_BB_HOST_BANG_EN);
DELAY(5);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, o1);
DELAY(5);
for (i = 0; i < (wlen / 2); i++, mask = mask >> 1) {
bit = ((d2w & mask) != 0) ? 1 : 0;
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1);
DELAY(2);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1 |
URTW_BB_HOST_BANG_CLK);
DELAY(2);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1 |
URTW_BB_HOST_BANG_CLK);
DELAY(2);
mask = mask >> 1;
if (i == 2)
break;
bit = ((d2w & mask) != 0) ? 1 : 0;
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1 |
URTW_BB_HOST_BANG_CLK);
DELAY(2);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1 |
URTW_BB_HOST_BANG_CLK);
DELAY(2);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1);
DELAY(1);
}
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1 | URTW_BB_HOST_BANG_RW |
URTW_BB_HOST_BANG_CLK);
DELAY(2);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, bit | o1 | URTW_BB_HOST_BANG_RW);
DELAY(2);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, o1 | URTW_BB_HOST_BANG_RW);
DELAY(2);
mask = 0x800;
for (i = 0; i < rlen; i++, mask = mask >> 1) {
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT,
o1 | URTW_BB_HOST_BANG_RW);
DELAY(2);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT,
o1 | URTW_BB_HOST_BANG_RW | URTW_BB_HOST_BANG_CLK);
DELAY(2);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT,
o1 | URTW_BB_HOST_BANG_RW | URTW_BB_HOST_BANG_CLK);
DELAY(2);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT,
o1 | URTW_BB_HOST_BANG_RW | URTW_BB_HOST_BANG_CLK);
DELAY(2);
urtw_read16_m(sc, URTW_RF_PINS_INPUT, &tmp);
value |= ((tmp & URTW_BB_HOST_BANG_CLK) ? mask : 0);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT,
o1 | URTW_BB_HOST_BANG_RW);
DELAY(2);
}
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, o1 | URTW_BB_HOST_BANG_EN |
URTW_BB_HOST_BANG_RW);
DELAY(2);
urtw_write16_m(sc, URTW_RF_PINS_ENABLE, o2);
urtw_write16_m(sc, URTW_RF_PINS_SELECT, o3);
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, URTW_RF_PINS_OUTPUT_MAGIC1);
if (data != NULL)
*data = value;
fail:
return (error);
}
static usb_error_t
urtw_8225v2_set_txpwrlvl(struct urtw_softc *sc, int chan)
{
int i;
uint8_t *cck_pwrtable;
uint8_t cck_pwrlvl_max = 15, ofdm_pwrlvl_max = 25, ofdm_pwrlvl_min = 10;
uint8_t cck_pwrlvl = sc->sc_txpwr_cck[chan] & 0xff;
uint8_t ofdm_pwrlvl = sc->sc_txpwr_ofdm[chan] & 0xff;
usb_error_t error;
/* CCK power setting */
cck_pwrlvl = (cck_pwrlvl > cck_pwrlvl_max) ? cck_pwrlvl_max : cck_pwrlvl;
cck_pwrlvl += sc->sc_txpwr_cck_base;
cck_pwrlvl = (cck_pwrlvl > 35) ? 35 : cck_pwrlvl;
cck_pwrtable = (chan == 14) ? urtw_8225v2_txpwr_cck_ch14 :
urtw_8225v2_txpwr_cck;
for (i = 0; i < 8; i++)
urtw_8187_write_phy_cck(sc, 0x44 + i, cck_pwrtable[i]);
urtw_write8_m(sc, URTW_TX_GAIN_CCK,
urtw_8225v2_tx_gain_cck_ofdm[cck_pwrlvl]);
usb_pause_mtx(&sc->sc_mtx, 1);
/* OFDM power setting */
ofdm_pwrlvl = (ofdm_pwrlvl > (ofdm_pwrlvl_max - ofdm_pwrlvl_min)) ?
ofdm_pwrlvl_max : ofdm_pwrlvl + ofdm_pwrlvl_min;
ofdm_pwrlvl += sc->sc_txpwr_ofdm_base;
ofdm_pwrlvl = (ofdm_pwrlvl > 35) ? 35 : ofdm_pwrlvl;
error = urtw_8185_set_anaparam2(sc, URTW_8225_ANAPARAM2_ON);
if (error)
goto fail;
urtw_8187_write_phy_ofdm(sc, 2, 0x42);
urtw_8187_write_phy_ofdm(sc, 5, 0x0);
urtw_8187_write_phy_ofdm(sc, 6, 0x40);
urtw_8187_write_phy_ofdm(sc, 7, 0x0);
urtw_8187_write_phy_ofdm(sc, 8, 0x40);
urtw_write8_m(sc, URTW_TX_GAIN_OFDM,
urtw_8225v2_tx_gain_cck_ofdm[ofdm_pwrlvl]);
usb_pause_mtx(&sc->sc_mtx, 1);
fail:
return (error);
}
static usb_error_t
urtw_8225v2_setgain(struct urtw_softc *sc, int16_t gain)
{
uint8_t *gainp;
usb_error_t error;
/* XXX for A? */
gainp = urtw_8225v2_gain_bg;
urtw_8187_write_phy_ofdm(sc, 0x0d, gainp[gain * 3]);
usb_pause_mtx(&sc->sc_mtx, 1);
urtw_8187_write_phy_ofdm(sc, 0x1b, gainp[gain * 3 + 1]);
usb_pause_mtx(&sc->sc_mtx, 1);
urtw_8187_write_phy_ofdm(sc, 0x1d, gainp[gain * 3 + 2]);
usb_pause_mtx(&sc->sc_mtx, 1);
urtw_8187_write_phy_ofdm(sc, 0x21, 0x17);
usb_pause_mtx(&sc->sc_mtx, 1);
fail:
return (error);
}
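/*
 * Detect an RTL8225 v2 radio: *ret is set to 1 when both identification
 * registers read back their expected magic values, 0 otherwise.
 */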
static usb_error_t
urtw_8225_isv2(struct urtw_softc *sc, int *ret)
{
uint32_t data;
usb_error_t error;
*ret = 1;
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, URTW_RF_PINS_MAGIC5);
urtw_write16_m(sc, URTW_RF_PINS_SELECT, URTW_RF_PINS_MAGIC5);
urtw_write16_m(sc, URTW_RF_PINS_ENABLE, URTW_RF_PINS_MAGIC5);
usb_pause_mtx(&sc->sc_mtx, 500);
urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC,
URTW_8225_ADDR_0_DATA_MAGIC1);
error = urtw_8225_read(sc, URTW_8225_ADDR_8_MAGIC, &data);
if (error != 0)
goto fail;
if (data != URTW_8225_ADDR_8_DATA_MAGIC1)
*ret = 0;
else {
error = urtw_8225_read(sc, URTW_8225_ADDR_9_MAGIC, &data);
if (error != 0)
goto fail;
if (data != URTW_8225_ADDR_9_DATA_MAGIC1)
*ret = 0;
}
urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC,
URTW_8225_ADDR_0_DATA_MAGIC2);
fail:
return (error);
}
static usb_error_t
urtw_8225v2b_rf_init(struct urtw_softc *sc)
{
-#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
+ struct ieee80211com *ic = &sc->sc_ic;
int i;
uint8_t data8;
usb_error_t error;
error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG);
if (error)
goto fail;
/*
* initialize extra registers on 8187
*/
urtw_write16_m(sc, URTW_BRSR_8187B, 0xfff);
/* retry limit */
urtw_read8_m(sc, URTW_CW_CONF, &data8);
data8 |= URTW_CW_CONF_PERPACKET_RETRY;
urtw_write8_m(sc, URTW_CW_CONF, data8);
/* TX AGC */
urtw_read8_m(sc, URTW_TX_AGC_CTL, &data8);
data8 |= URTW_TX_AGC_CTL_PERPACKET_GAIN;
urtw_write8_m(sc, URTW_TX_AGC_CTL, data8);
/* Auto Rate Fallback Control */
#define URTW_ARFR 0x1e0
urtw_write16_m(sc, URTW_ARFR, 0xfff);
urtw_read8_m(sc, URTW_RATE_FALLBACK, &data8);
urtw_write8_m(sc, URTW_RATE_FALLBACK,
data8 | URTW_RATE_FALLBACK_ENABLE);
urtw_read8_m(sc, URTW_MSR, &data8);
urtw_write8_m(sc, URTW_MSR, data8 & 0xf3);
urtw_read8_m(sc, URTW_MSR, &data8);
urtw_write8_m(sc, URTW_MSR, data8 | URTW_MSR_LINK_ENEDCA);
urtw_write8_m(sc, URTW_ACM_CONTROL, sc->sc_acmctl);
urtw_write16_m(sc, URTW_ATIM_WND, 2);
urtw_write16_m(sc, URTW_BEACON_INTERVAL, 100);
#define URTW_FEMR_FOR_8187B 0x1d4
urtw_write16_m(sc, URTW_FEMR_FOR_8187B, 0xffff);
/* led type */
urtw_read8_m(sc, URTW_CONFIG1, &data8);
data8 = (data8 & 0x3f) | 0x80;
urtw_write8_m(sc, URTW_CONFIG1, data8);
/* Apply the MAC address again. */
- urtw_write32_m(sc, URTW_MAC0, ((uint32_t *)sc->sc_bssid)[0]);
- urtw_write16_m(sc, URTW_MAC4, ((uint32_t *)sc->sc_bssid)[1] & 0xffff);
+ urtw_write32_m(sc, URTW_MAC0, ((uint32_t *)ic->ic_macaddr)[0]);
+ urtw_write16_m(sc, URTW_MAC4, ((uint32_t *)ic->ic_macaddr)[1] & 0xffff);
error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL);
if (error)
goto fail;
urtw_write8_m(sc, URTW_WPA_CONFIG, 0);
/*
* MAC configuration
*/
- for (i = 0; i < N(urtw_8225v2b_rf_part1); i++)
+ for (i = 0; i < nitems(urtw_8225v2b_rf_part1); i++)
urtw_write8_m(sc, urtw_8225v2b_rf_part1[i].reg,
urtw_8225v2b_rf_part1[i].val);
urtw_write16_m(sc, URTW_TID_AC_MAP, 0xfa50);
urtw_write16_m(sc, URTW_INT_MIG, 0x0000);
urtw_write32_m(sc, 0x1f0, 0);
urtw_write32_m(sc, 0x1f4, 0);
urtw_write8_m(sc, 0x1f8, 0);
urtw_write32_m(sc, URTW_RF_TIMING, 0x4001);
#define URTW_RFSW_CTRL 0x272
urtw_write16_m(sc, URTW_RFSW_CTRL, 0x569a);
/*
* initialize PHY
*/
error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG);
if (error)
goto fail;
urtw_read8_m(sc, URTW_CONFIG3, &data8);
urtw_write8_m(sc, URTW_CONFIG3,
data8 | URTW_CONFIG3_ANAPARAM_WRITE);
error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL);
if (error)
goto fail;
/* setup RFE initial timing */
urtw_write16_m(sc, URTW_RF_PINS_OUTPUT, 0x0480);
urtw_write16_m(sc, URTW_RF_PINS_SELECT, 0x2488);
urtw_write16_m(sc, URTW_RF_PINS_ENABLE, 0x1fff);
usb_pause_mtx(&sc->sc_mtx, 1100);
- for (i = 0; i < N(urtw_8225v2b_rf_part0); i++) {
+ for (i = 0; i < nitems(urtw_8225v2b_rf_part0); i++) {
urtw_8225_write(sc, urtw_8225v2b_rf_part0[i].reg,
urtw_8225v2b_rf_part0[i].val);
usb_pause_mtx(&sc->sc_mtx, 1);
}
urtw_8225_write(sc, 0x00, 0x01b7);
for (i = 0; i < 95; i++) {
urtw_8225_write(sc, URTW_8225_ADDR_1_MAGIC, (uint8_t)(i + 1));
usb_pause_mtx(&sc->sc_mtx, 1);
urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC,
urtw_8225v2b_rxgain[i]);
usb_pause_mtx(&sc->sc_mtx, 1);
}
urtw_8225_write(sc, URTW_8225_ADDR_3_MAGIC, 0x080);
usb_pause_mtx(&sc->sc_mtx, 1);
urtw_8225_write(sc, URTW_8225_ADDR_5_MAGIC, 0x004);
usb_pause_mtx(&sc->sc_mtx, 1);
urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC, 0x0b7);
usb_pause_mtx(&sc->sc_mtx, 1);
usb_pause_mtx(&sc->sc_mtx, 3000);
urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, 0xc4d);
usb_pause_mtx(&sc->sc_mtx, 2000);
urtw_8225_write(sc, URTW_8225_ADDR_2_MAGIC, 0x44d);
usb_pause_mtx(&sc->sc_mtx, 1);
urtw_8225_write(sc, URTW_8225_ADDR_0_MAGIC, 0x2bf);
usb_pause_mtx(&sc->sc_mtx, 1);
urtw_write8_m(sc, URTW_TX_GAIN_CCK, 0x03);
urtw_write8_m(sc, URTW_TX_GAIN_OFDM, 0x07);
urtw_write8_m(sc, URTW_TX_ANTENNA, 0x03);
urtw_8187_write_phy_ofdm(sc, 0x80, 0x12);
for (i = 0; i < 128; i++) {
uint32_t addr, data;
data = (urtw_8225z2_agc[i] << 8) | 0x0000008f;
addr = ((i + 0x80) << 8) | 0x0000008e;
urtw_8187_write_phy_ofdm(sc, data & 0x7f, (data >> 8) & 0xff);
urtw_8187_write_phy_ofdm(sc, addr & 0x7f, (addr >> 8) & 0xff);
urtw_8187_write_phy_ofdm(sc, 0x0e, 0x00);
}
urtw_8187_write_phy_ofdm(sc, 0x80, 0x10);
- for (i = 0; i < N(urtw_8225v2b_rf_part2); i++)
+ for (i = 0; i < nitems(urtw_8225v2b_rf_part2); i++)
urtw_8187_write_phy_ofdm(sc, i, urtw_8225v2b_rf_part2[i].val);
urtw_write32_m(sc, URTW_8187B_AC_VO, (7 << 12) | (3 << 8) | 0x1c);
urtw_write32_m(sc, URTW_8187B_AC_VI, (7 << 12) | (3 << 8) | 0x1c);
urtw_write32_m(sc, URTW_8187B_AC_BE, (7 << 12) | (3 << 8) | 0x1c);
urtw_write32_m(sc, URTW_8187B_AC_BK, (7 << 12) | (3 << 8) | 0x1c);
urtw_8187_write_phy_ofdm(sc, 0x97, 0x46);
urtw_8187_write_phy_ofdm(sc, 0xa4, 0xb6);
urtw_8187_write_phy_ofdm(sc, 0x85, 0xfc);
urtw_8187_write_phy_cck(sc, 0xc1, 0x88);
fail:
return (error);
#undef N
}
static usb_error_t
urtw_8225v2b_rf_set_chan(struct urtw_softc *sc, int chan)
{
usb_error_t error;
error = urtw_8225v2b_set_txpwrlvl(sc, chan);
if (error)
goto fail;
urtw_8225_write(sc, URTW_8225_ADDR_7_MAGIC, urtw_8225_channel[chan]);
usb_pause_mtx(&sc->sc_mtx, 10);
fail:
return (error);
}
static usb_error_t
urtw_8225v2b_set_txpwrlvl(struct urtw_softc *sc, int chan)
{
int i;
uint8_t *cck_pwrtable;
uint8_t cck_pwrlvl_max = 15;
uint8_t cck_pwrlvl = sc->sc_txpwr_cck[chan] & 0xff;
uint8_t ofdm_pwrlvl = sc->sc_txpwr_ofdm[chan] & 0xff;
usb_error_t error;
/* CCK power setting */
cck_pwrlvl = (cck_pwrlvl > cck_pwrlvl_max) ?
((sc->sc_flags & URTW_RTL8187B_REV_B) ? cck_pwrlvl_max : 22) :
(cck_pwrlvl + ((sc->sc_flags & URTW_RTL8187B_REV_B) ? 0 : 7));
cck_pwrlvl += sc->sc_txpwr_cck_base;
cck_pwrlvl = (cck_pwrlvl > 35) ? 35 : cck_pwrlvl;
cck_pwrtable = (chan == 14) ? urtw_8225v2b_txpwr_cck_ch14 :
urtw_8225v2b_txpwr_cck;
if (sc->sc_flags & URTW_RTL8187B_REV_B)
cck_pwrtable += (cck_pwrlvl <= 6) ? 0 :
((cck_pwrlvl <= 11) ? 8 : 16);
else
cck_pwrtable += (cck_pwrlvl <= 5) ? 0 :
((cck_pwrlvl <= 11) ? 8 : ((cck_pwrlvl <= 17) ? 16 : 24));
for (i = 0; i < 8; i++)
urtw_8187_write_phy_cck(sc, 0x44 + i, cck_pwrtable[i]);
urtw_write8_m(sc, URTW_TX_GAIN_CCK,
urtw_8225v2_tx_gain_cck_ofdm[cck_pwrlvl] << 1);
usb_pause_mtx(&sc->sc_mtx, 1);
/* OFDM power setting */
ofdm_pwrlvl = (ofdm_pwrlvl > 15) ?
((sc->sc_flags & URTW_RTL8187B_REV_B) ? 17 : 25) :
(ofdm_pwrlvl + ((sc->sc_flags & URTW_RTL8187B_REV_B) ? 2 : 10));
ofdm_pwrlvl += sc->sc_txpwr_ofdm_base;
ofdm_pwrlvl = (ofdm_pwrlvl > 35) ? 35 : ofdm_pwrlvl;
urtw_write8_m(sc, URTW_TX_GAIN_OFDM,
urtw_8225v2_tx_gain_cck_ofdm[ofdm_pwrlvl] << 1);
if (sc->sc_flags & URTW_RTL8187B_REV_B) {
if (ofdm_pwrlvl <= 11) {
urtw_8187_write_phy_ofdm(sc, 0x87, 0x60);
urtw_8187_write_phy_ofdm(sc, 0x89, 0x60);
} else {
urtw_8187_write_phy_ofdm(sc, 0x87, 0x5c);
urtw_8187_write_phy_ofdm(sc, 0x89, 0x5c);
}
} else {
if (ofdm_pwrlvl <= 11) {
urtw_8187_write_phy_ofdm(sc, 0x87, 0x5c);
urtw_8187_write_phy_ofdm(sc, 0x89, 0x5c);
} else if (ofdm_pwrlvl <= 17) {
urtw_8187_write_phy_ofdm(sc, 0x87, 0x54);
urtw_8187_write_phy_ofdm(sc, 0x89, 0x54);
} else {
urtw_8187_write_phy_ofdm(sc, 0x87, 0x50);
urtw_8187_write_phy_ofdm(sc, 0x89, 0x50);
}
}
usb_pause_mtx(&sc->sc_mtx, 1);
fail:
return (error);
}
static usb_error_t
urtw_read8e(struct urtw_softc *sc, int val, uint8_t *data)
{
struct usb_device_request req;
usb_error_t error;
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = URTW_8187_GETREGS_REQ;
USETW(req.wValue, val | 0xfe00);
USETW(req.wIndex, 0);
USETW(req.wLength, sizeof(uint8_t));
error = urtw_do_request(sc, &req, data);
return (error);
}
static usb_error_t
urtw_write8e(struct urtw_softc *sc, int val, uint8_t data)
{
struct usb_device_request req;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = URTW_8187_SETREGS_REQ;
USETW(req.wValue, val | 0xfe00);
USETW(req.wIndex, 0);
USETW(req.wLength, sizeof(uint8_t));
return (urtw_do_request(sc, &req, &data));
}
static usb_error_t
urtw_8180_set_anaparam(struct urtw_softc *sc, uint32_t val)
{
uint8_t data;
usb_error_t error;
error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG);
if (error)
goto fail;
urtw_read8_m(sc, URTW_CONFIG3, &data);
urtw_write8_m(sc, URTW_CONFIG3, data | URTW_CONFIG3_ANAPARAM_WRITE);
urtw_write32_m(sc, URTW_ANAPARAM, val);
urtw_read8_m(sc, URTW_CONFIG3, &data);
urtw_write8_m(sc, URTW_CONFIG3, data & ~URTW_CONFIG3_ANAPARAM_WRITE);
error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL);
if (error)
goto fail;
fail:
return (error);
}
static usb_error_t
urtw_8185_set_anaparam2(struct urtw_softc *sc, uint32_t val)
{
uint8_t data;
usb_error_t error;
error = urtw_set_mode(sc, URTW_EPROM_CMD_CONFIG);
if (error)
goto fail;
urtw_read8_m(sc, URTW_CONFIG3, &data);
urtw_write8_m(sc, URTW_CONFIG3, data | URTW_CONFIG3_ANAPARAM_WRITE);
urtw_write32_m(sc, URTW_ANAPARAM2, val);
urtw_read8_m(sc, URTW_CONFIG3, &data);
urtw_write8_m(sc, URTW_CONFIG3, data & ~URTW_CONFIG3_ANAPARAM_WRITE);
error = urtw_set_mode(sc, URTW_EPROM_CMD_NORMAL);
if (error)
goto fail;
fail:
return (error);
}
static usb_error_t
urtw_intr_enable(struct urtw_softc *sc)
{
usb_error_t error;
urtw_write16_m(sc, URTW_INTR_MASK, 0xffff);
fail:
return (error);
}
static usb_error_t
urtw_intr_disable(struct urtw_softc *sc)
{
usb_error_t error;
urtw_write16_m(sc, URTW_INTR_MASK, 0);
fail:
return (error);
}
static usb_error_t
urtw_reset(struct urtw_softc *sc)
{
uint8_t data;
usb_error_t error;
error = urtw_8180_set_anaparam(sc, URTW_8225_ANAPARAM_ON);
if (error)
goto fail;
error = urtw_8185_set_anaparam2(sc, URTW_8225_ANAPARAM2_ON);
if (error)
goto fail;
error = urtw_intr_disable(sc);
if (error)
goto fail;
usb_pause_mtx(&sc->sc_mtx, 100);
error = urtw_write8e(sc, 0x18, 0x10);
if (error != 0)
goto fail;
error = urtw_write8e(sc, 0x18, 0x11);
if (error != 0)
goto fail;
error = urtw_write8e(sc, 0x18, 0x00);
if (error != 0)
goto fail;
usb_pause_mtx(&sc->sc_mtx, 100);
urtw_read8_m(sc, URTW_CMD, &data);
data = (data & 0x2) | URTW_CMD_RST;
urtw_write8_m(sc, URTW_CMD, data);
usb_pause_mtx(&sc->sc_mtx, 100);
urtw_read8_m(sc, URTW_CMD, &data);
if (data & URTW_CMD_RST) {
device_printf(sc->sc_dev, "reset timeout\n");
goto fail;
}
error = urtw_set_mode(sc, URTW_EPROM_CMD_LOAD);
if (error)
goto fail;
usb_pause_mtx(&sc->sc_mtx, 100);
error = urtw_8180_set_anaparam(sc, URTW_8225_ANAPARAM_ON);
if (error)
goto fail;
error = urtw_8185_set_anaparam2(sc, URTW_8225_ANAPARAM2_ON);
if (error)
goto fail;
fail:
return (error);
}
static usb_error_t
urtw_led_ctl(struct urtw_softc *sc, int mode)
{
usb_error_t error = 0;
switch (sc->sc_strategy) {
case URTW_SW_LED_MODE0:
error = urtw_led_mode0(sc, mode);
break;
case URTW_SW_LED_MODE1:
error = urtw_led_mode1(sc, mode);
break;
case URTW_SW_LED_MODE2:
error = urtw_led_mode2(sc, mode);
break;
case URTW_SW_LED_MODE3:
error = urtw_led_mode3(sc, mode);
break;
default:
DPRINTF(sc, URTW_DEBUG_STATE,
"unsupported LED mode %d\n", sc->sc_strategy);
error = USB_ERR_INVAL;
break;
}
return (error);
}
static usb_error_t
urtw_led_mode0(struct urtw_softc *sc, int mode)
{
switch (mode) {
case URTW_LED_CTL_POWER_ON:
sc->sc_gpio_ledstate = URTW_LED_POWER_ON_BLINK;
break;
case URTW_LED_CTL_TX:
if (sc->sc_gpio_ledinprogress == 1)
return (0);
sc->sc_gpio_ledstate = URTW_LED_BLINK_NORMAL;
sc->sc_gpio_blinktime = 2;
break;
case URTW_LED_CTL_LINK:
sc->sc_gpio_ledstate = URTW_LED_ON;
break;
default:
DPRINTF(sc, URTW_DEBUG_STATE,
"unsupported LED mode 0x%x", mode);
return (USB_ERR_INVAL);
}
switch (sc->sc_gpio_ledstate) {
case URTW_LED_ON:
if (sc->sc_gpio_ledinprogress != 0)
break;
urtw_led_on(sc, URTW_LED_GPIO);
break;
case URTW_LED_BLINK_NORMAL:
if (sc->sc_gpio_ledinprogress != 0)
break;
sc->sc_gpio_ledinprogress = 1;
sc->sc_gpio_blinkstate = (sc->sc_gpio_ledon != 0) ?
URTW_LED_OFF : URTW_LED_ON;
usb_callout_reset(&sc->sc_led_ch, hz, urtw_led_ch, sc);
break;
case URTW_LED_POWER_ON_BLINK:
urtw_led_on(sc, URTW_LED_GPIO);
usb_pause_mtx(&sc->sc_mtx, 100);
urtw_led_off(sc, URTW_LED_GPIO);
break;
default:
DPRINTF(sc, URTW_DEBUG_STATE,
"unknown LED status 0x%x", sc->sc_gpio_ledstate);
return (USB_ERR_INVAL);
}
return (0);
}
static usb_error_t
urtw_led_mode1(struct urtw_softc *sc, int mode)
{
return (USB_ERR_INVAL);
}
static usb_error_t
urtw_led_mode2(struct urtw_softc *sc, int mode)
{
return (USB_ERR_INVAL);
}
static usb_error_t
urtw_led_mode3(struct urtw_softc *sc, int mode)
{
return (USB_ERR_INVAL);
}
static usb_error_t
urtw_led_on(struct urtw_softc *sc, int type)
{
usb_error_t error;
if (type == URTW_LED_GPIO) {
switch (sc->sc_gpio_ledpin) {
case URTW_LED_PIN_GPIO0:
urtw_write8_m(sc, URTW_GPIO, 0x01);
urtw_write8_m(sc, URTW_GP_ENABLE, 0x00);
break;
default:
DPRINTF(sc, URTW_DEBUG_STATE,
"unsupported LED PIN type 0x%x",
sc->sc_gpio_ledpin);
error = USB_ERR_INVAL;
goto fail;
}
} else {
DPRINTF(sc, URTW_DEBUG_STATE,
"unsupported LED type 0x%x", type);
error = USB_ERR_INVAL;
goto fail;
}
sc->sc_gpio_ledon = 1;
fail:
return (error);
}
static usb_error_t
urtw_led_off(struct urtw_softc *sc, int type)
{
usb_error_t error;
if (type == URTW_LED_GPIO) {
switch (sc->sc_gpio_ledpin) {
case URTW_LED_PIN_GPIO0:
urtw_write8_m(sc, URTW_GPIO, URTW_GPIO_DATA_MAGIC1);
urtw_write8_m(sc,
URTW_GP_ENABLE, URTW_GP_ENABLE_DATA_MAGIC1);
break;
default:
DPRINTF(sc, URTW_DEBUG_STATE,
"unsupported LED PIN type 0x%x",
sc->sc_gpio_ledpin);
error = USB_ERR_INVAL;
goto fail;
}
} else {
DPRINTF(sc, URTW_DEBUG_STATE,
"unsupported LED type 0x%x", type);
error = USB_ERR_INVAL;
goto fail;
}
sc->sc_gpio_ledon = 0;
fail:
return (error);
}
static void
urtw_led_ch(void *arg)
{
struct urtw_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
ieee80211_runtask(ic, &sc->sc_led_task);
}
static void
urtw_ledtask(void *arg, int pending)
{
struct urtw_softc *sc = arg;
if (sc->sc_strategy != URTW_SW_LED_MODE0) {
DPRINTF(sc, URTW_DEBUG_STATE,
"could not process a LED strategy 0x%x",
sc->sc_strategy);
return;
}
URTW_LOCK(sc);
urtw_led_blink(sc);
URTW_UNLOCK(sc);
}
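/*
 * Advance the software LED blink state machine: toggle the LED,
 * consume the blink budget and re-arm the callout while blinking
 * should continue.
 */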
static usb_error_t
urtw_led_blink(struct urtw_softc *sc)
{
uint8_t ing = 0;
usb_error_t error;
if (sc->sc_gpio_blinkstate == URTW_LED_ON)
error = urtw_led_on(sc, URTW_LED_GPIO);
else
error = urtw_led_off(sc, URTW_LED_GPIO);
sc->sc_gpio_blinktime--;
if (sc->sc_gpio_blinktime == 0)
ing = 1;
else {
if (sc->sc_gpio_ledstate != URTW_LED_BLINK_NORMAL &&
sc->sc_gpio_ledstate != URTW_LED_BLINK_SLOWLY &&
sc->sc_gpio_ledstate != URTW_LED_BLINK_CM3)
ing = 1;
}
if (ing == 1) {
if (sc->sc_gpio_ledstate == URTW_LED_ON &&
sc->sc_gpio_ledon == 0)
error = urtw_led_on(sc, URTW_LED_GPIO);
else if (sc->sc_gpio_ledstate == URTW_LED_OFF &&
sc->sc_gpio_ledon == 1)
error = urtw_led_off(sc, URTW_LED_GPIO);
sc->sc_gpio_blinktime = 0;
sc->sc_gpio_ledinprogress = 0;
return (0);
}
sc->sc_gpio_blinkstate = (sc->sc_gpio_blinkstate != URTW_LED_ON) ?
URTW_LED_ON : URTW_LED_OFF;
switch (sc->sc_gpio_ledstate) {
case URTW_LED_BLINK_NORMAL:
usb_callout_reset(&sc->sc_led_ch, hz, urtw_led_ch, sc);
break;
default:
DPRINTF(sc, URTW_DEBUG_STATE,
"unknown LED status 0x%x",
sc->sc_gpio_ledstate);
return (USB_ERR_INVAL);
}
return (0);
}
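/*
 * Start the bulk RX transfer and program the RX filter; the RTL8187L
 * additionally needs the RX enable bit set in the command register.
 */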
static usb_error_t
urtw_rx_enable(struct urtw_softc *sc)
{
uint8_t data;
usb_error_t error;
usbd_transfer_start((sc->sc_flags & URTW_RTL8187B) ?
sc->sc_xfer[URTW_8187B_BULK_RX] : sc->sc_xfer[URTW_8187L_BULK_RX]);
error = urtw_rx_setconf(sc);
if (error != 0)
goto fail;
if ((sc->sc_flags & URTW_RTL8187B) == 0) {
urtw_read8_m(sc, URTW_CMD, &data);
urtw_write8_m(sc, URTW_CMD, data | URTW_CMD_RX_ENABLE);
}
fail:
return (error);
}
static usb_error_t
urtw_tx_enable(struct urtw_softc *sc)
{
uint8_t data8;
uint32_t data;
usb_error_t error;
if (sc->sc_flags & URTW_RTL8187B) {
urtw_read32_m(sc, URTW_TX_CONF, &data);
data &= ~URTW_TX_LOOPBACK_MASK;
data &= ~(URTW_TX_DPRETRY_MASK | URTW_TX_RTSRETRY_MASK);
data &= ~(URTW_TX_NOCRC | URTW_TX_MXDMA_MASK);
data &= ~URTW_TX_SWPLCPLEN;
data |= URTW_TX_HW_SEQNUM | URTW_TX_DISREQQSIZE |
(7 << 8) | /* short retry limit */
(7 << 0) | /* long retry limit */
(7 << 21); /* MAX TX DMA */
urtw_write32_m(sc, URTW_TX_CONF, data);
urtw_read8_m(sc, URTW_MSR, &data8);
data8 |= URTW_MSR_LINK_ENEDCA;
urtw_write8_m(sc, URTW_MSR, data8);
return (error);
}
urtw_read8_m(sc, URTW_CW_CONF, &data8);
data8 &= ~(URTW_CW_CONF_PERPACKET_CW | URTW_CW_CONF_PERPACKET_RETRY);
urtw_write8_m(sc, URTW_CW_CONF, data8);
urtw_read8_m(sc, URTW_TX_AGC_CTL, &data8);
data8 &= ~URTW_TX_AGC_CTL_PERPACKET_GAIN;
data8 &= ~URTW_TX_AGC_CTL_PERPACKET_ANTSEL;
data8 &= ~URTW_TX_AGC_CTL_FEEDBACK_ANT;
urtw_write8_m(sc, URTW_TX_AGC_CTL, data8);
urtw_read32_m(sc, URTW_TX_CONF, &data);
data &= ~URTW_TX_LOOPBACK_MASK;
data |= URTW_TX_LOOPBACK_NONE;
data &= ~(URTW_TX_DPRETRY_MASK | URTW_TX_RTSRETRY_MASK);
data |= sc->sc_tx_retry << URTW_TX_DPRETRY_SHIFT;
data |= sc->sc_rts_retry << URTW_TX_RTSRETRY_SHIFT;
data &= ~(URTW_TX_NOCRC | URTW_TX_MXDMA_MASK);
data |= URTW_TX_MXDMA_2048 | URTW_TX_CWMIN | URTW_TX_DISCW;
data &= ~URTW_TX_SWPLCPLEN;
data |= URTW_TX_NOICV;
urtw_write32_m(sc, URTW_TX_CONF, data);
urtw_read8_m(sc, URTW_CMD, &data8);
urtw_write8_m(sc, URTW_CMD, data8 | URTW_CMD_TX_ENABLE);
fail:
return (error);
}
static usb_error_t
urtw_rx_setconf(struct urtw_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t data;
usb_error_t error;
urtw_read32_m(sc, URTW_RX, &data);
data = data &~ URTW_RX_FILTER_MASK;
if (sc->sc_flags & URTW_RTL8187B) {
data = data | URTW_RX_FILTER_MNG | URTW_RX_FILTER_DATA |
URTW_RX_FILTER_MCAST | URTW_RX_FILTER_BCAST |
URTW_RX_FILTER_NICMAC | URTW_RX_CHECK_BSSID |
URTW_RX_FIFO_THRESHOLD_NONE |
URTW_MAX_RX_DMA_2048 |
URTW_RX_AUTORESETPHY | URTW_RCR_ONLYERLPKT;
} else {
data = data | URTW_RX_FILTER_MNG | URTW_RX_FILTER_DATA;
data = data | URTW_RX_FILTER_BCAST | URTW_RX_FILTER_MCAST;
if (ic->ic_opmode == IEEE80211_M_MONITOR) {
data = data | URTW_RX_FILTER_ICVERR;
data = data | URTW_RX_FILTER_PWR;
}
if (sc->sc_crcmon == 1 && ic->ic_opmode == IEEE80211_M_MONITOR)
data = data | URTW_RX_FILTER_CRCERR;
if (ic->ic_opmode == IEEE80211_M_MONITOR ||
- (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC))) {
+ ic->ic_promisc > 0 || ic->ic_allmulti > 0) {
data = data | URTW_RX_FILTER_ALLMAC;
} else {
data = data | URTW_RX_FILTER_NICMAC;
data = data | URTW_RX_CHECK_BSSID;
}
data = data &~ URTW_RX_FIFO_THRESHOLD_MASK;
data = data | URTW_RX_FIFO_THRESHOLD_NONE |
URTW_RX_AUTORESETPHY;
data = data &~ URTW_MAX_RX_DMA_MASK;
data = data | URTW_MAX_RX_DMA_2048 | URTW_RCR_ONLYERLPKT;
}
urtw_write32_m(sc, URTW_RX, data);
fail:
return (error);
}
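/*
 * Process a completed RX transfer: parse the Realtek RX descriptor
 * appended to the buffer, update radiotap state and the current RX
 * rate, and return the frame as an mbuf (NULL on error).
 */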
static struct mbuf *
urtw_rxeof(struct usb_xfer *xfer, struct urtw_data *data, int *rssi_p,
int8_t *nf_p)
{
int actlen, flen, rssi;
struct ieee80211_frame *wh;
struct mbuf *m, *mnew;
struct urtw_softc *sc = data->sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint8_t noise = 0, rate;
usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
if (actlen < (int)URTW_MIN_RXBUFSZ) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return (NULL);
}
if (sc->sc_flags & URTW_RTL8187B) {
struct urtw_8187b_rxhdr *rx;
rx = (struct urtw_8187b_rxhdr *)(data->buf +
(actlen - (sizeof(struct urtw_8187b_rxhdr))));
flen = le32toh(rx->flag) & 0xfff;
if (flen > actlen) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return (NULL);
}
rate = (le32toh(rx->flag) >> URTW_RX_FLAG_RXRATE_SHIFT) & 0xf;
/* XXX correct? */
rssi = rx->rssi & URTW_RX_RSSI_MASK;
noise = rx->noise;
} else {
struct urtw_8187l_rxhdr *rx;
rx = (struct urtw_8187l_rxhdr *)(data->buf +
(actlen - (sizeof(struct urtw_8187l_rxhdr))));
flen = le32toh(rx->flag) & 0xfff;
if (flen > actlen) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return (NULL);
}
rate = (le32toh(rx->flag) >> URTW_RX_FLAG_RXRATE_SHIFT) & 0xf;
/* XXX correct? */
rssi = rx->rssi & URTW_RX_8187L_RSSI_MASK;
noise = rx->noise;
}
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return (NULL);
}
m = data->m;
data->m = mnew;
data->buf = mtod(mnew, uint8_t *);
/* finalize mbuf */
- m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = flen - IEEE80211_CRC_LEN;
if (ieee80211_radiotap_active(ic)) {
struct urtw_rx_radiotap_header *tap = &sc->sc_rxtap;
/* XXX Are variables correct? */
tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq);
tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
tap->wr_dbm_antsignal = (int8_t)rssi;
}
wh = mtod(m, struct ieee80211_frame *);
if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA)
sc->sc_currate = (rate > 0) ? rate : sc->sc_currate;
*rssi_p = rssi;
*nf_p = noise; /* XXX correct? */
return (m);
}
static void
urtw_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct urtw_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct mbuf *m = NULL;
struct urtw_data *data;
int8_t nf = -95;
int rssi = 1;
URTW_ASSERT_LOCKED(sc);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
data = STAILQ_FIRST(&sc->sc_rx_active);
if (data == NULL)
goto setup;
STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
m = urtw_rxeof(xfer, data, &rssi, &nf);
STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
/* FALLTHROUGH */
case USB_ST_SETUP:
setup:
data = STAILQ_FIRST(&sc->sc_rx_inactive);
if (data == NULL) {
KASSERT(m == NULL, ("mbuf isn't NULL"));
return;
}
STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next);
STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next);
usbd_xfer_set_frame_data(xfer, 0, data->buf,
usbd_xfer_max_len(xfer));
usbd_transfer_submit(xfer);
/*
 * To avoid a LOR, unlock our private mutex here before calling
 * ieee80211_input(); we are at the end of a USB callback, so it
 * is safe to unlock.
 */
URTW_UNLOCK(sc);
if (m != NULL) {
wh = mtod(m, struct ieee80211_frame *);
ni = ieee80211_find_rxnode(ic,
(struct ieee80211_frame_min *)wh);
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi, nf);
/* node is no longer needed */
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi, nf);
m = NULL;
}
URTW_LOCK(sc);
break;
default:
/* Move the buffer back to the inactive queue due to an error. */
data = STAILQ_FIRST(&sc->sc_rx_active);
if (data != NULL) {
STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
}
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto setup;
}
break;
}
}
#define URTW_STATUS_TYPE_TXCLOSE 1
#define URTW_STATUS_TYPE_BEACON_INTR 0
static void
urtw_txstatus_eof(struct usb_xfer *xfer)
{
struct urtw_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
int actlen, type, pktretry, seq;
uint64_t val;
usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
if (actlen != sizeof(uint64_t))
return;
val = le64toh(sc->sc_txstatus);
type = (val >> 30) & 0x3;
if (type == URTW_STATUS_TYPE_TXCLOSE) {
pktretry = val & 0xff;
seq = (val >> 16) & 0xff;
if (pktretry == URTW_TX_MAXRETRY)
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(ic->ic_oerrors, 1);
DPRINTF(sc, URTW_DEBUG_TXSTATUS, "pktretry %d seq %#x\n",
pktretry, seq);
}
}
static void
urtw_bulk_tx_status_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct urtw_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
void *dma_buf = usbd_xfer_get_frame_buffer(xfer, 0);
URTW_ASSERT_LOCKED(sc);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
urtw_txstatus_eof(xfer);
/* FALLTHROUGH */
case USB_ST_SETUP:
setup:
memcpy(dma_buf, &sc->sc_txstatus, sizeof(uint64_t));
usbd_xfer_set_frame_len(xfer, 0, sizeof(uint64_t));
usbd_transfer_submit(xfer);
break;
default:
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto setup;
}
break;
}
}
static void
urtw_txeof(struct usb_xfer *xfer, struct urtw_data *data)
{
struct urtw_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct mbuf *m;
URTW_ASSERT_LOCKED(sc);
- /*
- * Do any tx complete callback. Note this must be done before releasing
- * the node reference.
- */
if (data->m) {
- m = data->m;
- if (m->m_flags & M_TXCB) {
- /* XXX status? */
- ieee80211_process_callback(data->ni, m, 0);
- }
- m_freem(m);
+ /* XXX status? */
+ ieee80211_tx_complete(data->ni, data->m, 0);
data->m = NULL;
- }
- if (data->ni) {
- ieee80211_free_node(data->ni);
data->ni = NULL;
}
sc->sc_txtimer = 0;
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
static void
urtw_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct urtw_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
struct urtw_data *data;
URTW_ASSERT_LOCKED(sc);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
data = STAILQ_FIRST(&sc->sc_tx_active);
if (data == NULL)
goto setup;
STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next);
urtw_txeof(xfer, data);
STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next);
/* FALLTHROUGH */
case USB_ST_SETUP:
setup:
data = STAILQ_FIRST(&sc->sc_tx_pending);
if (data == NULL) {
DPRINTF(sc, URTW_DEBUG_XMIT,
"%s: empty pending queue\n", __func__);
return;
}
STAILQ_REMOVE_HEAD(&sc->sc_tx_pending, next);
STAILQ_INSERT_TAIL(&sc->sc_tx_active, data, next);
usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen);
usbd_transfer_submit(xfer);
- URTW_UNLOCK(sc);
- urtw_start(ifp);
- URTW_LOCK(sc);
+ urtw_start(sc);
break;
default:
data = STAILQ_FIRST(&sc->sc_tx_active);
if (data == NULL)
goto setup;
if (data->ni != NULL) {
+ if_inc_counter(data->ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(data->ni);
data->ni = NULL;
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
goto setup;
}
break;
}
}
static struct urtw_data *
_urtw_getbuf(struct urtw_softc *sc)
{
struct urtw_data *bf;
bf = STAILQ_FIRST(&sc->sc_tx_inactive);
if (bf != NULL)
STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next);
else
bf = NULL;
if (bf == NULL)
DPRINTF(sc, URTW_DEBUG_XMIT, "%s: %s\n", __func__,
"out of xmit buffers");
return (bf);
}
static struct urtw_data *
urtw_getbuf(struct urtw_softc *sc)
{
struct urtw_data *bf;
URTW_ASSERT_LOCKED(sc);
bf = _urtw_getbuf(sc);
- if (bf == NULL) {
- struct ifnet *ifp = sc->sc_ifp;
-
+ if (bf == NULL)
DPRINTF(sc, URTW_DEBUG_XMIT, "%s: stop queue\n", __func__);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- }
return (bf);
}
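/*
 * Airtime helpers: urtw_isbmode() reports whether a rate (in 500Kbps
 * units) is a CCK/DSSS rate and urtw_rate2dbps() maps an OFDM rate to
 * its data bits per symbol.
 */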
static int
urtw_isbmode(uint16_t rate)
{
return ((rate <= 22 && rate != 12 && rate != 18) ||
rate == 44) ? (1) : (0);
}
static uint16_t
urtw_rate2dbps(uint16_t rate)
{
switch(rate) {
case 12:
case 18:
case 24:
case 36:
case 48:
case 72:
case 96:
case 108:
return (rate * 2);
default:
break;
}
return (24);
}
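/*
 * Estimate the transmit duration of a frame in microseconds: PLCP
 * preamble and header plus payload time for CCK rates, or the OFDM
 * preamble, SIGNAL field and data symbol count otherwise.
 */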
static int
urtw_compute_txtime(uint16_t framelen, uint16_t rate,
uint8_t ismgt, uint8_t isshort)
{
uint16_t ceiling, frametime, n_dbps;
if (urtw_isbmode(rate)) {
if (ismgt || !isshort || rate == 2)
frametime = (uint16_t)(144 + 48 +
(framelen * 8 / (rate / 2)));
else
frametime = (uint16_t)(72 + 24 +
(framelen * 8 / (rate / 2)));
if ((framelen * 8 % (rate / 2)) != 0)
frametime++;
} else {
n_dbps = urtw_rate2dbps(rate);
ceiling = (16 + 8 * framelen + 6) / n_dbps
+ (((16 + 8 * framelen + 6) % n_dbps) ? 1 : 0);
frametime = (uint16_t)(16 + 4 + 4 * ceiling + 6);
}
return (frametime);
}
/*
* Callback from the 802.11 layer to update the
* slot time based on the current setting.
*/
static void
urtw_updateslot(struct ieee80211com *ic)
{
struct urtw_softc *sc = ic->ic_softc;
ieee80211_runtask(ic, &sc->sc_updateslot_task);
}
static void
urtw_updateslottask(void *arg, int pending)
{
struct urtw_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
int error;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
-
URTW_LOCK(sc);
+ if ((sc->sc_flags & URTW_RUNNING) == 0) {
+ URTW_UNLOCK(sc);
+ return;
+ }
if (sc->sc_flags & URTW_RTL8187B) {
urtw_write8_m(sc, URTW_SIFS, 0x22);
if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
urtw_write8_m(sc, URTW_SLOT, 0x9);
else
urtw_write8_m(sc, URTW_SLOT, 0x14);
urtw_write8_m(sc, URTW_8187B_EIFS, 0x5b);
urtw_write8_m(sc, URTW_CARRIER_SCOUNT, 0x5b);
} else {
urtw_write8_m(sc, URTW_SIFS, 0x22);
if (sc->sc_state == IEEE80211_S_ASSOC &&
ic->ic_flags & IEEE80211_F_SHSLOT)
urtw_write8_m(sc, URTW_SLOT, 0x9);
else
urtw_write8_m(sc, URTW_SLOT, 0x14);
if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
urtw_write8_m(sc, URTW_DIFS, 0x14);
urtw_write8_m(sc, URTW_EIFS, 0x5b - 0x14);
urtw_write8_m(sc, URTW_CW_VAL, 0x73);
} else {
urtw_write8_m(sc, URTW_DIFS, 0x24);
urtw_write8_m(sc, URTW_EIFS, 0x5b - 0x24);
urtw_write8_m(sc, URTW_CW_VAL, 0xa5);
}
}
fail:
URTW_UNLOCK(sc);
}
static void
urtw_sysctl_node(struct urtw_softc *sc)
{
#define URTW_SYSCTL_STAT_ADD32(c, h, n, p, d) \
SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *child, *parent;
struct sysctl_oid *tree;
struct urtw_stats *stats = &sc->sc_stats;
ctx = device_get_sysctl_ctx(sc->sc_dev);
child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));
tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
NULL, "URTW statistics");
parent = SYSCTL_CHILDREN(tree);
/* Tx statistics. */
tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
NULL, "Tx MAC statistics");
child = SYSCTL_CHILDREN(tree);
URTW_SYSCTL_STAT_ADD32(ctx, child, "1m", &stats->txrates[0],
"1 Mbit/s");
URTW_SYSCTL_STAT_ADD32(ctx, child, "2m", &stats->txrates[1],
"2 Mbit/s");
URTW_SYSCTL_STAT_ADD32(ctx, child, "5.5m", &stats->txrates[2],
"5.5 Mbit/s");
URTW_SYSCTL_STAT_ADD32(ctx, child, "6m", &stats->txrates[4],
"6 Mbit/s");
URTW_SYSCTL_STAT_ADD32(ctx, child, "9m", &stats->txrates[5],
"9 Mbit/s");
URTW_SYSCTL_STAT_ADD32(ctx, child, "11m", &stats->txrates[3],
"11 Mbit/s");
URTW_SYSCTL_STAT_ADD32(ctx, child, "12m", &stats->txrates[6],
"12 Mbit/s");
URTW_SYSCTL_STAT_ADD32(ctx, child, "18m", &stats->txrates[7],
"18 Mbit/s");
URTW_SYSCTL_STAT_ADD32(ctx, child, "24m", &stats->txrates[8],
"24 Mbit/s");
URTW_SYSCTL_STAT_ADD32(ctx, child, "36m", &stats->txrates[9],
"36 Mbit/s");
URTW_SYSCTL_STAT_ADD32(ctx, child, "48m", &stats->txrates[10],
"48 Mbit/s");
URTW_SYSCTL_STAT_ADD32(ctx, child, "54m", &stats->txrates[11],
"54 Mbit/s");
#undef URTW_SYSCTL_STAT_ADD32
}
static device_method_t urtw_methods[] = {
DEVMETHOD(device_probe, urtw_match),
DEVMETHOD(device_attach, urtw_attach),
DEVMETHOD(device_detach, urtw_detach),
DEVMETHOD_END
};
static driver_t urtw_driver = {
.name = "urtw",
.methods = urtw_methods,
.size = sizeof(struct urtw_softc)
};
static devclass_t urtw_devclass;
DRIVER_MODULE(urtw, uhub, urtw_driver, urtw_devclass, NULL, 0);
MODULE_DEPEND(urtw, wlan, 1, 1, 1);
MODULE_DEPEND(urtw, usb, 1, 1, 1);
MODULE_VERSION(urtw, 1);
Index: head/sys/dev/usb/wlan/if_urtwn.c
===================================================================
--- head/sys/dev/usb/wlan/if_urtwn.c (revision 287196)
+++ head/sys/dev/usb/wlan/if_urtwn.c (revision 287197)
@@ -1,3586 +1,3496 @@
/* $OpenBSD: if_urtwn.c,v 1.16 2011/02/10 17:26:40 jakemsr Exp $ */
/*-
* Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
* Copyright (c) 2014 Kevin Lo <kevlo@FreeBSD.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for Realtek RTL8188CE-VAU/RTL8188CUS/RTL8188EU/RTL8188RU/RTL8192CU.
*/
#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/kdb.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_ratectl.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include "usbdevs.h"
#define USB_DEBUG_VAR urtwn_debug
#include <dev/usb/usb_debug.h>
#include <dev/usb/wlan/if_urtwnreg.h>
#ifdef USB_DEBUG
static int urtwn_debug = 0;
SYSCTL_NODE(_hw_usb, OID_AUTO, urtwn, CTLFLAG_RW, 0, "USB urtwn");
SYSCTL_INT(_hw_usb_urtwn, OID_AUTO, debug, CTLFLAG_RWTUN, &urtwn_debug, 0,
"Debug level");
#endif
#define URTWN_RSSI(r) (r) - 110
#define IEEE80211_HAS_ADDR4(wh) \
(((wh)->i_fc[1] & IEEE80211_FC1_DIR_MASK) == IEEE80211_FC1_DIR_DSTODS)
/* various supported device vendors/products */
static const STRUCT_USB_HOST_ID urtwn_devs[] = {
#define URTWN_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
#define URTWN_RTL8188E_DEV(v,p) \
{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, URTWN_RTL8188E) }
#define URTWN_RTL8188E 1
URTWN_DEV(ABOCOM, RTL8188CU_1),
URTWN_DEV(ABOCOM, RTL8188CU_2),
URTWN_DEV(ABOCOM, RTL8192CU),
URTWN_DEV(ASUS, RTL8192CU),
URTWN_DEV(ASUS, USBN10NANO),
URTWN_DEV(AZUREWAVE, RTL8188CE_1),
URTWN_DEV(AZUREWAVE, RTL8188CE_2),
URTWN_DEV(AZUREWAVE, RTL8188CU),
URTWN_DEV(BELKIN, F7D2102),
URTWN_DEV(BELKIN, RTL8188CU),
URTWN_DEV(BELKIN, RTL8192CU),
URTWN_DEV(CHICONY, RTL8188CUS_1),
URTWN_DEV(CHICONY, RTL8188CUS_2),
URTWN_DEV(CHICONY, RTL8188CUS_3),
URTWN_DEV(CHICONY, RTL8188CUS_4),
URTWN_DEV(CHICONY, RTL8188CUS_5),
URTWN_DEV(COREGA, RTL8192CU),
URTWN_DEV(DLINK, RTL8188CU),
URTWN_DEV(DLINK, RTL8192CU_1),
URTWN_DEV(DLINK, RTL8192CU_2),
URTWN_DEV(DLINK, RTL8192CU_3),
URTWN_DEV(DLINK, DWA131B),
URTWN_DEV(EDIMAX, EW7811UN),
URTWN_DEV(EDIMAX, RTL8192CU),
URTWN_DEV(FEIXUN, RTL8188CU),
URTWN_DEV(FEIXUN, RTL8192CU),
URTWN_DEV(GUILLEMOT, HWNUP150),
URTWN_DEV(HAWKING, RTL8192CU),
URTWN_DEV(HP3, RTL8188CU),
URTWN_DEV(NETGEAR, WNA1000M),
URTWN_DEV(NETGEAR, RTL8192CU),
URTWN_DEV(NETGEAR4, RTL8188CU),
URTWN_DEV(NOVATECH, RTL8188CU),
URTWN_DEV(PLANEX2, RTL8188CU_1),
URTWN_DEV(PLANEX2, RTL8188CU_2),
URTWN_DEV(PLANEX2, RTL8188CU_3),
URTWN_DEV(PLANEX2, RTL8188CU_4),
URTWN_DEV(PLANEX2, RTL8188CUS),
URTWN_DEV(PLANEX2, RTL8192CU),
URTWN_DEV(REALTEK, RTL8188CE_0),
URTWN_DEV(REALTEK, RTL8188CE_1),
URTWN_DEV(REALTEK, RTL8188CTV),
URTWN_DEV(REALTEK, RTL8188CU_0),
URTWN_DEV(REALTEK, RTL8188CU_1),
URTWN_DEV(REALTEK, RTL8188CU_2),
URTWN_DEV(REALTEK, RTL8188CU_3),
URTWN_DEV(REALTEK, RTL8188CU_COMBO),
URTWN_DEV(REALTEK, RTL8188CUS),
URTWN_DEV(REALTEK, RTL8188RU_1),
URTWN_DEV(REALTEK, RTL8188RU_2),
URTWN_DEV(REALTEK, RTL8188RU_3),
URTWN_DEV(REALTEK, RTL8191CU),
URTWN_DEV(REALTEK, RTL8192CE),
URTWN_DEV(REALTEK, RTL8192CU),
URTWN_DEV(SITECOMEU, RTL8188CU_1),
URTWN_DEV(SITECOMEU, RTL8188CU_2),
URTWN_DEV(SITECOMEU, RTL8192CU),
URTWN_DEV(TRENDNET, RTL8188CU),
URTWN_DEV(TRENDNET, RTL8192CU),
URTWN_DEV(ZYXEL, RTL8192CU),
/* URTWN_RTL8188E */
URTWN_RTL8188E_DEV(DLINK, DWA123D1),
URTWN_RTL8188E_DEV(DLINK, DWA125D1),
URTWN_RTL8188E_DEV(ELECOM, WDC150SU2M),
URTWN_RTL8188E_DEV(REALTEK, RTL8188ETV),
URTWN_RTL8188E_DEV(REALTEK, RTL8188EU),
#undef URTWN_RTL8188E_DEV
#undef URTWN_DEV
};
static device_probe_t urtwn_match;
static device_attach_t urtwn_attach;
static device_detach_t urtwn_detach;
static usb_callback_t urtwn_bulk_tx_callback;
static usb_callback_t urtwn_bulk_rx_callback;
-static usb_error_t urtwn_do_request(struct urtwn_softc *sc,
- struct usb_device_request *req, void *data);
+static usb_error_t urtwn_do_request(struct urtwn_softc *,
+ struct usb_device_request *, void *);
static struct ieee80211vap *urtwn_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void urtwn_vap_delete(struct ieee80211vap *);
static struct mbuf * urtwn_rx_frame(struct urtwn_softc *, uint8_t *, int,
int *);
static struct mbuf * urtwn_rxeof(struct usb_xfer *, struct urtwn_data *,
int *, int8_t *);
static void urtwn_txeof(struct usb_xfer *, struct urtwn_data *);
static int urtwn_alloc_list(struct urtwn_softc *,
struct urtwn_data[], int, int);
static int urtwn_alloc_rx_list(struct urtwn_softc *);
static int urtwn_alloc_tx_list(struct urtwn_softc *);
static void urtwn_free_tx_list(struct urtwn_softc *);
static void urtwn_free_rx_list(struct urtwn_softc *);
static void urtwn_free_list(struct urtwn_softc *,
struct urtwn_data data[], int);
static struct urtwn_data * _urtwn_getbuf(struct urtwn_softc *);
static struct urtwn_data * urtwn_getbuf(struct urtwn_softc *);
static int urtwn_write_region_1(struct urtwn_softc *, uint16_t,
uint8_t *, int);
static void urtwn_write_1(struct urtwn_softc *, uint16_t, uint8_t);
static void urtwn_write_2(struct urtwn_softc *, uint16_t, uint16_t);
static void urtwn_write_4(struct urtwn_softc *, uint16_t, uint32_t);
static int urtwn_read_region_1(struct urtwn_softc *, uint16_t,
uint8_t *, int);
static uint8_t urtwn_read_1(struct urtwn_softc *, uint16_t);
static uint16_t urtwn_read_2(struct urtwn_softc *, uint16_t);
static uint32_t urtwn_read_4(struct urtwn_softc *, uint16_t);
static int urtwn_fw_cmd(struct urtwn_softc *, uint8_t,
const void *, int);
static void urtwn_r92c_rf_write(struct urtwn_softc *, int,
uint8_t, uint32_t);
static void urtwn_r88e_rf_write(struct urtwn_softc *, int,
uint8_t, uint32_t);
static uint32_t urtwn_rf_read(struct urtwn_softc *, int, uint8_t);
static int urtwn_llt_write(struct urtwn_softc *, uint32_t,
uint32_t);
static uint8_t urtwn_efuse_read_1(struct urtwn_softc *, uint16_t);
static void urtwn_efuse_read(struct urtwn_softc *);
static void urtwn_efuse_switch_power(struct urtwn_softc *);
static int urtwn_read_chipid(struct urtwn_softc *);
static void urtwn_read_rom(struct urtwn_softc *);
static void urtwn_r88e_read_rom(struct urtwn_softc *);
static int urtwn_ra_init(struct urtwn_softc *);
static void urtwn_tsf_sync_enable(struct urtwn_softc *);
static void urtwn_set_led(struct urtwn_softc *, int, int);
static int urtwn_newstate(struct ieee80211vap *,
enum ieee80211_state, int);
static void urtwn_watchdog(void *);
static void urtwn_update_avgrssi(struct urtwn_softc *, int, int8_t);
static int8_t urtwn_get_rssi(struct urtwn_softc *, int, void *);
static int8_t urtwn_r88e_get_rssi(struct urtwn_softc *, int, void *);
static int urtwn_tx_start(struct urtwn_softc *,
struct ieee80211_node *, struct mbuf *,
struct urtwn_data *);
-static void urtwn_start(struct ifnet *);
-static void urtwn_start_locked(struct ifnet *,
- struct urtwn_softc *);
-static int urtwn_ioctl(struct ifnet *, u_long, caddr_t);
+static int urtwn_transmit(struct ieee80211com *, struct mbuf *);
+static void urtwn_start(struct urtwn_softc *);
+static void urtwn_parent(struct ieee80211com *);
static int urtwn_r92c_power_on(struct urtwn_softc *);
static int urtwn_r88e_power_on(struct urtwn_softc *);
static int urtwn_llt_init(struct urtwn_softc *);
static void urtwn_fw_reset(struct urtwn_softc *);
static void urtwn_r88e_fw_reset(struct urtwn_softc *);
static int urtwn_fw_loadpage(struct urtwn_softc *, int,
const uint8_t *, int);
static int urtwn_load_firmware(struct urtwn_softc *);
static int urtwn_r92c_dma_init(struct urtwn_softc *);
static int urtwn_r88e_dma_init(struct urtwn_softc *);
static void urtwn_mac_init(struct urtwn_softc *);
static void urtwn_bb_init(struct urtwn_softc *);
static void urtwn_rf_init(struct urtwn_softc *);
static void urtwn_cam_init(struct urtwn_softc *);
static void urtwn_pa_bias_init(struct urtwn_softc *);
static void urtwn_rxfilter_init(struct urtwn_softc *);
static void urtwn_edca_init(struct urtwn_softc *);
static void urtwn_write_txpower(struct urtwn_softc *, int,
uint16_t[]);
static void urtwn_get_txpower(struct urtwn_softc *, int,
struct ieee80211_channel *,
struct ieee80211_channel *, uint16_t[]);
static void urtwn_r88e_get_txpower(struct urtwn_softc *, int,
struct ieee80211_channel *,
struct ieee80211_channel *, uint16_t[]);
static void urtwn_set_txpower(struct urtwn_softc *,
struct ieee80211_channel *,
struct ieee80211_channel *);
static void urtwn_scan_start(struct ieee80211com *);
static void urtwn_scan_end(struct ieee80211com *);
static void urtwn_set_channel(struct ieee80211com *);
static void urtwn_set_chan(struct urtwn_softc *,
struct ieee80211_channel *,
struct ieee80211_channel *);
static void urtwn_update_mcast(struct ieee80211com *);
static void urtwn_iq_calib(struct urtwn_softc *);
static void urtwn_lc_calib(struct urtwn_softc *);
-static void urtwn_init(void *);
-static void urtwn_init_locked(void *);
-static void urtwn_stop(struct ifnet *);
-static void urtwn_stop_locked(struct ifnet *);
+static void urtwn_init(struct urtwn_softc *);
+static void urtwn_stop(struct urtwn_softc *);
static void urtwn_abort_xfers(struct urtwn_softc *);
static int urtwn_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static void urtwn_ms_delay(struct urtwn_softc *);
/* Aliases. */
#define urtwn_bb_write urtwn_write_4
#define urtwn_bb_read urtwn_read_4
static const struct usb_config urtwn_config[URTWN_N_TRANSFER] = {
[URTWN_BULK_RX] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_IN,
.bufsize = URTWN_RXBUFSZ,
.flags = {
.pipe_bof = 1,
.short_xfer_ok = 1
},
.callback = urtwn_bulk_rx_callback,
},
[URTWN_BULK_TX_BE] = {
.type = UE_BULK,
.endpoint = 0x03,
.direction = UE_DIR_OUT,
.bufsize = URTWN_TXBUFSZ,
.flags = {
.ext_buffer = 1,
.pipe_bof = 1,
.force_short_xfer = 1
},
.callback = urtwn_bulk_tx_callback,
.timeout = URTWN_TX_TIMEOUT, /* ms */
},
[URTWN_BULK_TX_BK] = {
.type = UE_BULK,
.endpoint = 0x03,
.direction = UE_DIR_OUT,
.bufsize = URTWN_TXBUFSZ,
.flags = {
.ext_buffer = 1,
.pipe_bof = 1,
.force_short_xfer = 1,
},
.callback = urtwn_bulk_tx_callback,
.timeout = URTWN_TX_TIMEOUT, /* ms */
},
[URTWN_BULK_TX_VI] = {
.type = UE_BULK,
.endpoint = 0x02,
.direction = UE_DIR_OUT,
.bufsize = URTWN_TXBUFSZ,
.flags = {
.ext_buffer = 1,
.pipe_bof = 1,
.force_short_xfer = 1
},
.callback = urtwn_bulk_tx_callback,
.timeout = URTWN_TX_TIMEOUT, /* ms */
},
[URTWN_BULK_TX_VO] = {
.type = UE_BULK,
.endpoint = 0x02,
.direction = UE_DIR_OUT,
.bufsize = URTWN_TXBUFSZ,
.flags = {
.ext_buffer = 1,
.pipe_bof = 1,
.force_short_xfer = 1
},
.callback = urtwn_bulk_tx_callback,
.timeout = URTWN_TX_TIMEOUT, /* ms */
},
};
static int
urtwn_match(device_t self)
{
struct usb_attach_arg *uaa = device_get_ivars(self);
if (uaa->usb_mode != USB_MODE_HOST)
return (ENXIO);
if (uaa->info.bConfigIndex != URTWN_CONFIG_INDEX)
return (ENXIO);
if (uaa->info.bIfaceIndex != URTWN_IFACE_INDEX)
return (ENXIO);
return (usbd_lookup_id_by_uaa(urtwn_devs, sizeof(urtwn_devs), uaa));
}
static int
urtwn_attach(device_t self)
{
struct usb_attach_arg *uaa = device_get_ivars(self);
struct urtwn_softc *sc = device_get_softc(self);
- struct ifnet *ifp;
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
uint8_t iface_index, bands;
int error;
device_set_usb_desc(self);
sc->sc_udev = uaa->device;
sc->sc_dev = self;
if (USB_GET_DRIVER_INFO(uaa) == URTWN_RTL8188E)
sc->chip |= URTWN_CHIP_88E;
mtx_init(&sc->sc_mtx, device_get_nameunit(self),
MTX_NETWORK_LOCK, MTX_DEF);
callout_init(&sc->sc_watchdog_ch, 0);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
iface_index = URTWN_IFACE_INDEX;
error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
urtwn_config, URTWN_N_TRANSFER, sc, &sc->sc_mtx);
if (error) {
device_printf(self, "could not allocate USB transfers, "
"err=%s\n", usbd_errstr(error));
goto detach;
}
URTWN_LOCK(sc);
error = urtwn_read_chipid(sc);
if (error) {
device_printf(sc->sc_dev, "unsupported test chip\n");
URTWN_UNLOCK(sc);
goto detach;
}
/* Determine number of Tx/Rx chains. */
if (sc->chip & URTWN_CHIP_92C) {
sc->ntxchains = (sc->chip & URTWN_CHIP_92C_1T2R) ? 1 : 2;
sc->nrxchains = 2;
} else {
sc->ntxchains = 1;
sc->nrxchains = 1;
}
if (sc->chip & URTWN_CHIP_88E)
urtwn_r88e_read_rom(sc);
else
urtwn_read_rom(sc);
device_printf(sc->sc_dev, "MAC/BB RTL%s, RF 6052 %dT%dR\n",
(sc->chip & URTWN_CHIP_92C) ? "8192CU" :
(sc->chip & URTWN_CHIP_88E) ? "8188EU" :
(sc->board_type == R92C_BOARD_TYPE_HIGHPA) ? "8188RU" :
(sc->board_type == R92C_BOARD_TYPE_MINICARD) ? "8188CE-VAU" :
"8188CUS", sc->ntxchains, sc->nrxchains);
URTWN_UNLOCK(sc);
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "can not if_alloc()\n");
- goto detach;
- }
- ic = ifp->if_l2com;
-
- ifp->if_softc = sc;
- if_initname(ifp, "urtwn", device_get_unit(sc->sc_dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = urtwn_init;
- ifp->if_ioctl = urtwn_ioctl;
- ifp->if_start = urtwn_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(self);
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA /* station mode */
| IEEE80211_C_MONITOR /* monitor mode */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_BGSCAN /* capable of bg scanning */
| IEEE80211_C_WPA /* 802.11i */
;
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
ieee80211_init_channels(ic, NULL, &bands);
- ieee80211_ifattach(ic, sc->sc_bssid);
+ ieee80211_ifattach(ic);
ic->ic_raw_xmit = urtwn_raw_xmit;
ic->ic_scan_start = urtwn_scan_start;
ic->ic_scan_end = urtwn_scan_end;
ic->ic_set_channel = urtwn_set_channel;
-
+ ic->ic_transmit = urtwn_transmit;
+ ic->ic_parent = urtwn_parent;
ic->ic_vap_create = urtwn_vap_create;
ic->ic_vap_delete = urtwn_vap_delete;
ic->ic_update_mcast = urtwn_update_mcast;
ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr,
sizeof(sc->sc_txtap), URTWN_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
URTWN_RX_RADIOTAP_PRESENT);
if (bootverbose)
ieee80211_announce(ic);
return (0);
detach:
urtwn_detach(self);
return (ENXIO); /* failure */
}
static int
urtwn_detach(device_t self)
{
struct urtwn_softc *sc = device_get_softc(self);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
unsigned int x;
/* Prevent further ioctls. */
URTWN_LOCK(sc);
sc->sc_flags |= URTWN_DETACHED;
+ urtwn_stop(sc);
URTWN_UNLOCK(sc);
- urtwn_stop(ifp);
-
callout_drain(&sc->sc_watchdog_ch);
/* Prevent further allocations from RX/TX data lists. */
URTWN_LOCK(sc);
STAILQ_INIT(&sc->sc_tx_active);
STAILQ_INIT(&sc->sc_tx_inactive);
STAILQ_INIT(&sc->sc_tx_pending);
STAILQ_INIT(&sc->sc_rx_active);
STAILQ_INIT(&sc->sc_rx_inactive);
URTWN_UNLOCK(sc);
/* drain USB transfers */
for (x = 0; x != URTWN_N_TRANSFER; x++)
usbd_transfer_drain(sc->sc_xfer[x]);
/* Free data buffers. */
URTWN_LOCK(sc);
urtwn_free_tx_list(sc);
urtwn_free_rx_list(sc);
URTWN_UNLOCK(sc);
/* stop all USB transfers */
usbd_transfer_unsetup(sc->sc_xfer, URTWN_N_TRANSFER);
ieee80211_ifdetach(ic);
-
- if_free(ifp);
+ mbufq_drain(&sc->sc_snd);
mtx_destroy(&sc->sc_mtx);
return (0);
}
static void
urtwn_free_tx_list(struct urtwn_softc *sc)
{
urtwn_free_list(sc, sc->sc_tx, URTWN_TX_LIST_COUNT);
}
static void
urtwn_free_rx_list(struct urtwn_softc *sc)
{
urtwn_free_list(sc, sc->sc_rx, URTWN_RX_LIST_COUNT);
}
static void
urtwn_free_list(struct urtwn_softc *sc, struct urtwn_data data[], int ndata)
{
int i;
for (i = 0; i < ndata; i++) {
struct urtwn_data *dp = &data[i];
if (dp->buf != NULL) {
free(dp->buf, M_USBDEV);
dp->buf = NULL;
}
if (dp->ni != NULL) {
ieee80211_free_node(dp->ni);
dp->ni = NULL;
}
}
}
static usb_error_t
urtwn_do_request(struct urtwn_softc *sc, struct usb_device_request *req,
void *data)
{
usb_error_t err;
int ntries = 10;
URTWN_ASSERT_LOCKED(sc);
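/* Retry the control request up to 10 times, pausing ~10 ms between attempts. */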
while (ntries--) {
err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx,
req, data, 0, NULL, 250 /* ms */);
if (err == 0)
break;
DPRINTFN(1, "Control request failed, %s (retrying)\n",
usbd_errstr(err));
usb_pause_mtx(&sc->sc_mtx, hz / 100);
}
return (err);
}
static struct ieee80211vap *
urtwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct urtwn_vap *uvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return (NULL);
- uvp = (struct urtwn_vap *) malloc(sizeof(struct urtwn_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (uvp == NULL)
- return (NULL);
+ uvp = malloc(sizeof(struct urtwn_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &uvp->vap;
/* enable s/w bmiss handling for sta mode */
if (ieee80211_vap_setup(ic, vap, name, unit, opmode,
- flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) {
+ flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) {
/* out of memory */
free(uvp, M_80211_VAP);
return (NULL);
}
/* override state transition machine */
uvp->newstate = vap->iv_newstate;
vap->iv_newstate = urtwn_newstate;
/* complete setup */
ieee80211_vap_attach(vap, ieee80211_media_change,
- ieee80211_media_status);
+ ieee80211_media_status, mac);
ic->ic_opmode = opmode;
return (vap);
}
static void
urtwn_vap_delete(struct ieee80211vap *vap)
{
struct urtwn_vap *uvp = URTWN_VAP(vap);
ieee80211_vap_detach(vap);
free(uvp, M_80211_VAP);
}
static struct mbuf *
urtwn_rx_frame(struct urtwn_softc *sc, uint8_t *buf, int pktlen, int *rssi_p)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame *wh;
struct mbuf *m;
struct r92c_rx_stat *stat;
uint32_t rxdw0, rxdw3;
uint8_t rate;
int8_t rssi = 0;
int infosz;
/*
* don't pass packets to the ieee80211 framework if the driver isn't
* RUNNING.
*/
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ if (!(sc->sc_flags & URTWN_RUNNING))
return (NULL);
stat = (struct r92c_rx_stat *)buf;
rxdw0 = le32toh(stat->rxdw0);
rxdw3 = le32toh(stat->rxdw3);
if (rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR)) {
/*
* This should not happen since we set up our Rx filter
* to not receive these frames.
*/
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return (NULL);
}
if (pktlen < sizeof(*wh) || pktlen > MCLBYTES) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return (NULL);
}
rate = MS(rxdw3, R92C_RXDW3_RATE);
infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8;
/* Get RSSI from PHY status descriptor if present. */
if (infosz != 0 && (rxdw0 & R92C_RXDW0_PHYST)) {
if (sc->chip & URTWN_CHIP_88E)
rssi = urtwn_r88e_get_rssi(sc, rate, &stat[1]);
else
rssi = urtwn_get_rssi(sc, rate, &stat[1]);
/* Update our average RSSI. */
urtwn_update_avgrssi(sc, rate, rssi);
/*
* Convert the RSSI to a range that will be accepted
* by net80211.
*/
rssi = URTWN_RSSI(rssi);
}
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
device_printf(sc->sc_dev, "could not create RX mbuf\n");
return (NULL);
}
/* Finalize mbuf. */
- m->m_pkthdr.rcvif = ifp;
wh = (struct ieee80211_frame *)((uint8_t *)&stat[1] + infosz);
memcpy(mtod(m, uint8_t *), wh, pktlen);
m->m_pkthdr.len = m->m_len = pktlen;
if (ieee80211_radiotap_active(ic)) {
struct urtwn_rx_radiotap_header *tap = &sc->sc_rxtap;
tap->wr_flags = 0;
/* Map HW rate index to 802.11 rate. */
if (!(rxdw3 & R92C_RXDW3_HT)) {
switch (rate) {
/* CCK. */
case 0: tap->wr_rate = 2; break;
case 1: tap->wr_rate = 4; break;
case 2: tap->wr_rate = 11; break;
case 3: tap->wr_rate = 22; break;
/* OFDM. */
case 4: tap->wr_rate = 12; break;
case 5: tap->wr_rate = 18; break;
case 6: tap->wr_rate = 24; break;
case 7: tap->wr_rate = 36; break;
case 8: tap->wr_rate = 48; break;
case 9: tap->wr_rate = 72; break;
case 10: tap->wr_rate = 96; break;
case 11: tap->wr_rate = 108; break;
}
} else if (rate >= 12) { /* MCS0~15. */
/* Bit 7 set means HT MCS instead of rate. */
tap->wr_rate = 0x80 | (rate - 12);
}
tap->wr_dbm_antsignal = rssi;
tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq);
tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
}
*rssi_p = rssi;
return (m);
}
static struct mbuf *
urtwn_rxeof(struct usb_xfer *xfer, struct urtwn_data *data, int *rssi,
int8_t *nf)
{
struct urtwn_softc *sc = data->sc;
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
struct r92c_rx_stat *stat;
struct mbuf *m, *m0 = NULL, *prevm = NULL;
uint32_t rxdw0;
uint8_t *buf;
int len, totlen, pktlen, infosz, npkts;
usbd_xfer_status(xfer, &len, NULL, NULL, NULL);
if (len < sizeof(*stat)) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return (NULL);
}
buf = data->buf;
/* Get the number of encapsulated frames. */
stat = (struct r92c_rx_stat *)buf;
npkts = MS(le32toh(stat->rxdw2), R92C_RXDW2_PKTCNT);
DPRINTFN(6, "Rx %d frames in one chunk\n", npkts);
/* Process all of them. */
while (npkts-- > 0) {
if (len < sizeof(*stat))
break;
stat = (struct r92c_rx_stat *)buf;
rxdw0 = le32toh(stat->rxdw0);
pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN);
if (pktlen == 0)
break;
infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8;
/* Make sure everything fits in xfer. */
totlen = sizeof(*stat) + infosz + pktlen;
if (totlen > len)
break;
m = urtwn_rx_frame(sc, buf, pktlen, rssi);
if (m0 == NULL)
m0 = m;
if (prevm == NULL)
prevm = m;
else {
prevm->m_next = m;
prevm = m;
}
/* Next chunk is 128-byte aligned. */
totlen = (totlen + 127) & ~127;
buf += totlen;
len -= totlen;
}
return (m0);
}
static void
urtwn_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct urtwn_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct mbuf *m = NULL, *next;
struct urtwn_data *data;
int8_t nf;
int rssi = 1;
URTWN_ASSERT_LOCKED(sc);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
data = STAILQ_FIRST(&sc->sc_rx_active);
if (data == NULL)
goto tr_setup;
STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
m = urtwn_rxeof(xfer, data, &rssi, &nf);
STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
data = STAILQ_FIRST(&sc->sc_rx_inactive);
if (data == NULL) {
KASSERT(m == NULL, ("mbuf isn't NULL"));
return;
}
STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next);
STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next);
usbd_xfer_set_frame_data(xfer, 0, data->buf,
usbd_xfer_max_len(xfer));
usbd_transfer_submit(xfer);
/*
* To avoid a LOR, unlock our private mutex before calling
* ieee80211_input(); we are at the end of a USB callback,
* so it is safe to unlock here.
*/
URTWN_UNLOCK(sc);
while (m != NULL) {
next = m->m_next;
m->m_next = NULL;
wh = mtod(m, struct ieee80211_frame *);
ni = ieee80211_find_rxnode(ic,
(struct ieee80211_frame_min *)wh);
nf = URTWN_NOISE_FLOOR;
if (ni != NULL) {
(void)ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else
(void)ieee80211_input_all(ic, m, rssi, nf);
m = next;
}
URTWN_LOCK(sc);
break;
default:
/* Return the buffer to the inactive queue due to an error. */
data = STAILQ_FIRST(&sc->sc_rx_active);
if (data != NULL) {
STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next);
STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next);
}
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
goto tr_setup;
}
break;
}
}
static void
urtwn_txeof(struct usb_xfer *xfer, struct urtwn_data *data)
{
struct urtwn_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct mbuf *m;
URTWN_ASSERT_LOCKED(sc);
-
- /*
- * Do any tx complete callback. Note this must be done before releasing
- * the node reference.
- */
- if (data->m) {
- m = data->m;
- if (m->m_flags & M_TXCB) {
- /* XXX status? */
- ieee80211_process_callback(data->ni, m, 0);
- }
- m_freem(m);
- data->m = NULL;
- }
- if (data->ni) {
- ieee80211_free_node(data->ni);
- data->ni = NULL;
- }
+ /* XXX status? */
+ ieee80211_tx_complete(data->ni, data->m, 0);
+ data->ni = NULL;
+ data->m = NULL;
sc->sc_txtimer = 0;
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
static void
urtwn_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct urtwn_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
struct urtwn_data *data;
URTWN_ASSERT_LOCKED(sc);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
data = STAILQ_FIRST(&sc->sc_tx_active);
if (data == NULL)
goto tr_setup;
STAILQ_REMOVE_HEAD(&sc->sc_tx_active, next);
urtwn_txeof(xfer, data);
STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, data, next);
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
data = STAILQ_FIRST(&sc->sc_tx_pending);
if (data == NULL) {
DPRINTF("%s: empty pending queue\n", __func__);
return;
}
STAILQ_REMOVE_HEAD(&sc->sc_tx_pending, next);
STAILQ_INSERT_TAIL(&sc->sc_tx_active, data, next);
usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen);
usbd_transfer_submit(xfer);
- urtwn_start_locked(ifp, sc);
+ urtwn_start(sc);
break;
default:
data = STAILQ_FIRST(&sc->sc_tx_active);
if (data == NULL)
goto tr_setup;
if (data->ni != NULL) {
+ if_inc_counter(data->ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
ieee80211_free_node(data->ni);
data->ni = NULL;
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
goto tr_setup;
}
break;
}
}
static struct urtwn_data *
_urtwn_getbuf(struct urtwn_softc *sc)
{
struct urtwn_data *bf;
bf = STAILQ_FIRST(&sc->sc_tx_inactive);
if (bf != NULL)
STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next);
if (bf == NULL)
DPRINTF("%s: %s\n", __func__, "out of xmit buffers");
return (bf);
}
static struct urtwn_data *
urtwn_getbuf(struct urtwn_softc *sc)
{
struct urtwn_data *bf;
URTWN_ASSERT_LOCKED(sc);
bf = _urtwn_getbuf(sc);
- if (bf == NULL) {
- struct ifnet *ifp = sc->sc_ifp;
+ if (bf == NULL)
DPRINTF("%s: stop queue\n", __func__);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- }
return (bf);
}
static int
urtwn_write_region_1(struct urtwn_softc *sc, uint16_t addr, uint8_t *buf,
int len)
{
usb_device_request_t req;
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = R92C_REQ_REGS;
USETW(req.wValue, addr);
USETW(req.wIndex, 0);
USETW(req.wLength, len);
return (urtwn_do_request(sc, &req, buf));
}
static void
urtwn_write_1(struct urtwn_softc *sc, uint16_t addr, uint8_t val)
{
urtwn_write_region_1(sc, addr, &val, 1);
}
static void
urtwn_write_2(struct urtwn_softc *sc, uint16_t addr, uint16_t val)
{
val = htole16(val);
urtwn_write_region_1(sc, addr, (uint8_t *)&val, 2);
}
static void
urtwn_write_4(struct urtwn_softc *sc, uint16_t addr, uint32_t val)
{
val = htole32(val);
urtwn_write_region_1(sc, addr, (uint8_t *)&val, 4);
}
static int
urtwn_read_region_1(struct urtwn_softc *sc, uint16_t addr, uint8_t *buf,
int len)
{
usb_device_request_t req;
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = R92C_REQ_REGS;
USETW(req.wValue, addr);
USETW(req.wIndex, 0);
USETW(req.wLength, len);
return (urtwn_do_request(sc, &req, buf));
}
static uint8_t
urtwn_read_1(struct urtwn_softc *sc, uint16_t addr)
{
uint8_t val;
if (urtwn_read_region_1(sc, addr, &val, 1) != 0)
return (0xff);
return (val);
}
static uint16_t
urtwn_read_2(struct urtwn_softc *sc, uint16_t addr)
{
uint16_t val;
if (urtwn_read_region_1(sc, addr, (uint8_t *)&val, 2) != 0)
return (0xffff);
return (le16toh(val));
}
static uint32_t
urtwn_read_4(struct urtwn_softc *sc, uint16_t addr)
{
uint32_t val;
if (urtwn_read_region_1(sc, addr, (uint8_t *)&val, 4) != 0)
return (0xffffffff);
return (le32toh(val));
}
static int
urtwn_fw_cmd(struct urtwn_softc *sc, uint8_t id, const void *buf, int len)
{
struct r92c_fw_cmd cmd;
int ntries;
/* Wait for current FW box to be empty. */
for (ntries = 0; ntries < 100; ntries++) {
if (!(urtwn_read_1(sc, R92C_HMETFR) & (1 << sc->fwcur)))
break;
urtwn_ms_delay(sc);
}
if (ntries == 100) {
device_printf(sc->sc_dev,
"could not send firmware command\n");
return (ETIMEDOUT);
}
memset(&cmd, 0, sizeof(cmd));
cmd.id = id;
if (len > 3)
cmd.id |= R92C_CMD_FLAG_EXT;
KASSERT(len <= sizeof(cmd.msg), ("urtwn_fw_cmd\n"));
memcpy(cmd.msg, buf, len);
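/* The command spans two mailbox registers: the last two bytes go to the extension box, the first four to the main box. */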
/* Write the first word last since that will trigger the FW. */
urtwn_write_region_1(sc, R92C_HMEBOX_EXT(sc->fwcur),
(uint8_t *)&cmd + 4, 2);
urtwn_write_region_1(sc, R92C_HMEBOX(sc->fwcur),
(uint8_t *)&cmd + 0, 4);
sc->fwcur = (sc->fwcur + 1) % R92C_H2C_NBOX;
return (0);
}
static __inline void
urtwn_rf_write(struct urtwn_softc *sc, int chain, uint8_t addr, uint32_t val)
{
sc->sc_rf_write(sc, chain, addr, val);
}
static void
urtwn_r92c_rf_write(struct urtwn_softc *sc, int chain, uint8_t addr,
uint32_t val)
{
urtwn_bb_write(sc, R92C_LSSI_PARAM(chain),
SM(R92C_LSSI_PARAM_ADDR, addr) |
SM(R92C_LSSI_PARAM_DATA, val));
}
static void
urtwn_r88e_rf_write(struct urtwn_softc *sc, int chain, uint8_t addr,
uint32_t val)
{
urtwn_bb_write(sc, R92C_LSSI_PARAM(chain),
SM(R88E_LSSI_PARAM_ADDR, addr) |
SM(R92C_LSSI_PARAM_DATA, val));
}
static uint32_t
urtwn_rf_read(struct urtwn_softc *sc, int chain, uint8_t addr)
{
uint32_t reg[R92C_MAX_CHAINS], val;
reg[0] = urtwn_bb_read(sc, R92C_HSSI_PARAM2(0));
if (chain != 0)
reg[chain] = urtwn_bb_read(sc, R92C_HSSI_PARAM2(chain));
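/* Toggle the READ_EDGE bit in HSSI parameter 2 to latch the address, then fetch the result from the HSPI or LSSI readback register depending on the PI mode. */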
urtwn_bb_write(sc, R92C_HSSI_PARAM2(0),
reg[0] & ~R92C_HSSI_PARAM2_READ_EDGE);
urtwn_ms_delay(sc);
urtwn_bb_write(sc, R92C_HSSI_PARAM2(chain),
RW(reg[chain], R92C_HSSI_PARAM2_READ_ADDR, addr) |
R92C_HSSI_PARAM2_READ_EDGE);
urtwn_ms_delay(sc);
urtwn_bb_write(sc, R92C_HSSI_PARAM2(0),
reg[0] | R92C_HSSI_PARAM2_READ_EDGE);
urtwn_ms_delay(sc);
if (urtwn_bb_read(sc, R92C_HSSI_PARAM1(chain)) & R92C_HSSI_PARAM1_PI)
val = urtwn_bb_read(sc, R92C_HSPI_READBACK(chain));
else
val = urtwn_bb_read(sc, R92C_LSSI_READBACK(chain));
return (MS(val, R92C_LSSI_READBACK_DATA));
}
static int
urtwn_llt_write(struct urtwn_softc *sc, uint32_t addr, uint32_t data)
{
int ntries;
urtwn_write_4(sc, R92C_LLT_INIT,
SM(R92C_LLT_INIT_OP, R92C_LLT_INIT_OP_WRITE) |
SM(R92C_LLT_INIT_ADDR, addr) |
SM(R92C_LLT_INIT_DATA, data));
/* Wait for write operation to complete. */
for (ntries = 0; ntries < 20; ntries++) {
if (MS(urtwn_read_4(sc, R92C_LLT_INIT), R92C_LLT_INIT_OP) ==
R92C_LLT_INIT_OP_NO_ACTIVE)
return (0);
urtwn_ms_delay(sc);
}
return (ETIMEDOUT);
}
static uint8_t
urtwn_efuse_read_1(struct urtwn_softc *sc, uint16_t addr)
{
uint32_t reg;
int ntries;
reg = urtwn_read_4(sc, R92C_EFUSE_CTRL);
reg = RW(reg, R92C_EFUSE_CTRL_ADDR, addr);
reg &= ~R92C_EFUSE_CTRL_VALID;
urtwn_write_4(sc, R92C_EFUSE_CTRL, reg);
/* Wait for read operation to complete. */
for (ntries = 0; ntries < 100; ntries++) {
reg = urtwn_read_4(sc, R92C_EFUSE_CTRL);
if (reg & R92C_EFUSE_CTRL_VALID)
return (MS(reg, R92C_EFUSE_CTRL_DATA));
urtwn_ms_delay(sc);
}
device_printf(sc->sc_dev,
"could not read efuse byte at address 0x%x\n", addr);
return (0xff);
}
static void
urtwn_efuse_read(struct urtwn_softc *sc)
{
uint8_t *rom = (uint8_t *)&sc->rom;
uint16_t addr = 0;
uint32_t reg;
uint8_t off, msk;
int i;
urtwn_efuse_switch_power(sc);
memset(&sc->rom, 0xff, sizeof(sc->rom));
while (addr < 512) {
reg = urtwn_efuse_read_1(sc, addr);
if (reg == 0xff)
break;
addr++;
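/* Header byte: the high nibble is the 8-byte block offset, the low nibble is the word-enable mask (a clear bit means the 2-byte word follows). */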
off = reg >> 4;
msk = reg & 0xf;
for (i = 0; i < 4; i++) {
if (msk & (1 << i))
continue;
rom[off * 8 + i * 2 + 0] =
urtwn_efuse_read_1(sc, addr);
addr++;
rom[off * 8 + i * 2 + 1] =
urtwn_efuse_read_1(sc, addr);
addr++;
}
}
#ifdef URTWN_DEBUG
if (urtwn_debug >= 2) {
/* Dump ROM content. */
printf("\n");
for (i = 0; i < sizeof(sc->rom); i++)
printf("%02x:", rom[i]);
printf("\n");
}
#endif
urtwn_write_1(sc, R92C_EFUSE_ACCESS, R92C_EFUSE_ACCESS_OFF);
}
static void
urtwn_efuse_switch_power(struct urtwn_softc *sc)
{
uint32_t reg;
urtwn_write_1(sc, R92C_EFUSE_ACCESS, R92C_EFUSE_ACCESS_ON);
reg = urtwn_read_2(sc, R92C_SYS_ISO_CTRL);
if (!(reg & R92C_SYS_ISO_CTRL_PWC_EV12V)) {
urtwn_write_2(sc, R92C_SYS_ISO_CTRL,
reg | R92C_SYS_ISO_CTRL_PWC_EV12V);
}
reg = urtwn_read_2(sc, R92C_SYS_FUNC_EN);
if (!(reg & R92C_SYS_FUNC_EN_ELDR)) {
urtwn_write_2(sc, R92C_SYS_FUNC_EN,
reg | R92C_SYS_FUNC_EN_ELDR);
}
reg = urtwn_read_2(sc, R92C_SYS_CLKR);
if ((reg & (R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_ANA8M)) !=
(R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_ANA8M)) {
urtwn_write_2(sc, R92C_SYS_CLKR,
reg | R92C_SYS_CLKR_LOADER_EN | R92C_SYS_CLKR_ANA8M);
}
}
static int
urtwn_read_chipid(struct urtwn_softc *sc)
{
uint32_t reg;
if (sc->chip & URTWN_CHIP_88E)
return (0);
reg = urtwn_read_4(sc, R92C_SYS_CFG);
if (reg & R92C_SYS_CFG_TRP_VAUX_EN)
return (EIO);
if (reg & R92C_SYS_CFG_TYPE_92C) {
sc->chip |= URTWN_CHIP_92C;
/* Check if it is a castrated 8192C. */
if (MS(urtwn_read_4(sc, R92C_HPON_FSM),
R92C_HPON_FSM_CHIP_BONDING_ID) ==
R92C_HPON_FSM_CHIP_BONDING_ID_92C_1T2R)
sc->chip |= URTWN_CHIP_92C_1T2R;
}
if (reg & R92C_SYS_CFG_VENDOR_UMC) {
sc->chip |= URTWN_CHIP_UMC;
if (MS(reg, R92C_SYS_CFG_CHIP_VER_RTL) == 0)
sc->chip |= URTWN_CHIP_UMC_A_CUT;
}
return (0);
}
static void
urtwn_read_rom(struct urtwn_softc *sc)
{
struct r92c_rom *rom = &sc->rom;
/* Read full ROM image. */
urtwn_efuse_read(sc);
/* XXX Weird but this is what the vendor driver does. */
sc->pa_setting = urtwn_efuse_read_1(sc, 0x1fa);
DPRINTF("PA setting=0x%x\n", sc->pa_setting);
sc->board_type = MS(rom->rf_opt1, R92C_ROM_RF1_BOARD_TYPE);
sc->regulatory = MS(rom->rf_opt1, R92C_ROM_RF1_REGULATORY);
DPRINTF("regulatory type=%d\n", sc->regulatory);
- IEEE80211_ADDR_COPY(sc->sc_bssid, rom->macaddr);
+ IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, rom->macaddr);
sc->sc_rf_write = urtwn_r92c_rf_write;
sc->sc_power_on = urtwn_r92c_power_on;
sc->sc_dma_init = urtwn_r92c_dma_init;
}
static void
urtwn_r88e_read_rom(struct urtwn_softc *sc)
{
uint8_t *rom = sc->r88e_rom;
uint16_t addr = 0;
uint32_t reg;
uint8_t off, msk, tmp;
int i;
off = 0;
urtwn_efuse_switch_power(sc);
/* Read full ROM image. */
memset(&sc->r88e_rom, 0xff, sizeof(sc->r88e_rom));
while (addr < 512) {
reg = urtwn_efuse_read_1(sc, addr);
if (reg == 0xff)
break;
addr++;
if ((reg & 0x1f) == 0x0f) {
tmp = (reg & 0xe0) >> 5;
reg = urtwn_efuse_read_1(sc, addr);
if ((reg & 0x0f) != 0x0f)
off = ((reg & 0xf0) >> 1) | tmp;
addr++;
} else
off = reg >> 4;
msk = reg & 0xf;
for (i = 0; i < 4; i++) {
if (msk & (1 << i))
continue;
rom[off * 8 + i * 2 + 0] =
urtwn_efuse_read_1(sc, addr);
addr++;
rom[off * 8 + i * 2 + 1] =
urtwn_efuse_read_1(sc, addr);
addr++;
}
}
urtwn_write_1(sc, R92C_EFUSE_ACCESS, R92C_EFUSE_ACCESS_OFF);
addr = 0x10;
for (i = 0; i < 6; i++)
sc->cck_tx_pwr[i] = sc->r88e_rom[addr++];
for (i = 0; i < 5; i++)
sc->ht40_tx_pwr[i] = sc->r88e_rom[addr++];
sc->bw20_tx_pwr_diff = (sc->r88e_rom[addr] & 0xf0) >> 4;
if (sc->bw20_tx_pwr_diff & 0x08)
sc->bw20_tx_pwr_diff |= 0xf0;
sc->ofdm_tx_pwr_diff = (sc->r88e_rom[addr] & 0xf);
if (sc->ofdm_tx_pwr_diff & 0x08)
sc->ofdm_tx_pwr_diff |= 0xf0;
sc->regulatory = MS(sc->r88e_rom[0xc1], R92C_ROM_RF1_REGULATORY);
- IEEE80211_ADDR_COPY(sc->sc_bssid, &sc->r88e_rom[0xd7]);
+ IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, &sc->r88e_rom[0xd7]);
sc->sc_rf_write = urtwn_r88e_rf_write;
sc->sc_power_on = urtwn_r88e_power_on;
sc->sc_dma_init = urtwn_r88e_dma_init;
}
/*
* Initialize rate adaptation in firmware.
*/
static int
urtwn_ra_init(struct urtwn_softc *sc)
{
static const uint8_t map[] =
{ 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 };
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ieee80211_node *ni;
struct ieee80211_rateset *rs;
struct r92c_fw_cmd_macid_cfg cmd;
uint32_t rates, basicrates;
uint8_t mode;
int maxrate, maxbasicrate, error, i, j;
ni = ieee80211_ref_node(vap->iv_bss);
rs = &ni->ni_rates;
/* Get normal and basic rates mask. */
rates = basicrates = 0;
maxrate = maxbasicrate = 0;
for (i = 0; i < rs->rs_nrates; i++) {
/* Convert 802.11 rate to HW rate index. */
for (j = 0; j < nitems(map); j++)
if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == map[j])
break;
if (j == nitems(map)) /* Unknown rate, skip. */
continue;
rates |= 1 << j;
if (j > maxrate)
maxrate = j;
if (rs->rs_rates[i] & IEEE80211_RATE_BASIC) {
basicrates |= 1 << j;
if (j > maxbasicrate)
maxbasicrate = j;
}
}
if (ic->ic_curmode == IEEE80211_MODE_11B)
mode = R92C_RAID_11B;
else
mode = R92C_RAID_11BG;
DPRINTF("mode=0x%x rates=0x%08x, basicrates=0x%08x\n",
mode, rates, basicrates);
/* Set rates mask for group addressed frames. */
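/* The mask packs the RAID mode into the top four bits and the HW rate bitmap into the low 28 bits. */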
cmd.macid = URTWN_MACID_BC | URTWN_MACID_VALID;
cmd.mask = htole32(mode << 28 | basicrates);
error = urtwn_fw_cmd(sc, R92C_CMD_MACID_CONFIG, &cmd, sizeof(cmd));
if (error != 0) {
ieee80211_free_node(ni);
device_printf(sc->sc_dev,
"could not add broadcast station\n");
return (error);
}
/* Set initial MRR rate. */
DPRINTF("maxbasicrate=%d\n", maxbasicrate);
urtwn_write_1(sc, R92C_INIDATA_RATE_SEL(URTWN_MACID_BC),
maxbasicrate);
/* Set rates mask for unicast frames. */
cmd.macid = URTWN_MACID_BSS | URTWN_MACID_VALID;
cmd.mask = htole32(mode << 28 | rates);
error = urtwn_fw_cmd(sc, R92C_CMD_MACID_CONFIG, &cmd, sizeof(cmd));
if (error != 0) {
ieee80211_free_node(ni);
device_printf(sc->sc_dev, "could not add BSS station\n");
return (error);
}
/* Set initial MRR rate. */
DPRINTF("maxrate=%d\n", maxrate);
urtwn_write_1(sc, R92C_INIDATA_RATE_SEL(URTWN_MACID_BSS),
maxrate);
/* Indicate highest supported rate. */
ni->ni_txrate = rs->rs_rates[rs->rs_nrates - 1];
ieee80211_free_node(ni);
return (0);
}
void
urtwn_tsf_sync_enable(struct urtwn_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ieee80211_node *ni = vap->iv_bss;
uint64_t tsf;
/* Enable TSF synchronization. */
urtwn_write_1(sc, R92C_BCN_CTRL,
urtwn_read_1(sc, R92C_BCN_CTRL) & ~R92C_BCN_CTRL_DIS_TSF_UDT0);
urtwn_write_1(sc, R92C_BCN_CTRL,
urtwn_read_1(sc, R92C_BCN_CTRL) & ~R92C_BCN_CTRL_EN_BCN);
/* Set initial TSF. */
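/* Take the BSS timestamp, round it down to the previous beacon-interval boundary, then back it off by one more TU. */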
memcpy(&tsf, ni->ni_tstamp.data, 8);
tsf = le64toh(tsf);
tsf = tsf - (tsf % (vap->iv_bss->ni_intval * IEEE80211_DUR_TU));
tsf -= IEEE80211_DUR_TU;
urtwn_write_4(sc, R92C_TSFTR + 0, tsf);
urtwn_write_4(sc, R92C_TSFTR + 4, tsf >> 32);
urtwn_write_1(sc, R92C_BCN_CTRL,
urtwn_read_1(sc, R92C_BCN_CTRL) | R92C_BCN_CTRL_EN_BCN);
}
static void
urtwn_set_led(struct urtwn_softc *sc, int led, int on)
{
uint8_t reg;
if (led == URTWN_LED_LINK) {
if (sc->chip & URTWN_CHIP_88E) {
reg = urtwn_read_1(sc, R92C_LEDCFG2) & 0xf0;
urtwn_write_1(sc, R92C_LEDCFG2, reg | 0x60);
if (!on) {
reg = urtwn_read_1(sc, R92C_LEDCFG2) & 0x90;
urtwn_write_1(sc, R92C_LEDCFG2,
reg | R92C_LEDCFG0_DIS);
urtwn_write_1(sc, R92C_MAC_PINMUX_CFG,
urtwn_read_1(sc, R92C_MAC_PINMUX_CFG) &
0xfe);
}
} else {
reg = urtwn_read_1(sc, R92C_LEDCFG0) & 0x70;
if (!on)
reg |= R92C_LEDCFG0_DIS;
urtwn_write_1(sc, R92C_LEDCFG0, reg);
}
sc->ledlink = on; /* Save LED state. */
}
}
static int
urtwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct urtwn_vap *uvp = URTWN_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct urtwn_softc *sc = ic->ic_softc;
struct ieee80211_node *ni;
enum ieee80211_state ostate;
uint32_t reg;
ostate = vap->iv_state;
DPRINTF("%s -> %s\n", ieee80211_state_name[ostate],
ieee80211_state_name[nstate]);
IEEE80211_UNLOCK(ic);
URTWN_LOCK(sc);
callout_stop(&sc->sc_watchdog_ch);
if (ostate == IEEE80211_S_RUN) {
/* Turn link LED off. */
urtwn_set_led(sc, URTWN_LED_LINK, 0);
/* Set media status to 'No Link'. */
reg = urtwn_read_4(sc, R92C_CR);
reg = RW(reg, R92C_CR_NETTYPE, R92C_CR_NETTYPE_NOLINK);
urtwn_write_4(sc, R92C_CR, reg);
/* Stop Rx of data frames. */
urtwn_write_2(sc, R92C_RXFLTMAP2, 0);
/* Reset TSF. */
urtwn_write_1(sc, R92C_DUAL_TSF_RST, 0x03);
/* Disable TSF synchronization. */
urtwn_write_1(sc, R92C_BCN_CTRL,
urtwn_read_1(sc, R92C_BCN_CTRL) |
R92C_BCN_CTRL_DIS_TSF_UDT0);
/* Reset EDCA parameters. */
urtwn_write_4(sc, R92C_EDCA_VO_PARAM, 0x002f3217);
urtwn_write_4(sc, R92C_EDCA_VI_PARAM, 0x005e4317);
urtwn_write_4(sc, R92C_EDCA_BE_PARAM, 0x00105320);
urtwn_write_4(sc, R92C_EDCA_BK_PARAM, 0x0000a444);
}
switch (nstate) {
case IEEE80211_S_INIT:
/* Turn link LED off. */
urtwn_set_led(sc, URTWN_LED_LINK, 0);
break;
case IEEE80211_S_SCAN:
if (ostate != IEEE80211_S_SCAN) {
/* Allow Rx from any BSSID. */
urtwn_write_4(sc, R92C_RCR,
urtwn_read_4(sc, R92C_RCR) &
~(R92C_RCR_CBSSID_DATA | R92C_RCR_CBSSID_BCN));
/* Set gain for scanning. */
reg = urtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(0));
reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, 0x20);
urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), reg);
if (!(sc->chip & URTWN_CHIP_88E)) {
reg = urtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(1));
reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, 0x20);
urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(1), reg);
}
}
/* Pause AC Tx queues. */
urtwn_write_1(sc, R92C_TXPAUSE,
urtwn_read_1(sc, R92C_TXPAUSE) | 0x0f);
break;
case IEEE80211_S_AUTH:
/* Set initial gain under link. */
reg = urtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(0));
reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, 0x32);
urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), reg);
if (!(sc->chip & URTWN_CHIP_88E)) {
reg = urtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(1));
reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, 0x32);
urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(1), reg);
}
urtwn_set_chan(sc, ic->ic_curchan, NULL);
break;
case IEEE80211_S_RUN:
if (vap->iv_opmode == IEEE80211_M_MONITOR) {
/* Enable Rx of data frames. */
urtwn_write_2(sc, R92C_RXFLTMAP2, 0xffff);
/* Turn link LED on. */
urtwn_set_led(sc, URTWN_LED_LINK, 1);
break;
}
ni = ieee80211_ref_node(vap->iv_bss);
/* Set media status to 'Associated'. */
reg = urtwn_read_4(sc, R92C_CR);
reg = RW(reg, R92C_CR_NETTYPE, R92C_CR_NETTYPE_INFRA);
urtwn_write_4(sc, R92C_CR, reg);
/* Set BSSID. */
urtwn_write_4(sc, R92C_BSSID + 0, LE_READ_4(&ni->ni_bssid[0]));
urtwn_write_4(sc, R92C_BSSID + 4, LE_READ_2(&ni->ni_bssid[4]));
if (ic->ic_curmode == IEEE80211_MODE_11B)
urtwn_write_1(sc, R92C_INIRTS_RATE_SEL, 0);
else /* 802.11b/g */
urtwn_write_1(sc, R92C_INIRTS_RATE_SEL, 3);
/* Enable Rx of data frames. */
urtwn_write_2(sc, R92C_RXFLTMAP2, 0xffff);
/* Flush all AC queues. */
urtwn_write_1(sc, R92C_TXPAUSE, 0);
/* Set beacon interval. */
urtwn_write_2(sc, R92C_BCN_INTERVAL, ni->ni_intval);
/* Allow Rx from our BSSID only. */
urtwn_write_4(sc, R92C_RCR,
urtwn_read_4(sc, R92C_RCR) |
R92C_RCR_CBSSID_DATA | R92C_RCR_CBSSID_BCN);
/* Enable TSF synchronization. */
urtwn_tsf_sync_enable(sc);
urtwn_write_1(sc, R92C_SIFS_CCK + 1, 10);
urtwn_write_1(sc, R92C_SIFS_OFDM + 1, 10);
urtwn_write_1(sc, R92C_SPEC_SIFS + 1, 10);
urtwn_write_1(sc, R92C_MAC_SPEC_SIFS + 1, 10);
urtwn_write_1(sc, R92C_R2T_SIFS + 1, 10);
urtwn_write_1(sc, R92C_T2T_SIFS + 1, 10);
/* Initialize rate adaptation. */
if (sc->chip & URTWN_CHIP_88E)
ni->ni_txrate =
ni->ni_rates.rs_rates[ni->ni_rates.rs_nrates-1];
else
urtwn_ra_init(sc);
/* Turn link LED on. */
urtwn_set_led(sc, URTWN_LED_LINK, 1);
sc->avg_pwdb = -1; /* Reset average RSSI. */
/* Reset temperature calibration state machine. */
sc->thcal_state = 0;
sc->thcal_lctemp = 0;
ieee80211_free_node(ni);
break;
default:
break;
}
URTWN_UNLOCK(sc);
IEEE80211_LOCK(ic);
return (uvp->newstate(vap, nstate, arg));
}
static void
urtwn_watchdog(void *arg)
{
struct urtwn_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
if (sc->sc_txtimer > 0) {
if (--sc->sc_txtimer == 0) {
device_printf(sc->sc_dev, "device timeout\n");
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
return;
}
callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc);
}
}
static void
urtwn_update_avgrssi(struct urtwn_softc *sc, int rate, int8_t rssi)
{
int pwdb;
/* Convert antenna signal to percentage. */
if (rssi <= -100 || rssi >= 20)
pwdb = 0;
else if (rssi >= 0)
pwdb = 100;
else
pwdb = 100 + rssi;
if (!(sc->chip & URTWN_CHIP_88E)) {
if (rate <= 3) {
/* CCK gain is smaller than OFDM/MCS gain. */
pwdb += 6;
if (pwdb > 100)
pwdb = 100;
if (pwdb <= 14)
pwdb -= 4;
else if (pwdb <= 26)
pwdb -= 8;
else if (pwdb <= 34)
pwdb -= 6;
else if (pwdb <= 42)
pwdb -= 2;
}
}
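/* Exponential moving average with a 19/20 weight on the old value; bias upward by one when the new sample is higher so the average can rise. */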
if (sc->avg_pwdb == -1) /* Init. */
sc->avg_pwdb = pwdb;
else if (sc->avg_pwdb < pwdb)
sc->avg_pwdb = ((sc->avg_pwdb * 19 + pwdb) / 20) + 1;
else
sc->avg_pwdb = ((sc->avg_pwdb * 19 + pwdb) / 20);
DPRINTFN(4, "PWDB=%d EMA=%d\n", pwdb, sc->avg_pwdb);
}
static int8_t
urtwn_get_rssi(struct urtwn_softc *sc, int rate, void *physt)
{
static const int8_t cckoff[] = { 16, -12, -26, -46 };
struct r92c_rx_phystat *phy;
struct r92c_rx_cck *cck;
uint8_t rpt;
int8_t rssi;
if (rate <= 3) {
cck = (struct r92c_rx_cck *)physt;
if (sc->sc_flags & URTWN_FLAG_CCK_HIPWR) {
rpt = (cck->agc_rpt >> 5) & 0x3;
rssi = (cck->agc_rpt & 0x1f) << 1;
} else {
rpt = (cck->agc_rpt >> 6) & 0x3;
rssi = cck->agc_rpt & 0x3e;
}
rssi = cckoff[rpt] - rssi;
} else { /* OFDM/HT. */
phy = (struct r92c_rx_phystat *)physt;
rssi = ((le32toh(phy->phydw1) >> 1) & 0x7f) - 110;
}
return (rssi);
}
static int8_t
urtwn_r88e_get_rssi(struct urtwn_softc *sc, int rate, void *physt)
{
struct r92c_rx_phystat *phy;
struct r88e_rx_cck *cck;
uint8_t cck_agc_rpt, lna_idx, vga_idx;
int8_t rssi;
rssi = 0;
if (rate <= 3) {
cck = (struct r88e_rx_cck *)physt;
cck_agc_rpt = cck->agc_rpt;
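/* Bits 7-5 of the AGC report select the LNA gain step, bits 4-0 give the VGA index used in the per-step formulas below. */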
lna_idx = (cck_agc_rpt & 0xe0) >> 5;
vga_idx = cck_agc_rpt & 0x1f;
switch (lna_idx) {
case 7:
if (vga_idx <= 27)
rssi = -100 + 2 * (27 - vga_idx);
else
rssi = -100;
break;
case 6:
rssi = -48 + 2 * (2 - vga_idx);
break;
case 5:
rssi = -42 + 2 * (7 - vga_idx);
break;
case 4:
rssi = -36 + 2 * (7 - vga_idx);
break;
case 3:
rssi = -24 + 2 * (7 - vga_idx);
break;
case 2:
rssi = -12 + 2 * (5 - vga_idx);
break;
case 1:
rssi = 8 - (2 * vga_idx);
break;
case 0:
rssi = 14 - (2 * vga_idx);
break;
}
rssi += 6;
} else { /* OFDM/HT. */
phy = (struct r92c_rx_phystat *)physt;
rssi = ((le32toh(phy->phydw1) >> 1) & 0x7f) - 110;
}
return (rssi);
}
static int
urtwn_tx_start(struct urtwn_softc *sc, struct ieee80211_node *ni,
struct mbuf *m0, struct urtwn_data *data)
{
- struct ifnet *ifp = sc->sc_ifp;
struct ieee80211_frame *wh;
struct ieee80211_key *k;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = ni->ni_vap;
struct usb_xfer *xfer;
struct r92c_tx_desc *txd;
uint8_t raid, type;
uint16_t sum;
int i, hasqos, xferlen;
struct usb_xfer *urtwn_pipes[4] = {
sc->sc_xfer[URTWN_BULK_TX_BE],
sc->sc_xfer[URTWN_BULK_TX_BK],
sc->sc_xfer[URTWN_BULK_TX_VI],
sc->sc_xfer[URTWN_BULK_TX_VO]
};
URTWN_ASSERT_LOCKED(sc);
/*
* Software crypto.
*/
wh = mtod(m0, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
device_printf(sc->sc_dev,
"ieee80211_crypto_encap returns NULL.\n");
/* XXX we don't expect fragmented frames */
m_freem(m0);
return (ENOBUFS);
}
/* in case packet header moved, reset pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
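/* Management and control frames go out on the VO bulk pipe; data frames are mapped to a pipe by their WME access category. */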
switch (type) {
case IEEE80211_FC0_TYPE_CTL:
case IEEE80211_FC0_TYPE_MGT:
xfer = sc->sc_xfer[URTWN_BULK_TX_VO];
break;
default:
KASSERT(M_WME_GETAC(m0) < 4,
("unsupported WME pipe %d", M_WME_GETAC(m0)));
xfer = urtwn_pipes[M_WME_GETAC(m0)];
break;
}
hasqos = 0;
/* Fill Tx descriptor. */
txd = (struct r92c_tx_desc *)data->buf;
memset(txd, 0, sizeof(*txd));
txd->txdw0 |= htole32(
SM(R92C_TXDW0_PKTLEN, m0->m_pkthdr.len) |
SM(R92C_TXDW0_OFFSET, sizeof(*txd)) |
R92C_TXDW0_OWN | R92C_TXDW0_FSG | R92C_TXDW0_LSG);
if (IEEE80211_IS_MULTICAST(wh->i_addr1))
txd->txdw0 |= htole32(R92C_TXDW0_BMCAST);
if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
type == IEEE80211_FC0_TYPE_DATA) {
if (ic->ic_curmode == IEEE80211_MODE_11B)
raid = R92C_RAID_11B;
else
raid = R92C_RAID_11BG;
if (sc->chip & URTWN_CHIP_88E) {
txd->txdw1 |= htole32(
SM(R88E_TXDW1_MACID, URTWN_MACID_BSS) |
SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) |
SM(R92C_TXDW1_RAID, raid));
txd->txdw2 |= htole32(R88E_TXDW2_AGGBK);
} else {
txd->txdw1 |= htole32(
SM(R92C_TXDW1_MACID, URTWN_MACID_BSS) |
SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) |
SM(R92C_TXDW1_RAID, raid) | R92C_TXDW1_AGGBK);
}
if (ic->ic_flags & IEEE80211_F_USEPROT) {
if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
txd->txdw4 |= htole32(R92C_TXDW4_CTS2SELF |
R92C_TXDW4_HWRTSEN);
} else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
txd->txdw4 |= htole32(R92C_TXDW4_RTSEN |
R92C_TXDW4_HWRTSEN);
}
}
/* Send RTS at OFDM24. */
txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, 8));
txd->txdw5 |= htole32(0x0001ff00);
/* Send data at OFDM54. */
txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 11));
} else {
txd->txdw1 |= htole32(
SM(R92C_TXDW1_MACID, 0) |
SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT) |
SM(R92C_TXDW1_RAID, R92C_RAID_11B));
/* Force CCK1. */
txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 0));
}
/* Set sequence number (already little endian). */
txd->txdseq |= *(uint16_t *)wh->i_seq;
if (!hasqos) {
/* Use HW sequence numbering for non-QoS frames. */
txd->txdw4 |= htole32(R92C_TXDW4_HWSEQ);
txd->txdseq |= htole16(0x8000);
} else
txd->txdw4 |= htole32(R92C_TXDW4_QOS);
/* Compute Tx descriptor checksum. */
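/* XOR of all 16-bit words in the descriptor; txdsum itself is still zero at this point, so it does not affect the sum. */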
sum = 0;
for (i = 0; i < sizeof(*txd) / 2; i++)
sum ^= ((uint16_t *)txd)[i];
txd->txdsum = sum; /* NB: already little endian. */
if (ieee80211_radiotap_active_vap(vap)) {
struct urtwn_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq);
tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags);
ieee80211_radiotap_tx(vap, m0);
}
xferlen = sizeof(*txd) + m0->m_pkthdr.len;
m_copydata(m0, 0, m0->m_pkthdr.len, (caddr_t)&txd[1]);
data->buflen = xferlen;
data->ni = ni;
data->m = m0;
STAILQ_INSERT_TAIL(&sc->sc_tx_pending, data, next);
usbd_transfer_start(xfer);
return (0);
}
-static void
-urtwn_start(struct ifnet *ifp)
+static int
+urtwn_transmit(struct ieee80211com *ic, struct mbuf *m)
{
- struct urtwn_softc *sc = ifp->if_softc;
+ struct urtwn_softc *sc = ic->ic_softc;
+ int error;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
URTWN_LOCK(sc);
- urtwn_start_locked(ifp, sc);
+ if ((sc->sc_flags & URTWN_RUNNING) == 0) {
+ URTWN_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ URTWN_UNLOCK(sc);
+ return (error);
+ }
+ urtwn_start(sc);
URTWN_UNLOCK(sc);
+
+ return (0);
}
static void
-urtwn_start_locked(struct ifnet *ifp, struct urtwn_softc *sc)
+urtwn_start(struct urtwn_softc *sc)
{
struct ieee80211_node *ni;
struct mbuf *m;
struct urtwn_data *bf;
URTWN_ASSERT_LOCKED(sc);
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
+ while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
bf = urtwn_getbuf(sc);
if (bf == NULL) {
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
+ mbufq_prepend(&sc->sc_snd, m);
break;
}
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
m->m_pkthdr.rcvif = NULL;
-
if (urtwn_tx_start(sc, ni, m, bf) != 0) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next);
ieee80211_free_node(ni);
break;
}
-
sc->sc_txtimer = 5;
callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc);
}
}
-static int
-urtwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+urtwn_parent(struct ieee80211com *ic)
{
- struct ieee80211com *ic = ifp->if_l2com;
struct urtwn_softc *sc = ic->ic_softc;
- struct ifreq *ifr = (struct ifreq *) data;
- int error = 0, startall = 0;
+ int startall = 0;
URTWN_LOCK(sc);
- error = (sc->sc_flags & URTWN_DETACHED) ? ENXIO : 0;
+ if (sc->sc_flags & URTWN_DETACHED) {
+ URTWN_UNLOCK(sc);
+ return;
+ }
+ if (ic->ic_nrunning > 0) {
+ if ((sc->sc_flags & URTWN_RUNNING) == 0) {
+ urtwn_init(sc);
+ startall = 1;
+ }
+ } else if (sc->sc_flags & URTWN_RUNNING)
+ urtwn_stop(sc);
URTWN_UNLOCK(sc);
- if (error != 0)
- return (error);
- switch (cmd) {
- case SIOCSIFFLAGS:
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
- urtwn_init(sc);
- startall = 1;
- }
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- urtwn_stop(ifp);
- }
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
- }
- return (error);
+ if (startall)
+ ieee80211_start_all(ic);
}
static int
urtwn_alloc_list(struct urtwn_softc *sc, struct urtwn_data data[],
int ndata, int maxsz)
{
int i, error;
for (i = 0; i < ndata; i++) {
struct urtwn_data *dp = &data[i];
dp->sc = sc;
dp->m = NULL;
dp->buf = malloc(maxsz, M_USBDEV, M_NOWAIT);
if (dp->buf == NULL) {
device_printf(sc->sc_dev,
"could not allocate buffer\n");
error = ENOMEM;
goto fail;
}
dp->ni = NULL;
}
return (0);
fail:
urtwn_free_list(sc, data, ndata);
return (error);
}
static int
urtwn_alloc_rx_list(struct urtwn_softc *sc)
{
int error, i;
error = urtwn_alloc_list(sc, sc->sc_rx, URTWN_RX_LIST_COUNT,
URTWN_RXBUFSZ);
if (error != 0)
return (error);
STAILQ_INIT(&sc->sc_rx_active);
STAILQ_INIT(&sc->sc_rx_inactive);
for (i = 0; i < URTWN_RX_LIST_COUNT; i++)
STAILQ_INSERT_HEAD(&sc->sc_rx_inactive, &sc->sc_rx[i], next);
return (0);
}
static int
urtwn_alloc_tx_list(struct urtwn_softc *sc)
{
int error, i;
error = urtwn_alloc_list(sc, sc->sc_tx, URTWN_TX_LIST_COUNT,
URTWN_TXBUFSZ);
if (error != 0)
return (error);
STAILQ_INIT(&sc->sc_tx_active);
STAILQ_INIT(&sc->sc_tx_inactive);
STAILQ_INIT(&sc->sc_tx_pending);
for (i = 0; i < URTWN_TX_LIST_COUNT; i++)
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, &sc->sc_tx[i], next);
return (0);
}
static __inline int
urtwn_power_on(struct urtwn_softc *sc)
{
return sc->sc_power_on(sc);
}
static int
urtwn_r92c_power_on(struct urtwn_softc *sc)
{
uint32_t reg;
int ntries;
/* Wait for autoload done bit. */
for (ntries = 0; ntries < 1000; ntries++) {
if (urtwn_read_1(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_PFM_ALDN)
break;
urtwn_ms_delay(sc);
}
if (ntries == 1000) {
device_printf(sc->sc_dev,
"timeout waiting for chip autoload\n");
return (ETIMEDOUT);
}
/* Unlock ISO/CLK/Power control register. */
urtwn_write_1(sc, R92C_RSV_CTRL, 0);
/* Move SPS into PWM mode. */
urtwn_write_1(sc, R92C_SPS0_CTRL, 0x2b);
urtwn_ms_delay(sc);
reg = urtwn_read_1(sc, R92C_LDOV12D_CTRL);
if (!(reg & R92C_LDOV12D_CTRL_LDV12_EN)) {
urtwn_write_1(sc, R92C_LDOV12D_CTRL,
reg | R92C_LDOV12D_CTRL_LDV12_EN);
urtwn_ms_delay(sc);
urtwn_write_1(sc, R92C_SYS_ISO_CTRL,
urtwn_read_1(sc, R92C_SYS_ISO_CTRL) &
~R92C_SYS_ISO_CTRL_MD2PP);
}
/* Auto enable WLAN. */
urtwn_write_2(sc, R92C_APS_FSMCO,
urtwn_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
for (ntries = 0; ntries < 1000; ntries++) {
if (!(urtwn_read_2(sc, R92C_APS_FSMCO) &
R92C_APS_FSMCO_APFM_ONMAC))
break;
urtwn_ms_delay(sc);
}
if (ntries == 1000) {
device_printf(sc->sc_dev,
"timeout waiting for MAC auto ON\n");
return (ETIMEDOUT);
}
/* Enable radio, GPIO and LED functions. */
urtwn_write_2(sc, R92C_APS_FSMCO,
R92C_APS_FSMCO_AFSM_HSUS |
R92C_APS_FSMCO_PDN_EN |
R92C_APS_FSMCO_PFM_ALDN);
/* Release RF digital isolation. */
urtwn_write_2(sc, R92C_SYS_ISO_CTRL,
urtwn_read_2(sc, R92C_SYS_ISO_CTRL) & ~R92C_SYS_ISO_CTRL_DIOR);
/* Initialize MAC. */
urtwn_write_1(sc, R92C_APSD_CTRL,
urtwn_read_1(sc, R92C_APSD_CTRL) & ~R92C_APSD_CTRL_OFF);
for (ntries = 0; ntries < 200; ntries++) {
if (!(urtwn_read_1(sc, R92C_APSD_CTRL) &
R92C_APSD_CTRL_OFF_STATUS))
break;
urtwn_ms_delay(sc);
}
if (ntries == 200) {
device_printf(sc->sc_dev,
"timeout waiting for MAC initialization\n");
return (ETIMEDOUT);
}
/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
reg = urtwn_read_2(sc, R92C_CR);
reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
R92C_CR_SCHEDULE_EN | R92C_CR_MACTXEN | R92C_CR_MACRXEN |
R92C_CR_ENSEC;
urtwn_write_2(sc, R92C_CR, reg);
urtwn_write_1(sc, 0xfe10, 0x19);
return (0);
}
static int
urtwn_r88e_power_on(struct urtwn_softc *sc)
{
uint32_t reg;
int ntries;
/* Wait for power ready bit. */
for (ntries = 0; ntries < 5000; ntries++) {
if (urtwn_read_4(sc, R92C_APS_FSMCO) & R92C_APS_FSMCO_SUS_HOST)
break;
urtwn_ms_delay(sc);
}
if (ntries == 5000) {
device_printf(sc->sc_dev,
"timeout waiting for chip power up\n");
return (ETIMEDOUT);
}
/* Reset BB. */
urtwn_write_1(sc, R92C_SYS_FUNC_EN,
urtwn_read_1(sc, R92C_SYS_FUNC_EN) & ~(R92C_SYS_FUNC_EN_BBRSTB |
R92C_SYS_FUNC_EN_BB_GLB_RST));
urtwn_write_1(sc, R92C_AFE_XTAL_CTRL + 2,
urtwn_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80);
/* Disable HWPDN. */
urtwn_write_2(sc, R92C_APS_FSMCO,
urtwn_read_2(sc, R92C_APS_FSMCO) & ~R92C_APS_FSMCO_APDM_HPDN);
/* Disable WL suspend. */
urtwn_write_2(sc, R92C_APS_FSMCO,
urtwn_read_2(sc, R92C_APS_FSMCO) &
~(R92C_APS_FSMCO_AFSM_HSUS | R92C_APS_FSMCO_AFSM_PCIE));
urtwn_write_2(sc, R92C_APS_FSMCO,
urtwn_read_2(sc, R92C_APS_FSMCO) | R92C_APS_FSMCO_APFM_ONMAC);
for (ntries = 0; ntries < 5000; ntries++) {
if (!(urtwn_read_2(sc, R92C_APS_FSMCO) &
R92C_APS_FSMCO_APFM_ONMAC))
break;
urtwn_ms_delay(sc);
}
if (ntries == 5000)
return (ETIMEDOUT);
/* Enable LDO normal mode. */
urtwn_write_1(sc, R92C_LPLDO_CTRL,
urtwn_read_1(sc, R92C_LPLDO_CTRL) & ~0x10);
/* Enable MAC DMA/WMAC/SCHEDULE/SEC blocks. */
urtwn_write_2(sc, R92C_CR, 0);
reg = urtwn_read_2(sc, R92C_CR);
reg |= R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN |
R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | R92C_CR_PROTOCOL_EN |
R92C_CR_SCHEDULE_EN | R92C_CR_ENSEC | R92C_CR_CALTMR_EN;
urtwn_write_2(sc, R92C_CR, reg);
return (0);
}
static int
urtwn_llt_init(struct urtwn_softc *sc)
{
int i, error, page_count, pktbuf_count;
page_count = (sc->chip & URTWN_CHIP_88E) ?
R88E_TX_PAGE_COUNT : R92C_TX_PAGE_COUNT;
pktbuf_count = (sc->chip & URTWN_CHIP_88E) ?
R88E_TXPKTBUF_COUNT : R92C_TXPKTBUF_COUNT;
/* Reserve pages [0; page_count]. */
for (i = 0; i < page_count; i++) {
if ((error = urtwn_llt_write(sc, i, i + 1)) != 0)
return (error);
}
/* NB: 0xff indicates end-of-list. */
if ((error = urtwn_llt_write(sc, i, 0xff)) != 0)
return (error);
/*
* Use pages [page_count + 1; pktbuf_count - 1]
* as ring buffer.
*/
for (++i; i < pktbuf_count - 1; i++) {
if ((error = urtwn_llt_write(sc, i, i + 1)) != 0)
return (error);
}
/* Make the last page point to the beginning of the ring buffer. */
error = urtwn_llt_write(sc, i, page_count + 1);
return (error);
}
static void
urtwn_fw_reset(struct urtwn_softc *sc)
{
uint16_t reg;
int ntries;
/* Tell 8051 to reset itself. */
urtwn_write_1(sc, R92C_HMETFR + 3, 0x20);
/* Wait until 8051 resets by itself. */
for (ntries = 0; ntries < 100; ntries++) {
reg = urtwn_read_2(sc, R92C_SYS_FUNC_EN);
if (!(reg & R92C_SYS_FUNC_EN_CPUEN))
return;
urtwn_ms_delay(sc);
}
/* Force 8051 reset. */
urtwn_write_2(sc, R92C_SYS_FUNC_EN, reg & ~R92C_SYS_FUNC_EN_CPUEN);
}
static void
urtwn_r88e_fw_reset(struct urtwn_softc *sc)
{
uint16_t reg;
reg = urtwn_read_2(sc, R92C_SYS_FUNC_EN);
urtwn_write_2(sc, R92C_SYS_FUNC_EN, reg & ~R92C_SYS_FUNC_EN_CPUEN);
urtwn_write_2(sc, R92C_SYS_FUNC_EN, reg | R92C_SYS_FUNC_EN_CPUEN);
}
static int
urtwn_fw_loadpage(struct urtwn_softc *sc, int page, const uint8_t *buf, int len)
{
uint32_t reg;
int off, mlen, error = 0;
reg = urtwn_read_4(sc, R92C_MCUFWDL);
reg = RW(reg, R92C_MCUFWDL_PAGE, page);
urtwn_write_4(sc, R92C_MCUFWDL, reg);
off = R92C_FW_START_ADDR;
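/* Copy in chunks the control transfers can carry: up to 196 bytes at a time, then 4-byte words, then single bytes for the tail. */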
while (len > 0) {
if (len > 196)
mlen = 196;
else if (len > 4)
mlen = 4;
else
mlen = 1;
/* XXX fix this deconst */
error = urtwn_write_region_1(sc, off,
__DECONST(uint8_t *, buf), mlen);
if (error != 0)
break;
off += mlen;
buf += mlen;
len -= mlen;
}
return (error);
}
static int
urtwn_load_firmware(struct urtwn_softc *sc)
{
const struct firmware *fw;
const struct r92c_fw_hdr *hdr;
const char *imagename;
const u_char *ptr;
size_t len;
uint32_t reg;
int mlen, ntries, page, error;
URTWN_UNLOCK(sc);
/* Read firmware image from the filesystem. */
if (sc->chip & URTWN_CHIP_88E)
imagename = "urtwn-rtl8188eufw";
else if ((sc->chip & (URTWN_CHIP_UMC_A_CUT | URTWN_CHIP_92C)) ==
URTWN_CHIP_UMC_A_CUT)
imagename = "urtwn-rtl8192cfwU";
else
imagename = "urtwn-rtl8192cfwT";
fw = firmware_get(imagename);
URTWN_LOCK(sc);
if (fw == NULL) {
device_printf(sc->sc_dev,
"failed loadfirmware of file %s\n", imagename);
return (ENOENT);
}
len = fw->datasize;
if (len < sizeof(*hdr)) {
device_printf(sc->sc_dev, "firmware too short\n");
error = EINVAL;
goto fail;
}
ptr = fw->data;
hdr = (const struct r92c_fw_hdr *)ptr;
/* Check if there is a valid FW header and skip it. */
if ((le16toh(hdr->signature) >> 4) == 0x88c ||
(le16toh(hdr->signature) >> 4) == 0x88e ||
(le16toh(hdr->signature) >> 4) == 0x92c) {
DPRINTF("FW V%d.%d %02d-%02d %02d:%02d\n",
le16toh(hdr->version), le16toh(hdr->subversion),
hdr->month, hdr->date, hdr->hour, hdr->minute);
ptr += sizeof(*hdr);
len -= sizeof(*hdr);
}
if (urtwn_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RAM_DL_SEL) {
if (sc->chip & URTWN_CHIP_88E)
urtwn_r88e_fw_reset(sc);
else
urtwn_fw_reset(sc);
urtwn_write_1(sc, R92C_MCUFWDL, 0);
}
if (!(sc->chip & URTWN_CHIP_88E)) {
urtwn_write_2(sc, R92C_SYS_FUNC_EN,
urtwn_read_2(sc, R92C_SYS_FUNC_EN) |
R92C_SYS_FUNC_EN_CPUEN);
}
urtwn_write_1(sc, R92C_MCUFWDL,
urtwn_read_1(sc, R92C_MCUFWDL) | R92C_MCUFWDL_EN);
urtwn_write_1(sc, R92C_MCUFWDL + 2,
urtwn_read_1(sc, R92C_MCUFWDL + 2) & ~0x08);
/* Reset the FWDL checksum. */
urtwn_write_1(sc, R92C_MCUFWDL,
urtwn_read_1(sc, R92C_MCUFWDL) | R92C_MCUFWDL_CHKSUM_RPT);
for (page = 0; len > 0; page++) {
mlen = min(len, R92C_FW_PAGE_SIZE);
error = urtwn_fw_loadpage(sc, page, ptr, mlen);
if (error != 0) {
device_printf(sc->sc_dev,
"could not load firmware page\n");
goto fail;
}
ptr += mlen;
len -= mlen;
}
urtwn_write_1(sc, R92C_MCUFWDL,
urtwn_read_1(sc, R92C_MCUFWDL) & ~R92C_MCUFWDL_EN);
urtwn_write_1(sc, R92C_MCUFWDL + 1, 0);
/* Wait for checksum report. */
for (ntries = 0; ntries < 1000; ntries++) {
if (urtwn_read_4(sc, R92C_MCUFWDL) & R92C_MCUFWDL_CHKSUM_RPT)
break;
urtwn_ms_delay(sc);
}
if (ntries == 1000) {
device_printf(sc->sc_dev,
"timeout waiting for checksum report\n");
error = ETIMEDOUT;
goto fail;
}
reg = urtwn_read_4(sc, R92C_MCUFWDL);
reg = (reg & ~R92C_MCUFWDL_WINTINI_RDY) | R92C_MCUFWDL_RDY;
urtwn_write_4(sc, R92C_MCUFWDL, reg);
if (sc->chip & URTWN_CHIP_88E)
urtwn_r88e_fw_reset(sc);
/* Wait for firmware readiness. */
for (ntries = 0; ntries < 1000; ntries++) {
if (urtwn_read_4(sc, R92C_MCUFWDL) & R92C_MCUFWDL_WINTINI_RDY)
break;
urtwn_ms_delay(sc);
}
if (ntries == 1000) {
device_printf(sc->sc_dev,
"timeout waiting for firmware readiness\n");
error = ETIMEDOUT;
goto fail;
}
fail:
firmware_put(fw, FIRMWARE_UNLOAD);
return (error);
}
static __inline int
urtwn_dma_init(struct urtwn_softc *sc)
{
return sc->sc_dma_init(sc);
}
static int
urtwn_r92c_dma_init(struct urtwn_softc *sc)
{
int hashq, hasnq, haslq, nqueues, nqpages, nrempages;
uint32_t reg;
int error;
/* Initialize LLT table. */
error = urtwn_llt_init(sc);
if (error != 0)
return (error);
/* Get Tx queues to USB endpoints mapping. */
hashq = hasnq = haslq = 0;
reg = urtwn_read_2(sc, R92C_USB_EP + 1);
DPRINTFN(2, "USB endpoints mapping 0x%x\n", reg);
if (MS(reg, R92C_USB_EP_HQ) != 0)
hashq = 1;
if (MS(reg, R92C_USB_EP_NQ) != 0)
hasnq = 1;
if (MS(reg, R92C_USB_EP_LQ) != 0)
haslq = 1;
nqueues = hashq + hasnq + haslq;
if (nqueues == 0)
return (EIO);
/* Get the number of pages for each queue. */
nqpages = (R92C_TX_PAGE_COUNT - R92C_PUBQ_NPAGES) / nqueues;
/* The remaining pages are assigned to the high priority queue. */
nrempages = (R92C_TX_PAGE_COUNT - R92C_PUBQ_NPAGES) % nqueues;
/* Set number of pages for normal priority queue. */
urtwn_write_1(sc, R92C_RQPN_NPQ, hasnq ? nqpages : 0);
urtwn_write_4(sc, R92C_RQPN,
/* Set number of pages for public queue. */
SM(R92C_RQPN_PUBQ, R92C_PUBQ_NPAGES) |
/* Set number of pages for high priority queue. */
SM(R92C_RQPN_HPQ, hashq ? nqpages + nrempages : 0) |
/* Set number of pages for low priority queue. */
SM(R92C_RQPN_LPQ, haslq ? nqpages : 0) |
/* Load values. */
R92C_RQPN_LD);
urtwn_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, R92C_TX_PAGE_BOUNDARY);
urtwn_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, R92C_TX_PAGE_BOUNDARY);
urtwn_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD, R92C_TX_PAGE_BOUNDARY);
urtwn_write_1(sc, R92C_TRXFF_BNDY, R92C_TX_PAGE_BOUNDARY);
urtwn_write_1(sc, R92C_TDECTRL + 1, R92C_TX_PAGE_BOUNDARY);
/* Set queue to USB pipe mapping. */
reg = urtwn_read_2(sc, R92C_TRXDMA_CTRL);
reg &= ~R92C_TRXDMA_CTRL_QMAP_M;
if (nqueues == 1) {
if (hashq)
reg |= R92C_TRXDMA_CTRL_QMAP_HQ;
else if (hasnq)
reg |= R92C_TRXDMA_CTRL_QMAP_NQ;
else
reg |= R92C_TRXDMA_CTRL_QMAP_LQ;
} else if (nqueues == 2) {
/* All 2-endpoint configurations have a high priority queue. */
if (!hashq)
return (EIO);
if (hasnq)
reg |= R92C_TRXDMA_CTRL_QMAP_HQ_NQ;
else
reg |= R92C_TRXDMA_CTRL_QMAP_HQ_LQ;
} else
reg |= R92C_TRXDMA_CTRL_QMAP_3EP;
urtwn_write_2(sc, R92C_TRXDMA_CTRL, reg);
/* Set Tx/Rx transfer page boundary. */
urtwn_write_2(sc, R92C_TRXFF_BNDY + 2, 0x27ff);
/* Set Tx/Rx transfer page size. */
urtwn_write_1(sc, R92C_PBP,
SM(R92C_PBP_PSRX, R92C_PBP_128) |
SM(R92C_PBP_PSTX, R92C_PBP_128));
return (0);
}
static int
urtwn_r88e_dma_init(struct urtwn_softc *sc)
{
struct usb_interface *iface;
uint32_t reg;
int nqueues;
int error;
/* Initialize LLT table. */
error = urtwn_llt_init(sc);
if (error != 0)
return (error);
/* Get Tx queues to USB endpoints mapping. */
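/*
 * The "- 1" below presumably accounts for the Rx (bulk-in) endpoint;
 * the remaining bulk-out endpoints carry the Tx queues.
 */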
iface = usbd_get_iface(sc->sc_udev, 0);
nqueues = iface->idesc->bNumEndpoints - 1;
if (nqueues == 0)
return (EIO);
/* Set number of pages for normal priority queue. */
urtwn_write_2(sc, R92C_RQPN_NPQ, 0x000d);
urtwn_write_4(sc, R92C_RQPN, 0x808e000d);
urtwn_write_1(sc, R92C_TXPKTBUF_BCNQ_BDNY, R88E_TX_PAGE_BOUNDARY);
urtwn_write_1(sc, R92C_TXPKTBUF_MGQ_BDNY, R88E_TX_PAGE_BOUNDARY);
urtwn_write_1(sc, R92C_TXPKTBUF_WMAC_LBK_BF_HD, R88E_TX_PAGE_BOUNDARY);
urtwn_write_1(sc, R92C_TRXFF_BNDY, R88E_TX_PAGE_BOUNDARY);
urtwn_write_1(sc, R92C_TDECTRL + 1, R88E_TX_PAGE_BOUNDARY);
/* Set queue to USB pipe mapping. */
reg = urtwn_read_2(sc, R92C_TRXDMA_CTRL);
reg &= ~R92C_TRXDMA_CTRL_QMAP_M;
if (nqueues == 1)
reg |= R92C_TRXDMA_CTRL_QMAP_LQ;
else if (nqueues == 2)
reg |= R92C_TRXDMA_CTRL_QMAP_HQ_NQ;
else
reg |= R92C_TRXDMA_CTRL_QMAP_3EP;
urtwn_write_2(sc, R92C_TRXDMA_CTRL, reg);
/* Set Tx/Rx transfer page boundary. */
urtwn_write_2(sc, R92C_TRXFF_BNDY + 2, 0x23ff);
/* Set Tx/Rx transfer page size. */
urtwn_write_1(sc, R92C_PBP,
SM(R92C_PBP_PSRX, R92C_PBP_128) |
SM(R92C_PBP_PSTX, R92C_PBP_128));
return (0);
}
static void
urtwn_mac_init(struct urtwn_softc *sc)
{
int i;
/* Write MAC initialization values. */
if (sc->chip & URTWN_CHIP_88E) {
for (i = 0; i < nitems(rtl8188eu_mac); i++) {
urtwn_write_1(sc, rtl8188eu_mac[i].reg,
rtl8188eu_mac[i].val);
}
urtwn_write_1(sc, R92C_MAX_AGGR_NUM, 0x07);
} else {
for (i = 0; i < nitems(rtl8192cu_mac); i++)
urtwn_write_1(sc, rtl8192cu_mac[i].reg,
rtl8192cu_mac[i].val);
}
}
static void
urtwn_bb_init(struct urtwn_softc *sc)
{
const struct urtwn_bb_prog *prog;
uint32_t reg;
uint8_t crystalcap;
int i;
/* Enable BB and RF. */
urtwn_write_2(sc, R92C_SYS_FUNC_EN,
urtwn_read_2(sc, R92C_SYS_FUNC_EN) |
R92C_SYS_FUNC_EN_BBRSTB | R92C_SYS_FUNC_EN_BB_GLB_RST |
R92C_SYS_FUNC_EN_DIO_RF);
if (!(sc->chip & URTWN_CHIP_88E))
urtwn_write_2(sc, R92C_AFE_PLL_CTRL, 0xdb83);
urtwn_write_1(sc, R92C_RF_CTRL,
R92C_RF_CTRL_EN | R92C_RF_CTRL_RSTB | R92C_RF_CTRL_SDMRSTB);
urtwn_write_1(sc, R92C_SYS_FUNC_EN,
R92C_SYS_FUNC_EN_USBA | R92C_SYS_FUNC_EN_USBD |
R92C_SYS_FUNC_EN_BB_GLB_RST | R92C_SYS_FUNC_EN_BBRSTB);
if (!(sc->chip & URTWN_CHIP_88E)) {
urtwn_write_1(sc, R92C_LDOHCI12_CTRL, 0x0f);
urtwn_write_1(sc, 0x15, 0xe9);
urtwn_write_1(sc, R92C_AFE_XTAL_CTRL + 1, 0x80);
}
/* Select BB programming based on board type. */
if (sc->chip & URTWN_CHIP_88E)
prog = &rtl8188eu_bb_prog;
else if (!(sc->chip & URTWN_CHIP_92C)) {
if (sc->board_type == R92C_BOARD_TYPE_MINICARD)
prog = &rtl8188ce_bb_prog;
else if (sc->board_type == R92C_BOARD_TYPE_HIGHPA)
prog = &rtl8188ru_bb_prog;
else
prog = &rtl8188cu_bb_prog;
} else {
if (sc->board_type == R92C_BOARD_TYPE_MINICARD)
prog = &rtl8192ce_bb_prog;
else
prog = &rtl8192cu_bb_prog;
}
/* Write BB initialization values. */
for (i = 0; i < prog->count; i++) {
urtwn_bb_write(sc, prog->regs[i], prog->vals[i]);
urtwn_ms_delay(sc);
}
if (sc->chip & URTWN_CHIP_92C_1T2R) {
/* 8192C 1T only configuration. */
reg = urtwn_bb_read(sc, R92C_FPGA0_TXINFO);
reg = (reg & ~0x00000003) | 0x2;
urtwn_bb_write(sc, R92C_FPGA0_TXINFO, reg);
reg = urtwn_bb_read(sc, R92C_FPGA1_TXINFO);
reg = (reg & ~0x00300033) | 0x00200022;
urtwn_bb_write(sc, R92C_FPGA1_TXINFO, reg);
reg = urtwn_bb_read(sc, R92C_CCK0_AFESETTING);
reg = (reg & ~0xff000000) | 0x45 << 24;
urtwn_bb_write(sc, R92C_CCK0_AFESETTING, reg);
reg = urtwn_bb_read(sc, R92C_OFDM0_TRXPATHENA);
reg = (reg & ~0x000000ff) | 0x23;
urtwn_bb_write(sc, R92C_OFDM0_TRXPATHENA, reg);
reg = urtwn_bb_read(sc, R92C_OFDM0_AGCPARAM1);
reg = (reg & ~0x00000030) | 1 << 4;
urtwn_bb_write(sc, R92C_OFDM0_AGCPARAM1, reg);
reg = urtwn_bb_read(sc, 0xe74);
reg = (reg & ~0x0c000000) | 2 << 26;
urtwn_bb_write(sc, 0xe74, reg);
reg = urtwn_bb_read(sc, 0xe78);
reg = (reg & ~0x0c000000) | 2 << 26;
urtwn_bb_write(sc, 0xe78, reg);
reg = urtwn_bb_read(sc, 0xe7c);
reg = (reg & ~0x0c000000) | 2 << 26;
urtwn_bb_write(sc, 0xe7c, reg);
reg = urtwn_bb_read(sc, 0xe80);
reg = (reg & ~0x0c000000) | 2 << 26;
urtwn_bb_write(sc, 0xe80, reg);
reg = urtwn_bb_read(sc, 0xe88);
reg = (reg & ~0x0c000000) | 2 << 26;
urtwn_bb_write(sc, 0xe88, reg);
}
/* Write AGC values. */
for (i = 0; i < prog->agccount; i++) {
urtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE,
prog->agcvals[i]);
urtwn_ms_delay(sc);
}
if (sc->chip & URTWN_CHIP_88E) {
urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), 0x69553422);
urtwn_ms_delay(sc);
urtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), 0x69553420);
urtwn_ms_delay(sc);
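/*
 * Program the crystal capacitor setting from the R88E ROM
 * (offset 0xb9); 0xff means unprogrammed, in which case 0x20 is
 * used. The 6-bit value is written into both halves of the
 * XTAL control address field.
 */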
crystalcap = sc->r88e_rom[0xb9];
if (crystalcap == 0xff)
crystalcap = 0x20;
crystalcap &= 0x3f;
reg = urtwn_bb_read(sc, R92C_AFE_XTAL_CTRL);
urtwn_bb_write(sc, R92C_AFE_XTAL_CTRL,
RW(reg, R92C_AFE_XTAL_CTRL_ADDR,
crystalcap | crystalcap << 6));
} else {
if (urtwn_bb_read(sc, R92C_HSSI_PARAM2(0)) &
R92C_HSSI_PARAM2_CCK_HIPWR)
sc->sc_flags |= URTWN_FLAG_CCK_HIPWR;
}
}
void
urtwn_rf_init(struct urtwn_softc *sc)
{
const struct urtwn_rf_prog *prog;
uint32_t reg, type;
int i, j, idx, off;
/* Select RF programming based on board type. */
if (sc->chip & URTWN_CHIP_88E)
prog = rtl8188eu_rf_prog;
else if (!(sc->chip & URTWN_CHIP_92C)) {
if (sc->board_type == R92C_BOARD_TYPE_MINICARD)
prog = rtl8188ce_rf_prog;
else if (sc->board_type == R92C_BOARD_TYPE_HIGHPA)
prog = rtl8188ru_rf_prog;
else
prog = rtl8188cu_rf_prog;
} else
prog = rtl8192ce_rf_prog;
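/*
 * For each Rx chain: switch the RF_ENV control lines to software
 * control, program the chain's RF registers, then restore the
 * original control type and cache the CHNLBW register.
 */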
for (i = 0; i < sc->nrxchains; i++) {
/* Save RF_ENV control type. */
idx = i / 2;
off = (i % 2) * 16;
reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACESW(idx));
type = (reg >> off) & 0x10;
/* Set RF_ENV enable. */
reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACEOE(i));
reg |= 0x100000;
urtwn_bb_write(sc, R92C_FPGA0_RFIFACEOE(i), reg);
urtwn_ms_delay(sc);
/* Set RF_ENV output high. */
reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACEOE(i));
reg |= 0x10;
urtwn_bb_write(sc, R92C_FPGA0_RFIFACEOE(i), reg);
urtwn_ms_delay(sc);
/* Set address and data lengths of RF registers. */
reg = urtwn_bb_read(sc, R92C_HSSI_PARAM2(i));
reg &= ~R92C_HSSI_PARAM2_ADDR_LENGTH;
urtwn_bb_write(sc, R92C_HSSI_PARAM2(i), reg);
urtwn_ms_delay(sc);
reg = urtwn_bb_read(sc, R92C_HSSI_PARAM2(i));
reg &= ~R92C_HSSI_PARAM2_DATA_LENGTH;
urtwn_bb_write(sc, R92C_HSSI_PARAM2(i), reg);
urtwn_ms_delay(sc);
/* Write RF initialization values for this chain. */
for (j = 0; j < prog[i].count; j++) {
if (prog[i].regs[j] >= 0xf9 &&
prog[i].regs[j] <= 0xfe) {
/*
* These are fake RF register offsets that
* indicate a delay is required.
*/
usb_pause_mtx(&sc->sc_mtx, hz / 20); /* 50ms */
continue;
}
urtwn_rf_write(sc, i, prog[i].regs[j],
prog[i].vals[j]);
urtwn_ms_delay(sc);
}
/* Restore RF_ENV control type. */
reg = urtwn_bb_read(sc, R92C_FPGA0_RFIFACESW(idx));
reg &= ~(0x10 << off) | (type << off);
urtwn_bb_write(sc, R92C_FPGA0_RFIFACESW(idx), reg);
/* Cache RF register CHNLBW. */
sc->rf_chnlbw[i] = urtwn_rf_read(sc, i, R92C_RF_CHNLBW);
}
if ((sc->chip & (URTWN_CHIP_UMC_A_CUT | URTWN_CHIP_92C)) ==
URTWN_CHIP_UMC_A_CUT) {
urtwn_rf_write(sc, 0, R92C_RF_RX_G1, 0x30255);
urtwn_rf_write(sc, 0, R92C_RF_RX_G2, 0x50a00);
}
}
static void
urtwn_cam_init(struct urtwn_softc *sc)
{
/* Invalidate all CAM entries. */
urtwn_write_4(sc, R92C_CAMCMD,
R92C_CAMCMD_POLLING | R92C_CAMCMD_CLR);
}
static void
urtwn_pa_bias_init(struct urtwn_softc *sc)
{
uint8_t reg;
int i;
for (i = 0; i < sc->nrxchains; i++) {
if (sc->pa_setting & (1 << i))
continue;
urtwn_rf_write(sc, i, R92C_RF_IPA, 0x0f406);
urtwn_rf_write(sc, i, R92C_RF_IPA, 0x4f406);
urtwn_rf_write(sc, i, R92C_RF_IPA, 0x8f406);
urtwn_rf_write(sc, i, R92C_RF_IPA, 0xcf406);
}
if (!(sc->pa_setting & 0x10)) {
reg = urtwn_read_1(sc, 0x16);
reg = (reg & ~0xf0) | 0x90;
urtwn_write_1(sc, 0x16, reg);
}
}
static void
urtwn_rxfilter_init(struct urtwn_softc *sc)
{
/* Initialize Rx filter. */
/* TODO: use better filter for monitor mode. */
urtwn_write_4(sc, R92C_RCR,
R92C_RCR_AAP | R92C_RCR_APM | R92C_RCR_AM | R92C_RCR_AB |
R92C_RCR_APP_ICV | R92C_RCR_AMF | R92C_RCR_HTC_LOC_CTRL |
R92C_RCR_APP_MIC | R92C_RCR_APP_PHYSTS);
/* Accept all multicast frames. */
urtwn_write_4(sc, R92C_MAR + 0, 0xffffffff);
urtwn_write_4(sc, R92C_MAR + 4, 0xffffffff);
/* Accept all management frames. */
urtwn_write_2(sc, R92C_RXFLTMAP0, 0xffff);
/* Reject all control frames. */
urtwn_write_2(sc, R92C_RXFLTMAP1, 0x0000);
/* Accept all data frames. */
urtwn_write_2(sc, R92C_RXFLTMAP2, 0xffff);
}
static void
urtwn_edca_init(struct urtwn_softc *sc)
{
urtwn_write_2(sc, R92C_SPEC_SIFS, 0x100a);
urtwn_write_2(sc, R92C_MAC_SPEC_SIFS, 0x100a);
urtwn_write_2(sc, R92C_SIFS_CCK, 0x100a);
urtwn_write_2(sc, R92C_SIFS_OFDM, 0x100a);
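/*
 * Each EDCA_xx_PARAM word packs TXOP (bits 31:16), ECWmax (15:12),
 * ECWmin (11:8) and AIFS (7:0); see R92C_EDCA_PARAM_* in
 * if_urtwnreg.h.
 */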
urtwn_write_4(sc, R92C_EDCA_BE_PARAM, 0x005ea42b);
urtwn_write_4(sc, R92C_EDCA_BK_PARAM, 0x0000a44f);
urtwn_write_4(sc, R92C_EDCA_VI_PARAM, 0x005ea324);
urtwn_write_4(sc, R92C_EDCA_VO_PARAM, 0x002fa226);
}
void
urtwn_write_txpower(struct urtwn_softc *sc, int chain,
uint16_t power[URTWN_RIDX_COUNT])
{
uint32_t reg;
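/*
 * power[] is indexed by rate: 0-3 are CCK (1/2/5.5/11 Mbps),
 * 4-11 are OFDM (6-54 Mbps) and 12-27 are MCS0-MCS15.
 */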
/* Write per-CCK rate Tx power. */
if (chain == 0) {
reg = urtwn_bb_read(sc, R92C_TXAGC_A_CCK1_MCS32);
reg = RW(reg, R92C_TXAGC_A_CCK1, power[0]);
urtwn_bb_write(sc, R92C_TXAGC_A_CCK1_MCS32, reg);
reg = urtwn_bb_read(sc, R92C_TXAGC_B_CCK11_A_CCK2_11);
reg = RW(reg, R92C_TXAGC_A_CCK2, power[1]);
reg = RW(reg, R92C_TXAGC_A_CCK55, power[2]);
reg = RW(reg, R92C_TXAGC_A_CCK11, power[3]);
urtwn_bb_write(sc, R92C_TXAGC_B_CCK11_A_CCK2_11, reg);
} else {
reg = urtwn_bb_read(sc, R92C_TXAGC_B_CCK1_55_MCS32);
reg = RW(reg, R92C_TXAGC_B_CCK1, power[0]);
reg = RW(reg, R92C_TXAGC_B_CCK2, power[1]);
reg = RW(reg, R92C_TXAGC_B_CCK55, power[2]);
urtwn_bb_write(sc, R92C_TXAGC_B_CCK1_55_MCS32, reg);
reg = urtwn_bb_read(sc, R92C_TXAGC_B_CCK11_A_CCK2_11);
reg = RW(reg, R92C_TXAGC_B_CCK11, power[3]);
urtwn_bb_write(sc, R92C_TXAGC_B_CCK11_A_CCK2_11, reg);
}
/* Write per-OFDM rate Tx power. */
urtwn_bb_write(sc, R92C_TXAGC_RATE18_06(chain),
SM(R92C_TXAGC_RATE06, power[ 4]) |
SM(R92C_TXAGC_RATE09, power[ 5]) |
SM(R92C_TXAGC_RATE12, power[ 6]) |
SM(R92C_TXAGC_RATE18, power[ 7]));
urtwn_bb_write(sc, R92C_TXAGC_RATE54_24(chain),
SM(R92C_TXAGC_RATE24, power[ 8]) |
SM(R92C_TXAGC_RATE36, power[ 9]) |
SM(R92C_TXAGC_RATE48, power[10]) |
SM(R92C_TXAGC_RATE54, power[11]));
/* Write per-MCS Tx power. */
urtwn_bb_write(sc, R92C_TXAGC_MCS03_MCS00(chain),
SM(R92C_TXAGC_MCS00, power[12]) |
SM(R92C_TXAGC_MCS01, power[13]) |
SM(R92C_TXAGC_MCS02, power[14]) |
SM(R92C_TXAGC_MCS03, power[15]));
urtwn_bb_write(sc, R92C_TXAGC_MCS07_MCS04(chain),
SM(R92C_TXAGC_MCS04, power[16]) |
SM(R92C_TXAGC_MCS05, power[17]) |
SM(R92C_TXAGC_MCS06, power[18]) |
SM(R92C_TXAGC_MCS07, power[19]));
urtwn_bb_write(sc, R92C_TXAGC_MCS11_MCS08(chain),
SM(R92C_TXAGC_MCS08, power[20]) |
SM(R92C_TXAGC_MCS09, power[21]) |
SM(R92C_TXAGC_MCS10, power[22]) |
SM(R92C_TXAGC_MCS11, power[23]));
urtwn_bb_write(sc, R92C_TXAGC_MCS15_MCS12(chain),
SM(R92C_TXAGC_MCS12, power[24]) |
SM(R92C_TXAGC_MCS13, power[25]) |
SM(R92C_TXAGC_MCS14, power[26]) |
SM(R92C_TXAGC_MCS15, power[27]));
}
void
urtwn_get_txpower(struct urtwn_softc *sc, int chain,
struct ieee80211_channel *c, struct ieee80211_channel *extc,
uint16_t power[URTWN_RIDX_COUNT])
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct r92c_rom *rom = &sc->rom;
uint16_t cckpow, ofdmpow, htpow, diff, max;
const struct urtwn_txpwr *base;
int ridx, chan, group;
/* Determine channel group. */
chan = ieee80211_chan2ieee(ic, c); /* XXX center freq! */
if (chan <= 3)
group = 0;
else if (chan <= 9)
group = 1;
else
group = 2;
/* Get original Tx power based on board type and RF chain. */
if (!(sc->chip & URTWN_CHIP_92C)) {
if (sc->board_type == R92C_BOARD_TYPE_HIGHPA)
base = &rtl8188ru_txagc[chain];
else
base = &rtl8192cu_txagc[chain];
} else
base = &rtl8192cu_txagc[chain];
memset(power, 0, URTWN_RIDX_COUNT * sizeof(power[0]));
if (sc->regulatory == 0) {
for (ridx = 0; ridx <= 3; ridx++)
power[ridx] = base->pwr[0][ridx];
}
for (ridx = 4; ridx < URTWN_RIDX_COUNT; ridx++) {
if (sc->regulatory == 3) {
power[ridx] = base->pwr[0][ridx];
/* Apply vendor limits. */
if (extc != NULL)
max = rom->ht40_max_pwr[group];
else
max = rom->ht20_max_pwr[group];
max = (max >> (chain * 4)) & 0xf;
if (power[ridx] > max)
power[ridx] = max;
} else if (sc->regulatory == 1) {
if (extc == NULL)
power[ridx] = base->pwr[group][ridx];
} else if (sc->regulatory != 2)
power[ridx] = base->pwr[0][ridx];
}
/* Compute per-CCK rate Tx power. */
cckpow = rom->cck_tx_pwr[chain][group];
for (ridx = 0; ridx <= 3; ridx++) {
power[ridx] += cckpow;
if (power[ridx] > R92C_MAX_TX_PWR)
power[ridx] = R92C_MAX_TX_PWR;
}
htpow = rom->ht40_1s_tx_pwr[chain][group];
if (sc->ntxchains > 1) {
/* Apply reduction for 2 spatial streams. */
diff = rom->ht40_2s_tx_pwr_diff[group];
diff = (diff >> (chain * 4)) & 0xf;
htpow = (htpow > diff) ? htpow - diff : 0;
}
/* Compute per-OFDM rate Tx power. */
diff = rom->ofdm_tx_pwr_diff[group];
diff = (diff >> (chain * 4)) & 0xf;
ofdmpow = htpow + diff; /* HT->OFDM correction. */
for (ridx = 4; ridx <= 11; ridx++) {
power[ridx] += ofdmpow;
if (power[ridx] > R92C_MAX_TX_PWR)
power[ridx] = R92C_MAX_TX_PWR;
}
/* Compute per-MCS Tx power. */
if (extc == NULL) {
diff = rom->ht20_tx_pwr_diff[group];
diff = (diff >> (chain * 4)) & 0xf;
htpow += diff; /* HT40->HT20 correction. */
}
for (ridx = 12; ridx <= 27; ridx++) {
power[ridx] += htpow;
if (power[ridx] > R92C_MAX_TX_PWR)
power[ridx] = R92C_MAX_TX_PWR;
}
#ifdef URTWN_DEBUG
if (urtwn_debug >= 4) {
/* Dump per-rate Tx power values. */
printf("Tx power for chain %d:\n", chain);
for (ridx = 0; ridx < URTWN_RIDX_COUNT; ridx++)
printf("Rate %d = %u\n", ridx, power[ridx]);
}
#endif
}
void
urtwn_r88e_get_txpower(struct urtwn_softc *sc, int chain,
struct ieee80211_channel *c, struct ieee80211_channel *extc,
uint16_t power[URTWN_RIDX_COUNT])
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t cckpow, ofdmpow, bw20pow, htpow;
const struct urtwn_r88e_txpwr *base;
int ridx, chan, group;
/* Determine channel group. */
chan = ieee80211_chan2ieee(ic, c); /* XXX center freq! */
if (chan <= 2)
group = 0;
else if (chan <= 5)
group = 1;
else if (chan <= 8)
group = 2;
else if (chan <= 11)
group = 3;
else if (chan <= 13)
group = 4;
else
group = 5;
/* Get original Tx power based on board type and RF chain. */
base = &rtl8188eu_txagc[chain];
memset(power, 0, URTWN_RIDX_COUNT * sizeof(power[0]));
if (sc->regulatory == 0) {
for (ridx = 0; ridx <= 3; ridx++)
power[ridx] = base->pwr[0][ridx];
}
for (ridx = 4; ridx < URTWN_RIDX_COUNT; ridx++) {
if (sc->regulatory == 3)
power[ridx] = base->pwr[0][ridx];
else if (sc->regulatory == 1) {
if (extc == NULL)
power[ridx] = base->pwr[group][ridx];
} else if (sc->regulatory != 2)
power[ridx] = base->pwr[0][ridx];
}
/* Compute per-CCK rate Tx power. */
cckpow = sc->cck_tx_pwr[group];
for (ridx = 0; ridx <= 3; ridx++) {
power[ridx] += cckpow;
if (power[ridx] > R92C_MAX_TX_PWR)
power[ridx] = R92C_MAX_TX_PWR;
}
htpow = sc->ht40_tx_pwr[group];
/* Compute per-OFDM rate Tx power. */
ofdmpow = htpow + sc->ofdm_tx_pwr_diff;
for (ridx = 4; ridx <= 11; ridx++) {
power[ridx] += ofdmpow;
if (power[ridx] > R92C_MAX_TX_PWR)
power[ridx] = R92C_MAX_TX_PWR;
}
bw20pow = htpow + sc->bw20_tx_pwr_diff;
for (ridx = 12; ridx <= 27; ridx++) {
power[ridx] += bw20pow;
if (power[ridx] > R92C_MAX_TX_PWR)
power[ridx] = R92C_MAX_TX_PWR;
}
}
void
urtwn_set_txpower(struct urtwn_softc *sc, struct ieee80211_channel *c,
struct ieee80211_channel *extc)
{
uint16_t power[URTWN_RIDX_COUNT];
int i;
for (i = 0; i < sc->ntxchains; i++) {
/* Compute per-rate Tx power values. */
if (sc->chip & URTWN_CHIP_88E)
urtwn_r88e_get_txpower(sc, i, c, extc, power);
else
urtwn_get_txpower(sc, i, c, extc, power);
/* Write per-rate Tx power values to hardware. */
urtwn_write_txpower(sc, i, power);
}
}
static void
urtwn_scan_start(struct ieee80211com *ic)
{
/* XXX do nothing? */
}
static void
urtwn_scan_end(struct ieee80211com *ic)
{
/* XXX do nothing? */
}
static void
urtwn_set_channel(struct ieee80211com *ic)
{
struct urtwn_softc *sc = ic->ic_softc;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
URTWN_LOCK(sc);
if (vap->iv_state == IEEE80211_S_SCAN) {
/* Make link LED blink during scan. */
urtwn_set_led(sc, URTWN_LED_LINK, !sc->ledlink);
}
urtwn_set_chan(sc, ic->ic_curchan, NULL);
URTWN_UNLOCK(sc);
}
static void
urtwn_update_mcast(struct ieee80211com *ic)
{
/* XXX do nothing? */
}
static void
urtwn_set_chan(struct urtwn_softc *sc, struct ieee80211_channel *c,
struct ieee80211_channel *extc)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t reg;
u_int chan;
int i;
chan = ieee80211_chan2ieee(ic, c); /* XXX center freq! */
if (chan == 0 || chan == IEEE80211_CHAN_ANY) {
device_printf(sc->sc_dev,
"%s: invalid channel %x\n", __func__, chan);
return;
}
/* Set Tx power for this new channel. */
urtwn_set_txpower(sc, c, extc);
for (i = 0; i < sc->nrxchains; i++) {
urtwn_rf_write(sc, i, R92C_RF_CHNLBW,
RW(sc->rf_chnlbw[i], R92C_RF_CHNLBW_CHNL, chan));
}
#ifndef IEEE80211_NO_HT
if (extc != NULL) {
/* Is secondary channel below or above primary? */
int prichlo = c->ic_freq < extc->ic_freq;
urtwn_write_1(sc, R92C_BWOPMODE,
urtwn_read_1(sc, R92C_BWOPMODE) & ~R92C_BWOPMODE_20MHZ);
reg = urtwn_read_1(sc, R92C_RRSR + 2);
reg = (reg & ~0x6f) | (prichlo ? 1 : 2) << 5;
urtwn_write_1(sc, R92C_RRSR + 2, reg);
urtwn_bb_write(sc, R92C_FPGA0_RFMOD,
urtwn_bb_read(sc, R92C_FPGA0_RFMOD) | R92C_RFMOD_40MHZ);
urtwn_bb_write(sc, R92C_FPGA1_RFMOD,
urtwn_bb_read(sc, R92C_FPGA1_RFMOD) | R92C_RFMOD_40MHZ);
/* Set CCK side band. */
reg = urtwn_bb_read(sc, R92C_CCK0_SYSTEM);
reg = (reg & ~0x00000010) | (prichlo ? 0 : 1) << 4;
urtwn_bb_write(sc, R92C_CCK0_SYSTEM, reg);
reg = urtwn_bb_read(sc, R92C_OFDM1_LSTF);
reg = (reg & ~0x00000c00) | (prichlo ? 1 : 2) << 10;
urtwn_bb_write(sc, R92C_OFDM1_LSTF, reg);
urtwn_bb_write(sc, R92C_FPGA0_ANAPARAM2,
urtwn_bb_read(sc, R92C_FPGA0_ANAPARAM2) &
~R92C_FPGA0_ANAPARAM2_CBW20);
reg = urtwn_bb_read(sc, 0x818);
reg = (reg & ~0x0c000000) | (prichlo ? 2 : 1) << 26;
urtwn_bb_write(sc, 0x818, reg);
/* Select 40MHz bandwidth. */
urtwn_rf_write(sc, 0, R92C_RF_CHNLBW,
(sc->rf_chnlbw[0] & ~0xfff) | chan);
} else
#endif
{
urtwn_write_1(sc, R92C_BWOPMODE,
urtwn_read_1(sc, R92C_BWOPMODE) | R92C_BWOPMODE_20MHZ);
urtwn_bb_write(sc, R92C_FPGA0_RFMOD,
urtwn_bb_read(sc, R92C_FPGA0_RFMOD) & ~R92C_RFMOD_40MHZ);
urtwn_bb_write(sc, R92C_FPGA1_RFMOD,
urtwn_bb_read(sc, R92C_FPGA1_RFMOD) & ~R92C_RFMOD_40MHZ);
if (!(sc->chip & URTWN_CHIP_88E)) {
urtwn_bb_write(sc, R92C_FPGA0_ANAPARAM2,
urtwn_bb_read(sc, R92C_FPGA0_ANAPARAM2) |
R92C_FPGA0_ANAPARAM2_CBW20);
}
/* Select 20MHz bandwidth. */
urtwn_rf_write(sc, 0, R92C_RF_CHNLBW,
(sc->rf_chnlbw[0] & ~0xfff) | chan |
((sc->chip & URTWN_CHIP_88E) ? R88E_RF_CHNLBW_BW20 :
R92C_RF_CHNLBW_BW20));
}
}
static void
urtwn_iq_calib(struct urtwn_softc *sc)
{
/* TODO */
}
static void
urtwn_lc_calib(struct urtwn_softc *sc)
{
uint32_t rf_ac[2];
uint8_t txmode;
int i;
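/*
 * LC calibration must run with transmissions quiesced: either put
 * the RF chains in standby (if continuous Tx is enabled) or pause
 * all Tx queues, trigger the calibration, then restore the state.
 */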
txmode = urtwn_read_1(sc, R92C_OFDM1_LSTF + 3);
if ((txmode & 0x70) != 0) {
/* Disable all continuous Tx. */
urtwn_write_1(sc, R92C_OFDM1_LSTF + 3, txmode & ~0x70);
/* Set RF mode to standby mode. */
for (i = 0; i < sc->nrxchains; i++) {
rf_ac[i] = urtwn_rf_read(sc, i, R92C_RF_AC);
urtwn_rf_write(sc, i, R92C_RF_AC,
RW(rf_ac[i], R92C_RF_AC_MODE,
R92C_RF_AC_MODE_STANDBY));
}
} else {
/* Block all Tx queues. */
urtwn_write_1(sc, R92C_TXPAUSE, 0xff);
}
/* Start calibration. */
urtwn_rf_write(sc, 0, R92C_RF_CHNLBW,
urtwn_rf_read(sc, 0, R92C_RF_CHNLBW) | R92C_RF_CHNLBW_LCSTART);
/* Give the calibration time to complete. */
usb_pause_mtx(&sc->sc_mtx, hz / 10); /* 100ms */
/* Restore configuration. */
if ((txmode & 0x70) != 0) {
/* Restore Tx mode. */
urtwn_write_1(sc, R92C_OFDM1_LSTF + 3, txmode);
/* Restore RF mode. */
for (i = 0; i < sc->nrxchains; i++)
urtwn_rf_write(sc, i, R92C_RF_AC, rf_ac[i]);
} else {
/* Unblock all Tx queues. */
urtwn_write_1(sc, R92C_TXPAUSE, 0x00);
}
}
static void
-urtwn_init_locked(void *arg)
+urtwn_init(struct urtwn_softc *sc)
{
- struct urtwn_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ uint8_t macaddr[IEEE80211_ADDR_LEN];
uint32_t reg;
int error;
URTWN_ASSERT_LOCKED(sc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- urtwn_stop_locked(ifp);
+ if (sc->sc_flags & URTWN_RUNNING)
+ urtwn_stop(sc);
/* Init firmware commands ring. */
sc->fwcur = 0;
/* Allocate Tx/Rx buffers. */
error = urtwn_alloc_rx_list(sc);
if (error != 0)
goto fail;
error = urtwn_alloc_tx_list(sc);
if (error != 0)
goto fail;
/* Power on adapter. */
error = urtwn_power_on(sc);
if (error != 0)
goto fail;
/* Initialize DMA. */
error = urtwn_dma_init(sc);
if (error != 0)
goto fail;
/* Set info size in Rx descriptors (in 64-bit words). */
urtwn_write_1(sc, R92C_RX_DRVINFO_SZ, 4);
/* Init interrupts. */
if (sc->chip & URTWN_CHIP_88E) {
urtwn_write_4(sc, R88E_HISR, 0xffffffff);
urtwn_write_4(sc, R88E_HIMR, R88E_HIMR_CPWM | R88E_HIMR_CPWM2 |
R88E_HIMR_TBDER | R88E_HIMR_PSTIMEOUT);
urtwn_write_4(sc, R88E_HIMRE, R88E_HIMRE_RXFOVW |
R88E_HIMRE_TXFOVW | R88E_HIMRE_RXERR | R88E_HIMRE_TXERR);
urtwn_write_1(sc, R92C_USB_SPECIAL_OPTION,
urtwn_read_1(sc, R92C_USB_SPECIAL_OPTION) |
R92C_USB_SPECIAL_OPTION_INT_BULK_SEL);
} else {
urtwn_write_4(sc, R92C_HISR, 0xffffffff);
urtwn_write_4(sc, R92C_HIMR, 0xffffffff);
}
/* Set MAC address. */
- urtwn_write_region_1(sc, R92C_MACID, IF_LLADDR(ifp),
- IEEE80211_ADDR_LEN);
+ IEEE80211_ADDR_COPY(macaddr, vap ? vap->iv_myaddr : ic->ic_macaddr);
+ urtwn_write_region_1(sc, R92C_MACID, macaddr, IEEE80211_ADDR_LEN);
/* Set initial network type. */
reg = urtwn_read_4(sc, R92C_CR);
reg = RW(reg, R92C_CR_NETTYPE, R92C_CR_NETTYPE_INFRA);
urtwn_write_4(sc, R92C_CR, reg);
urtwn_rxfilter_init(sc);
/* Set response rate. */
reg = urtwn_read_4(sc, R92C_RRSR);
reg = RW(reg, R92C_RRSR_RATE_BITMAP, R92C_RRSR_RATE_CCK_ONLY_1M);
urtwn_write_4(sc, R92C_RRSR, reg);
/* Set short/long retry limits. */
urtwn_write_2(sc, R92C_RL,
SM(R92C_RL_SRL, 0x30) | SM(R92C_RL_LRL, 0x30));
/* Initialize EDCA parameters. */
urtwn_edca_init(sc);
/* Setup rate fallback. */
if (!(sc->chip & URTWN_CHIP_88E)) {
urtwn_write_4(sc, R92C_DARFRC + 0, 0x00000000);
urtwn_write_4(sc, R92C_DARFRC + 4, 0x10080404);
urtwn_write_4(sc, R92C_RARFRC + 0, 0x04030201);
urtwn_write_4(sc, R92C_RARFRC + 4, 0x08070605);
}
urtwn_write_1(sc, R92C_FWHW_TXQ_CTRL,
urtwn_read_1(sc, R92C_FWHW_TXQ_CTRL) |
R92C_FWHW_TXQ_CTRL_AMPDU_RTY_NEW);
/* Set ACK timeout. */
urtwn_write_1(sc, R92C_ACKTO, 0x40);
/* Setup USB aggregation. */
reg = urtwn_read_4(sc, R92C_TDECTRL);
reg = RW(reg, R92C_TDECTRL_BLK_DESC_NUM, 6);
urtwn_write_4(sc, R92C_TDECTRL, reg);
urtwn_write_1(sc, R92C_TRXDMA_CTRL,
urtwn_read_1(sc, R92C_TRXDMA_CTRL) |
R92C_TRXDMA_CTRL_RXDMA_AGG_EN);
urtwn_write_1(sc, R92C_RXDMA_AGG_PG_TH, 48);
if (sc->chip & URTWN_CHIP_88E)
urtwn_write_1(sc, R92C_RXDMA_AGG_PG_TH + 1, 4);
else {
urtwn_write_1(sc, R92C_USB_DMA_AGG_TO, 4);
urtwn_write_1(sc, R92C_USB_SPECIAL_OPTION,
urtwn_read_1(sc, R92C_USB_SPECIAL_OPTION) |
R92C_USB_SPECIAL_OPTION_AGG_EN);
urtwn_write_1(sc, R92C_USB_AGG_TH, 8);
urtwn_write_1(sc, R92C_USB_AGG_TO, 6);
}
/* Initialize beacon parameters. */
urtwn_write_2(sc, R92C_BCN_CTRL, 0x1010);
urtwn_write_2(sc, R92C_TBTT_PROHIBIT, 0x6404);
urtwn_write_1(sc, R92C_DRVERLYINT, 0x05);
urtwn_write_1(sc, R92C_BCNDMATIM, 0x02);
urtwn_write_2(sc, R92C_BCNTCFG, 0x660f);
if (!(sc->chip & URTWN_CHIP_88E)) {
/* Setup AMPDU aggregation. */
urtwn_write_4(sc, R92C_AGGLEN_LMT, 0x99997631); /* MCS7~0 */
urtwn_write_1(sc, R92C_AGGR_BREAK_TIME, 0x16);
urtwn_write_2(sc, R92C_MAX_AGGR_NUM, 0x0708);
urtwn_write_1(sc, R92C_BCN_MAX_ERR, 0xff);
}
/* Load 8051 microcode. */
error = urtwn_load_firmware(sc);
if (error != 0)
goto fail;
/* Initialize MAC/BB/RF blocks. */
urtwn_mac_init(sc);
urtwn_bb_init(sc);
urtwn_rf_init(sc);
if (sc->chip & URTWN_CHIP_88E) {
urtwn_write_2(sc, R92C_CR,
urtwn_read_2(sc, R92C_CR) | R92C_CR_MACTXEN |
R92C_CR_MACRXEN);
}
/* Turn CCK and OFDM blocks on. */
reg = urtwn_bb_read(sc, R92C_FPGA0_RFMOD);
reg |= R92C_RFMOD_CCK_EN;
urtwn_bb_write(sc, R92C_FPGA0_RFMOD, reg);
reg = urtwn_bb_read(sc, R92C_FPGA0_RFMOD);
reg |= R92C_RFMOD_OFDM_EN;
urtwn_bb_write(sc, R92C_FPGA0_RFMOD, reg);
/* Clear per-station keys table. */
urtwn_cam_init(sc);
/* Enable hardware sequence numbering. */
urtwn_write_1(sc, R92C_HWSEQ_CTRL, 0xff);
/* Perform LO and IQ calibrations. */
urtwn_iq_calib(sc);
/* Perform LC calibration. */
urtwn_lc_calib(sc);
/* Fix USB interference issue. */
if (!(sc->chip & URTWN_CHIP_88E)) {
urtwn_write_1(sc, 0xfe40, 0xe0);
urtwn_write_1(sc, 0xfe41, 0x8d);
urtwn_write_1(sc, 0xfe42, 0x80);
urtwn_pa_bias_init(sc);
}
/* Initialize GPIO setting. */
urtwn_write_1(sc, R92C_GPIO_MUXCFG,
urtwn_read_1(sc, R92C_GPIO_MUXCFG) & ~R92C_GPIO_MUXCFG_ENBT);
/* Fix for lower temperature. */
if (!(sc->chip & URTWN_CHIP_88E))
urtwn_write_1(sc, 0x15, 0xe9);
usbd_transfer_start(sc->sc_xfer[URTWN_BULK_RX]);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_flags |= URTWN_RUNNING;
callout_reset(&sc->sc_watchdog_ch, hz, urtwn_watchdog, sc);
fail:
return;
}
static void
-urtwn_init(void *arg)
+urtwn_stop(struct urtwn_softc *sc)
{
- struct urtwn_softc *sc = arg;
- URTWN_LOCK(sc);
- urtwn_init_locked(arg);
- URTWN_UNLOCK(sc);
-}
-
-static void
-urtwn_stop_locked(struct ifnet *ifp)
-{
- struct urtwn_softc *sc = ifp->if_softc;
-
URTWN_ASSERT_LOCKED(sc);
-
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
-
+ sc->sc_flags &= ~URTWN_RUNNING;
callout_stop(&sc->sc_watchdog_ch);
urtwn_abort_xfers(sc);
}
static void
-urtwn_stop(struct ifnet *ifp)
-{
- struct urtwn_softc *sc = ifp->if_softc;
-
- URTWN_LOCK(sc);
- urtwn_stop_locked(ifp);
- URTWN_UNLOCK(sc);
-}
-
-static void
urtwn_abort_xfers(struct urtwn_softc *sc)
{
int i;
URTWN_ASSERT_LOCKED(sc);
/* abort any pending transfers */
for (i = 0; i < URTWN_N_TRANSFER; i++)
usbd_transfer_stop(sc->sc_xfer[i]);
}
static int
urtwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct urtwn_softc *sc = ic->ic_softc;
struct urtwn_data *bf;
/* prevent management frames from being sent if we're not ready */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!(sc->sc_flags & URTWN_RUNNING)) {
m_freem(m);
ieee80211_free_node(ni);
return (ENETDOWN);
}
URTWN_LOCK(sc);
bf = urtwn_getbuf(sc);
if (bf == NULL) {
ieee80211_free_node(ni);
m_freem(m);
URTWN_UNLOCK(sc);
return (ENOBUFS);
}
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
if (urtwn_tx_start(sc, ni, m, bf) != 0) {
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, bf, next);
URTWN_UNLOCK(sc);
return (EIO);
}
URTWN_UNLOCK(sc);
sc->sc_txtimer = 5;
return (0);
}
static void
urtwn_ms_delay(struct urtwn_softc *sc)
{
usb_pause_mtx(&sc->sc_mtx, hz / 1000);
}
static device_method_t urtwn_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, urtwn_match),
DEVMETHOD(device_attach, urtwn_attach),
DEVMETHOD(device_detach, urtwn_detach),
DEVMETHOD_END
};
static driver_t urtwn_driver = {
"urtwn",
urtwn_methods,
sizeof(struct urtwn_softc)
};
static devclass_t urtwn_devclass;
DRIVER_MODULE(urtwn, uhub, urtwn_driver, urtwn_devclass, NULL, NULL);
MODULE_DEPEND(urtwn, usb, 1, 1, 1);
MODULE_DEPEND(urtwn, wlan, 1, 1, 1);
MODULE_DEPEND(urtwn, firmware, 1, 1, 1);
MODULE_VERSION(urtwn, 1);
Index: head/sys/dev/usb/wlan/if_urtwnreg.h
===================================================================
--- head/sys/dev/usb/wlan/if_urtwnreg.h (revision 287196)
+++ head/sys/dev/usb/wlan/if_urtwnreg.h (revision 287197)
@@ -1,2187 +1,2188 @@
/*-
* Copyright (c) 2010 Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
* $OpenBSD: if_urtwnreg.h,v 1.3 2010/11/16 18:02:59 damien Exp $
* $FreeBSD$
*/
#define URTWN_CONFIG_INDEX 0
#define URTWN_IFACE_INDEX 0
#define URTWN_NOISE_FLOOR -95
#define R92C_MAX_CHAINS 2
/* Maximum number of output pipes is 3. */
#define R92C_MAX_EPOUT 3
#define R92C_MAX_TX_PWR 0x3f
#define R92C_PUBQ_NPAGES 231
#define R92C_TXPKTBUF_COUNT 256
#define R92C_TX_PAGE_COUNT 248
#define R92C_TX_PAGE_BOUNDARY (R92C_TX_PAGE_COUNT + 1)
#define R88E_TXPKTBUF_COUNT 177
#define R88E_TX_PAGE_COUNT 169
#define R88E_TX_PAGE_BOUNDARY (R88E_TX_PAGE_COUNT + 1)
#define R92C_H2C_NBOX 4
/* USB Requests. */
#define R92C_REQ_REGS 0x05
/*
* MAC registers.
*/
/* System Configuration. */
#define R92C_SYS_ISO_CTRL 0x000
#define R92C_SYS_FUNC_EN 0x002
#define R92C_APS_FSMCO 0x004
#define R92C_SYS_CLKR 0x008
#define R92C_AFE_MISC 0x010
#define R92C_SPS0_CTRL 0x011
#define R92C_SPS_OCP_CFG 0x018
#define R92C_RSV_CTRL 0x01c
#define R92C_RF_CTRL 0x01f
#define R92C_LDOA15_CTRL 0x020
#define R92C_LDOV12D_CTRL 0x021
#define R92C_LDOHCI12_CTRL 0x022
#define R92C_LPLDO_CTRL 0x023
#define R92C_AFE_XTAL_CTRL 0x024
#define R92C_AFE_PLL_CTRL 0x028
#define R92C_EFUSE_CTRL 0x030
#define R92C_EFUSE_TEST 0x034
#define R92C_PWR_DATA 0x038
#define R92C_CAL_TIMER 0x03c
#define R92C_ACLK_MON 0x03e
#define R92C_GPIO_MUXCFG 0x040
#define R92C_GPIO_IO_SEL 0x042
#define R92C_MAC_PINMUX_CFG 0x043
#define R92C_GPIO_PIN_CTRL 0x044
#define R92C_GPIO_INTM 0x048
#define R92C_LEDCFG0 0x04c
#define R92C_LEDCFG1 0x04d
#define R92C_LEDCFG2 0x04e
#define R92C_LEDCFG3 0x04f
#define R92C_FSIMR 0x050
#define R92C_FSISR 0x054
#define R92C_HSIMR 0x058
#define R92C_HSISR 0x05c
#define R92C_MCUFWDL 0x080
#define R92C_HMEBOX_EXT(idx) (0x088 + (idx) * 2)
#define R88E_HIMR 0x0b0
#define R88E_HISR 0x0b4
#define R88E_HIMRE 0x0b8
#define R88E_HISRE 0x0bc
#define R92C_EFUSE_ACCESS 0x0cf
#define R92C_BIST_SCAN 0x0d0
#define R92C_BIST_RPT 0x0d4
#define R92C_BIST_ROM_RPT 0x0d8
#define R92C_USB_SIE_INTF 0x0e0
#define R92C_PCIE_MIO_INTF 0x0e4
#define R92C_PCIE_MIO_INTD 0x0e8
#define R92C_HPON_FSM 0x0ec
#define R92C_SYS_CFG 0x0f0
/* MAC General Configuration. */
#define R92C_CR 0x100
#define R92C_PBP 0x104
#define R92C_TRXDMA_CTRL 0x10c
#define R92C_TRXFF_BNDY 0x114
#define R92C_TRXFF_STATUS 0x118
#define R92C_RXFF_PTR 0x11c
#define R92C_HIMR 0x120
#define R92C_HISR 0x124
#define R92C_HIMRE 0x128
#define R92C_HISRE 0x12c
#define R92C_CPWM 0x12f
#define R92C_FWIMR 0x130
#define R92C_FWISR 0x134
#define R92C_PKTBUF_DBG_CTRL 0x140
#define R92C_PKTBUF_DBG_DATA_L 0x144
#define R92C_PKTBUF_DBG_DATA_H 0x148
#define R92C_TC0_CTRL(i) (0x150 + (i) * 4)
#define R92C_TCUNIT_BASE 0x164
#define R92C_MBIST_START 0x174
#define R92C_MBIST_DONE 0x178
#define R92C_MBIST_FAIL 0x17c
#define R92C_C2HEVT_MSG_NORMAL 0x1a0
#define R92C_C2HEVT_MSG_TEST 0x1b8
#define R92C_C2HEVT_CLEAR 0x1bf
#define R92C_MCUTST_1 0x1c0
#define R92C_FMETHR 0x1c8
#define R92C_HMETFR 0x1cc
#define R92C_HMEBOX(idx) (0x1d0 + (idx) * 4)
#define R92C_LLT_INIT 0x1e0
#define R92C_BB_ACCESS_CTRL 0x1e8
#define R92C_BB_ACCESS_DATA 0x1ec
#define R88E_HMEBOX_EXT(idx) (0x1f0 + (idx) * 4)
/* Tx DMA Configuration. */
#define R92C_RQPN 0x200
#define R92C_FIFOPAGE 0x204
#define R92C_TDECTRL 0x208
#define R92C_TXDMA_OFFSET_CHK 0x20c
#define R92C_TXDMA_STATUS 0x210
#define R92C_RQPN_NPQ 0x214
/* Rx DMA Configuration. */
#define R92C_RXDMA_AGG_PG_TH 0x280
#define R92C_RXPKT_NUM 0x284
#define R92C_RXDMA_STATUS 0x288
/* Protocol Configuration. */
#define R92C_FWHW_TXQ_CTRL 0x420
#define R92C_HWSEQ_CTRL 0x423
#define R92C_TXPKTBUF_BCNQ_BDNY 0x424
#define R92C_TXPKTBUF_MGQ_BDNY 0x425
#define R92C_SPEC_SIFS 0x428
#define R92C_RL 0x42a
#define R92C_DARFRC 0x430
#define R92C_RARFRC 0x438
#define R92C_RRSR 0x440
#define R92C_ARFR(i) (0x444 + (i) * 4)
#define R92C_AGGLEN_LMT 0x458
#define R92C_AMPDU_MIN_SPACE 0x45c
#define R92C_TXPKTBUF_WMAC_LBK_BF_HD 0x45d
#define R92C_FAST_EDCA_CTRL 0x460
#define R92C_RD_RESP_PKT_TH 0x463
#define R92C_INIRTS_RATE_SEL 0x480
#define R92C_INIDATA_RATE_SEL(macid) (0x484 + (macid))
#define R92C_MAX_AGGR_NUM 0x4ca
/* EDCA Configuration. */
#define R92C_EDCA_VO_PARAM 0x500
#define R92C_EDCA_VI_PARAM 0x504
#define R92C_EDCA_BE_PARAM 0x508
#define R92C_EDCA_BK_PARAM 0x50c
#define R92C_BCNTCFG 0x510
#define R92C_PIFS 0x512
#define R92C_RDG_PIFS 0x513
#define R92C_SIFS_CCK 0x514
#define R92C_SIFS_OFDM 0x516
#define R92C_AGGR_BREAK_TIME 0x51a
#define R92C_SLOT 0x51b
#define R92C_TX_PTCL_CTRL 0x520
#define R92C_TXPAUSE 0x522
#define R92C_DIS_TXREQ_CLR 0x523
#define R92C_RD_CTRL 0x524
#define R92C_TBTT_PROHIBIT 0x540
#define R92C_RD_NAV_NXT 0x544
#define R92C_NAV_PROT_LEN 0x546
#define R92C_BCN_CTRL 0x550
#define R92C_MBID_NUM 0x552
#define R92C_DUAL_TSF_RST 0x553
#define R92C_BCN_INTERVAL 0x554
#define R92C_DRVERLYINT 0x558
#define R92C_BCNDMATIM 0x559
#define R92C_ATIMWND 0x55a
#define R92C_USTIME_TSF 0x55c
#define R92C_BCN_MAX_ERR 0x55d
#define R92C_RXTSF_OFFSET_CCK 0x55e
#define R92C_RXTSF_OFFSET_OFDM 0x55f
#define R92C_TSFTR 0x560
#define R92C_INIT_TSFTR 0x564
#define R92C_PSTIMER 0x580
#define R92C_TIMER0 0x584
#define R92C_TIMER1 0x588
#define R92C_ACMHWCTRL 0x5c0
#define R92C_ACMRSTCTRL 0x5c1
#define R92C_ACMAVG 0x5c2
#define R92C_VO_ADMTIME 0x5c4
#define R92C_VI_ADMTIME 0x5c6
#define R92C_BE_ADMTIME 0x5c8
#define R92C_EDCA_RANDOM_GEN 0x5cc
#define R92C_SCH_TXCMD 0x5d0
/* WMAC Configuration. */
#define R92C_APSD_CTRL 0x600
#define R92C_BWOPMODE 0x603
#define R92C_RCR 0x608
#define R92C_RX_DRVINFO_SZ 0x60f
#define R92C_MACID 0x610
#define R92C_BSSID 0x618
#define R92C_MAR 0x620
#define R92C_MAC_SPEC_SIFS 0x63a
#define R92C_R2T_SIFS 0x63c
#define R92C_T2T_SIFS 0x63e
#define R92C_ACKTO 0x640
#define R92C_CAMCMD 0x670
#define R92C_CAMWRITE 0x674
#define R92C_CAMREAD 0x678
#define R92C_CAMDBG 0x67c
#define R92C_SECCFG 0x680
#define R92C_RXFLTMAP0 0x6a0
#define R92C_RXFLTMAP1 0x6a2
#define R92C_RXFLTMAP2 0x6a4
/* Bits for R92C_SYS_ISO_CTRL. */
#define R92C_SYS_ISO_CTRL_MD2PP 0x0001
#define R92C_SYS_ISO_CTRL_UA2USB 0x0002
#define R92C_SYS_ISO_CTRL_UD2CORE 0x0004
#define R92C_SYS_ISO_CTRL_PA2PCIE 0x0008
#define R92C_SYS_ISO_CTRL_PD2CORE 0x0010
#define R92C_SYS_ISO_CTRL_IP2MAC 0x0020
#define R92C_SYS_ISO_CTRL_DIOP 0x0040
#define R92C_SYS_ISO_CTRL_DIOE 0x0080
#define R92C_SYS_ISO_CTRL_EB2CORE 0x0100
#define R92C_SYS_ISO_CTRL_DIOR 0x0200
#define R92C_SYS_ISO_CTRL_PWC_EV25V 0x4000
#define R92C_SYS_ISO_CTRL_PWC_EV12V 0x8000
/* Bits for R92C_SYS_FUNC_EN. */
#define R92C_SYS_FUNC_EN_BBRSTB 0x0001
#define R92C_SYS_FUNC_EN_BB_GLB_RST 0x0002
#define R92C_SYS_FUNC_EN_USBA 0x0004
#define R92C_SYS_FUNC_EN_UPLL 0x0008
#define R92C_SYS_FUNC_EN_USBD 0x0010
#define R92C_SYS_FUNC_EN_DIO_PCIE 0x0020
#define R92C_SYS_FUNC_EN_PCIEA 0x0040
#define R92C_SYS_FUNC_EN_PPLL 0x0080
#define R92C_SYS_FUNC_EN_PCIED 0x0100
#define R92C_SYS_FUNC_EN_DIOE 0x0200
#define R92C_SYS_FUNC_EN_CPUEN 0x0400
#define R92C_SYS_FUNC_EN_DCORE 0x0800
#define R92C_SYS_FUNC_EN_ELDR 0x1000
#define R92C_SYS_FUNC_EN_DIO_RF 0x2000
#define R92C_SYS_FUNC_EN_HWPDN 0x4000
#define R92C_SYS_FUNC_EN_MREGEN 0x8000
/* Bits for R92C_APS_FSMCO. */
#define R92C_APS_FSMCO_PFM_LDALL 0x00000001
#define R92C_APS_FSMCO_PFM_ALDN 0x00000002
#define R92C_APS_FSMCO_PFM_LDKP 0x00000004
#define R92C_APS_FSMCO_PFM_WOWL 0x00000008
#define R92C_APS_FSMCO_PDN_EN 0x00000010
#define R92C_APS_FSMCO_PDN_PL 0x00000020
#define R92C_APS_FSMCO_APFM_ONMAC 0x00000100
#define R92C_APS_FSMCO_APFM_OFF 0x00000200
#define R92C_APS_FSMCO_APFM_RSM 0x00000400
#define R92C_APS_FSMCO_AFSM_HSUS 0x00000800
#define R92C_APS_FSMCO_AFSM_PCIE 0x00001000
#define R92C_APS_FSMCO_APDM_MAC 0x00002000
#define R92C_APS_FSMCO_APDM_HOST 0x00004000
#define R92C_APS_FSMCO_APDM_HPDN 0x00008000
#define R92C_APS_FSMCO_RDY_MACON 0x00010000
#define R92C_APS_FSMCO_SUS_HOST 0x00020000
#define R92C_APS_FSMCO_ROP_ALD 0x00100000
#define R92C_APS_FSMCO_ROP_PWR 0x00200000
#define R92C_APS_FSMCO_ROP_SPS 0x00400000
#define R92C_APS_FSMCO_SOP_MRST 0x02000000
#define R92C_APS_FSMCO_SOP_FUSE 0x04000000
#define R92C_APS_FSMCO_SOP_ABG 0x08000000
#define R92C_APS_FSMCO_SOP_AMB 0x10000000
#define R92C_APS_FSMCO_SOP_RCK 0x20000000
#define R92C_APS_FSMCO_SOP_A8M 0x40000000
#define R92C_APS_FSMCO_XOP_BTCK 0x80000000
/* Bits for R92C_SYS_CLKR. */
#define R92C_SYS_CLKR_ANAD16V_EN 0x00000001
#define R92C_SYS_CLKR_ANA8M 0x00000002
#define R92C_SYS_CLKR_MACSLP 0x00000010
#define R92C_SYS_CLKR_LOADER_EN 0x00000020
#define R92C_SYS_CLKR_80M_SSC_DIS 0x00000080
#define R92C_SYS_CLKR_80M_SSC_EN_HO 0x00000100
#define R92C_SYS_CLKR_PHY_SSC_RSTB 0x00000200
#define R92C_SYS_CLKR_SEC_EN 0x00000400
#define R92C_SYS_CLKR_MAC_EN 0x00000800
#define R92C_SYS_CLKR_SYS_EN 0x00001000
#define R92C_SYS_CLKR_RING_EN 0x00002000
/* Bits for R92C_RF_CTRL. */
#define R92C_RF_CTRL_EN 0x01
#define R92C_RF_CTRL_RSTB 0x02
#define R92C_RF_CTRL_SDMRSTB 0x04
/* Bits for R92C_LDOV12D_CTRL. */
#define R92C_LDOV12D_CTRL_LDV12_EN 0x01
/* Bits for R92C_AFE_XTAL_CTRL. */
#define R92C_AFE_XTAL_CTRL_ADDR_M 0x007ff800
#define R92C_AFE_XTAL_CTRL_ADDR_S 11
/* Bits for R92C_EFUSE_CTRL. */
#define R92C_EFUSE_CTRL_DATA_M 0x000000ff
#define R92C_EFUSE_CTRL_DATA_S 0
#define R92C_EFUSE_CTRL_ADDR_M 0x0003ff00
#define R92C_EFUSE_CTRL_ADDR_S 8
#define R92C_EFUSE_CTRL_VALID 0x80000000
/* Bits for R92C_GPIO_MUXCFG. */
#define R92C_GPIO_MUXCFG_ENBT 0x0020
/* Bits for R92C_LEDCFG0. */
#define R92C_LEDCFG0_DIS 0x08
/* Bits for R92C_MCUFWDL. */
#define R92C_MCUFWDL_EN 0x00000001
#define R92C_MCUFWDL_RDY 0x00000002
#define R92C_MCUFWDL_CHKSUM_RPT 0x00000004
#define R92C_MCUFWDL_MACINI_RDY 0x00000008
#define R92C_MCUFWDL_BBINI_RDY 0x00000010
#define R92C_MCUFWDL_RFINI_RDY 0x00000020
#define R92C_MCUFWDL_WINTINI_RDY 0x00000040
#define R92C_MCUFWDL_RAM_DL_SEL 0x00000080
#define R92C_MCUFWDL_PAGE_M 0x00070000
#define R92C_MCUFWDL_PAGE_S 16
#define R92C_MCUFWDL_CPRST 0x00800000
/* Bits for R88E_HIMR. */
#define R88E_HIMR_CPWM 0x00000100
#define R88E_HIMR_CPWM2 0x00000200
#define R88E_HIMR_TBDER 0x04000000
#define R88E_HIMR_PSTIMEOUT 0x20000000
/* Bits for R88E_HIMRE. */
#define R88E_HIMRE_RXFOVW 0x00000100
#define R88E_HIMRE_TXFOVW 0x00000200
#define R88E_HIMRE_RXERR 0x00000400
#define R88E_HIMRE_TXERR 0x00000800
/* Bits for R92C_EFUSE_ACCESS. */
#define R92C_EFUSE_ACCESS_OFF 0x00
#define R92C_EFUSE_ACCESS_ON 0x69
/* Bits for R92C_HPON_FSM. */
#define R92C_HPON_FSM_CHIP_BONDING_ID_S 22
#define R92C_HPON_FSM_CHIP_BONDING_ID_M 0x00c00000
#define R92C_HPON_FSM_CHIP_BONDING_ID_92C_1T2R 1
/* Bits for R92C_SYS_CFG. */
#define R92C_SYS_CFG_XCLK_VLD 0x00000001
#define R92C_SYS_CFG_ACLK_VLD 0x00000002
#define R92C_SYS_CFG_UCLK_VLD 0x00000004
#define R92C_SYS_CFG_PCLK_VLD 0x00000008
#define R92C_SYS_CFG_PCIRSTB 0x00000010
#define R92C_SYS_CFG_V15_VLD 0x00000020
#define R92C_SYS_CFG_TRP_B15V_EN 0x00000080
#define R92C_SYS_CFG_SIC_IDLE 0x00000100
#define R92C_SYS_CFG_BD_MAC2 0x00000200
#define R92C_SYS_CFG_BD_MAC1 0x00000400
#define R92C_SYS_CFG_IC_MACPHY_MODE 0x00000800
#define R92C_SYS_CFG_CHIP_VER_RTL_M 0x0000f000
#define R92C_SYS_CFG_CHIP_VER_RTL_S 12
#define R92C_SYS_CFG_BT_FUNC 0x00010000
#define R92C_SYS_CFG_VENDOR_UMC 0x00080000
#define R92C_SYS_CFG_PAD_HWPD_IDN 0x00400000
#define R92C_SYS_CFG_TRP_VAUX_EN 0x00800000
#define R92C_SYS_CFG_TRP_BT_EN 0x01000000
#define R92C_SYS_CFG_BD_PKG_SEL 0x02000000
#define R92C_SYS_CFG_BD_HCI_SEL 0x04000000
#define R92C_SYS_CFG_TYPE_92C 0x08000000
/* Bits for R92C_CR. */
#define R92C_CR_HCI_TXDMA_EN 0x00000001
#define R92C_CR_HCI_RXDMA_EN 0x00000002
#define R92C_CR_TXDMA_EN 0x00000004
#define R92C_CR_RXDMA_EN 0x00000008
#define R92C_CR_PROTOCOL_EN 0x00000010
#define R92C_CR_SCHEDULE_EN 0x00000020
#define R92C_CR_MACTXEN 0x00000040
#define R92C_CR_MACRXEN 0x00000080
#define R92C_CR_ENSEC 0x00000200
#define R92C_CR_CALTMR_EN 0x00000400
#define R92C_CR_NETTYPE_S 16
#define R92C_CR_NETTYPE_M 0x00030000
#define R92C_CR_NETTYPE_NOLINK 0
#define R92C_CR_NETTYPE_ADHOC 1
#define R92C_CR_NETTYPE_INFRA 2
#define R92C_CR_NETTYPE_AP 3
/* Bits for R92C_PBP. */
#define R92C_PBP_PSRX_M 0x0f
#define R92C_PBP_PSRX_S 0
#define R92C_PBP_PSTX_M 0xf0
#define R92C_PBP_PSTX_S 4
#define R92C_PBP_64 0
#define R92C_PBP_128 1
#define R92C_PBP_256 2
#define R92C_PBP_512 3
#define R92C_PBP_1024 4
/* Bits for R92C_TRXDMA_CTRL. */
#define R92C_TRXDMA_CTRL_RXDMA_AGG_EN 0x0004
#define R92C_TRXDMA_CTRL_TXDMA_VOQ_MAP_M 0x0030
#define R92C_TRXDMA_CTRL_TXDMA_VOQ_MAP_S 4
#define R92C_TRXDMA_CTRL_TXDMA_VIQ_MAP_M 0x00c0
#define R92C_TRXDMA_CTRL_TXDMA_VIQ_MAP_S 6
#define R92C_TRXDMA_CTRL_TXDMA_BEQ_MAP_M 0x0300
#define R92C_TRXDMA_CTRL_TXDMA_BEQ_MAP_S 8
#define R92C_TRXDMA_CTRL_TXDMA_BKQ_MAP_M 0x0c00
#define R92C_TRXDMA_CTRL_TXDMA_BKQ_MAP_S 10
#define R92C_TRXDMA_CTRL_TXDMA_MGQ_MAP_M 0x3000
#define R92C_TRXDMA_CTRL_TXDMA_MGQ_MAP_S 12
#define R92C_TRXDMA_CTRL_TXDMA_HIQ_MAP_M 0xc000
#define R92C_TRXDMA_CTRL_TXDMA_HIQ_MAP_S 14
#define R92C_TRXDMA_CTRL_QUEUE_LOW 1
#define R92C_TRXDMA_CTRL_QUEUE_NORMAL 2
#define R92C_TRXDMA_CTRL_QUEUE_HIGH 3
#define R92C_TRXDMA_CTRL_QMAP_M 0xfff0
/* Shortcuts. */
#define R92C_TRXDMA_CTRL_QMAP_3EP 0xf5b0
#define R92C_TRXDMA_CTRL_QMAP_HQ_LQ 0xf5f0
#define R92C_TRXDMA_CTRL_QMAP_HQ_NQ 0xfaf0
#define R92C_TRXDMA_CTRL_QMAP_LQ 0x5550
#define R92C_TRXDMA_CTRL_QMAP_NQ 0xaaa0
#define R92C_TRXDMA_CTRL_QMAP_HQ 0xfff0
/* Bits for R92C_LLT_INIT. */
#define R92C_LLT_INIT_DATA_M 0x000000ff
#define R92C_LLT_INIT_DATA_S 0
#define R92C_LLT_INIT_ADDR_M 0x0000ff00
#define R92C_LLT_INIT_ADDR_S 8
#define R92C_LLT_INIT_OP_M 0xc0000000
#define R92C_LLT_INIT_OP_S 30
#define R92C_LLT_INIT_OP_NO_ACTIVE 0
#define R92C_LLT_INIT_OP_WRITE 1
/* Bits for R92C_RQPN. */
#define R92C_RQPN_HPQ_M 0x000000ff
#define R92C_RQPN_HPQ_S 0
#define R92C_RQPN_LPQ_M 0x0000ff00
#define R92C_RQPN_LPQ_S 8
#define R92C_RQPN_PUBQ_M 0x00ff0000
#define R92C_RQPN_PUBQ_S 16
#define R92C_RQPN_LD 0x80000000
/* Bits for R92C_TDECTRL. */
#define R92C_TDECTRL_BLK_DESC_NUM_M 0x0000000f
#define R92C_TDECTRL_BLK_DESC_NUM_S 4
/* Bits for R92C_FWHW_TXQ_CTRL. */
#define R92C_FWHW_TXQ_CTRL_AMPDU_RTY_NEW 0x80
/* Bits for R92C_SPEC_SIFS. */
#define R92C_SPEC_SIFS_CCK_M 0x00ff
#define R92C_SPEC_SIFS_CCK_S 0
#define R92C_SPEC_SIFS_OFDM_M 0xff00
#define R92C_SPEC_SIFS_OFDM_S 8
/* Bits for R92C_RL. */
#define R92C_RL_LRL_M 0x003f
#define R92C_RL_LRL_S 0
#define R92C_RL_SRL_M 0x3f00
#define R92C_RL_SRL_S 8
/* Bits for R92C_RRSR. */
#define R92C_RRSR_RATE_BITMAP_M 0x000fffff
#define R92C_RRSR_RATE_BITMAP_S 0
#define R92C_RRSR_RATE_CCK_ONLY_1M 0xffff1
#define R92C_RRSR_RSC_LOWSUBCHNL 0x00200000
#define R92C_RRSR_RSC_UPSUBCHNL 0x00400000
#define R92C_RRSR_SHORT 0x00800000
/* Bits for R92C_EDCA_XX_PARAM. */
#define R92C_EDCA_PARAM_AIFS_M 0x000000ff
#define R92C_EDCA_PARAM_AIFS_S 0
#define R92C_EDCA_PARAM_ECWMIN_M 0x00000f00
#define R92C_EDCA_PARAM_ECWMIN_S 8
#define R92C_EDCA_PARAM_ECWMAX_M 0x0000f000
#define R92C_EDCA_PARAM_ECWMAX_S 12
#define R92C_EDCA_PARAM_TXOP_M 0xffff0000
#define R92C_EDCA_PARAM_TXOP_S 16
/* Bits for R92C_BCN_CTRL. */
#define R92C_BCN_CTRL_EN_MBSSID 0x02
#define R92C_BCN_CTRL_TXBCN_RPT 0x04
#define R92C_BCN_CTRL_EN_BCN 0x08
#define R92C_BCN_CTRL_DIS_TSF_UDT0 0x10
/* Bits for R92C_APSD_CTRL. */
#define R92C_APSD_CTRL_OFF 0x40
#define R92C_APSD_CTRL_OFF_STATUS 0x80
/* Bits for R92C_BWOPMODE. */
#define R92C_BWOPMODE_11J 0x01
#define R92C_BWOPMODE_5G 0x02
#define R92C_BWOPMODE_20MHZ 0x04
/* Bits for R92C_RCR. */
#define R92C_RCR_AAP 0x00000001
#define R92C_RCR_APM 0x00000002
#define R92C_RCR_AM 0x00000004
#define R92C_RCR_AB 0x00000008
#define R92C_RCR_ADD3 0x00000010
#define R92C_RCR_APWRMGT 0x00000020
#define R92C_RCR_CBSSID_DATA 0x00000040
#define R92C_RCR_CBSSID_BCN 0x00000080
#define R92C_RCR_ACRC32 0x00000100
#define R92C_RCR_AICV 0x00000200
#define R92C_RCR_ADF 0x00000800
#define R92C_RCR_ACF 0x00001000
#define R92C_RCR_AMF 0x00002000
#define R92C_RCR_HTC_LOC_CTRL 0x00004000
#define R92C_RCR_MFBEN 0x00400000
#define R92C_RCR_LSIGEN 0x00800000
#define R92C_RCR_ENMBID 0x01000000
#define R92C_RCR_APP_BA_SSN 0x08000000
#define R92C_RCR_APP_PHYSTS 0x10000000
#define R92C_RCR_APP_ICV 0x20000000
#define R92C_RCR_APP_MIC 0x40000000
#define R92C_RCR_APPFCS 0x80000000
/* Bits for R92C_CAMCMD. */
#define R92C_CAMCMD_ADDR_M 0x0000ffff
#define R92C_CAMCMD_ADDR_S 0
#define R92C_CAMCMD_WRITE 0x00010000
#define R92C_CAMCMD_CLR 0x40000000
#define R92C_CAMCMD_POLLING 0x80000000
/*
* Baseband registers.
*/
#define R92C_FPGA0_RFMOD 0x800
#define R92C_FPGA0_TXINFO 0x804
#define R92C_HSSI_PARAM1(chain) (0x820 + (chain) * 8)
#define R92C_HSSI_PARAM2(chain) (0x824 + (chain) * 8)
#define R92C_TXAGC_RATE18_06(i) (((i) == 0) ? 0xe00 : 0x830)
#define R92C_TXAGC_RATE54_24(i) (((i) == 0) ? 0xe04 : 0x834)
#define R92C_TXAGC_A_CCK1_MCS32 0xe08
#define R92C_TXAGC_B_CCK1_55_MCS32 0x838
#define R92C_TXAGC_B_CCK11_A_CCK2_11 0x86c
#define R92C_TXAGC_MCS03_MCS00(i) (((i) == 0) ? 0xe10 : 0x83c)
#define R92C_TXAGC_MCS07_MCS04(i) (((i) == 0) ? 0xe14 : 0x848)
#define R92C_TXAGC_MCS11_MCS08(i) (((i) == 0) ? 0xe18 : 0x84c)
#define R92C_TXAGC_MCS15_MCS12(i) (((i) == 0) ? 0xe1c : 0x868)
#define R92C_LSSI_PARAM(chain) (0x840 + (chain) * 4)
#define R92C_FPGA0_RFIFACEOE(chain) (0x860 + (chain) * 4)
#define R92C_FPGA0_RFIFACESW(idx) (0x870 + (idx) * 4)
#define R92C_FPGA0_RFPARAM(idx) (0x878 + (idx) * 4)
#define R92C_FPGA0_ANAPARAM2 0x884
#define R92C_LSSI_READBACK(chain) (0x8a0 + (chain) * 4)
#define R92C_HSPI_READBACK(chain) (0x8b8 + (chain) * 4)
#define R92C_FPGA1_RFMOD 0x900
#define R92C_FPGA1_TXINFO 0x90c
#define R92C_CCK0_SYSTEM 0xa00
#define R92C_CCK0_AFESETTING 0xa04
#define R92C_OFDM0_TRXPATHENA 0xc04
#define R92C_OFDM0_TRMUXPAR 0xc08
#define R92C_OFDM0_AGCCORE1(chain) (0xc50 + (chain) * 8)
#define R92C_OFDM0_AGCPARAM1 0xc70
#define R92C_OFDM0_AGCRSSITABLE 0xc78
#define R92C_OFDM1_LSTF 0xd00
/* Bits for R92C_FPGA[01]_RFMOD. */
#define R92C_RFMOD_40MHZ 0x00000001
#define R92C_RFMOD_JAPAN 0x00000002
#define R92C_RFMOD_CCK_TXSC 0x00000030
#define R92C_RFMOD_CCK_EN 0x01000000
#define R92C_RFMOD_OFDM_EN 0x02000000
/* Bits for R92C_HSSI_PARAM1(i). */
#define R92C_HSSI_PARAM1_PI 0x00000100
/* Bits for R92C_HSSI_PARAM2(i). */
#define R92C_HSSI_PARAM2_CCK_HIPWR 0x00000200
#define R92C_HSSI_PARAM2_ADDR_LENGTH 0x00000400
#define R92C_HSSI_PARAM2_DATA_LENGTH 0x00000800
#define R92C_HSSI_PARAM2_READ_ADDR_M 0x7f800000
#define R92C_HSSI_PARAM2_READ_ADDR_S 23
#define R92C_HSSI_PARAM2_READ_EDGE 0x80000000
/* Bits for R92C_TXAGC_A_CCK1_MCS32. */
#define R92C_TXAGC_A_CCK1_M 0x0000ff00
#define R92C_TXAGC_A_CCK1_S 8
/* Bits for R92C_TXAGC_B_CCK11_A_CCK2_11. */
#define R92C_TXAGC_B_CCK11_M 0x000000ff
#define R92C_TXAGC_B_CCK11_S 0
#define R92C_TXAGC_A_CCK2_M 0x0000ff00
#define R92C_TXAGC_A_CCK2_S 8
#define R92C_TXAGC_A_CCK55_M 0x00ff0000
#define R92C_TXAGC_A_CCK55_S 16
#define R92C_TXAGC_A_CCK11_M 0xff000000
#define R92C_TXAGC_A_CCK11_S 24
/* Bits for R92C_TXAGC_B_CCK1_55_MCS32. */
#define R92C_TXAGC_B_CCK1_M 0x0000ff00
#define R92C_TXAGC_B_CCK1_S 8
#define R92C_TXAGC_B_CCK2_M 0x00ff0000
#define R92C_TXAGC_B_CCK2_S 16
#define R92C_TXAGC_B_CCK55_M 0xff000000
#define R92C_TXAGC_B_CCK55_S 24
/* Bits for R92C_TXAGC_RATE18_06(x). */
#define R92C_TXAGC_RATE06_M 0x000000ff
#define R92C_TXAGC_RATE06_S 0
#define R92C_TXAGC_RATE09_M 0x0000ff00
#define R92C_TXAGC_RATE09_S 8
#define R92C_TXAGC_RATE12_M 0x00ff0000
#define R92C_TXAGC_RATE12_S 16
#define R92C_TXAGC_RATE18_M 0xff000000
#define R92C_TXAGC_RATE18_S 24
/* Bits for R92C_TXAGC_RATE54_24(x). */
#define R92C_TXAGC_RATE24_M 0x000000ff
#define R92C_TXAGC_RATE24_S 0
#define R92C_TXAGC_RATE36_M 0x0000ff00
#define R92C_TXAGC_RATE36_S 8
#define R92C_TXAGC_RATE48_M 0x00ff0000
#define R92C_TXAGC_RATE48_S 16
#define R92C_TXAGC_RATE54_M 0xff000000
#define R92C_TXAGC_RATE54_S 24
/* Bits for R92C_TXAGC_MCS03_MCS00(x). */
#define R92C_TXAGC_MCS00_M 0x000000ff
#define R92C_TXAGC_MCS00_S 0
#define R92C_TXAGC_MCS01_M 0x0000ff00
#define R92C_TXAGC_MCS01_S 8
#define R92C_TXAGC_MCS02_M 0x00ff0000
#define R92C_TXAGC_MCS02_S 16
#define R92C_TXAGC_MCS03_M 0xff000000
#define R92C_TXAGC_MCS03_S 24
/* Bits for R92C_TXAGC_MCS07_MCS04(x). */
#define R92C_TXAGC_MCS04_M 0x000000ff
#define R92C_TXAGC_MCS04_S 0
#define R92C_TXAGC_MCS05_M 0x0000ff00
#define R92C_TXAGC_MCS05_S 8
#define R92C_TXAGC_MCS06_M 0x00ff0000
#define R92C_TXAGC_MCS06_S 16
#define R92C_TXAGC_MCS07_M 0xff000000
#define R92C_TXAGC_MCS07_S 24
/* Bits for R92C_TXAGC_MCS11_MCS08(x). */
#define R92C_TXAGC_MCS08_M 0x000000ff
#define R92C_TXAGC_MCS08_S 0
#define R92C_TXAGC_MCS09_M 0x0000ff00
#define R92C_TXAGC_MCS09_S 8
#define R92C_TXAGC_MCS10_M 0x00ff0000
#define R92C_TXAGC_MCS10_S 16
#define R92C_TXAGC_MCS11_M 0xff000000
#define R92C_TXAGC_MCS11_S 24
/* Bits for R92C_TXAGC_MCS15_MCS12(x). */
#define R92C_TXAGC_MCS12_M 0x000000ff
#define R92C_TXAGC_MCS12_S 0
#define R92C_TXAGC_MCS13_M 0x0000ff00
#define R92C_TXAGC_MCS13_S 8
#define R92C_TXAGC_MCS14_M 0x00ff0000
#define R92C_TXAGC_MCS14_S 16
#define R92C_TXAGC_MCS15_M 0xff000000
#define R92C_TXAGC_MCS15_S 24
/* Bits for R92C_LSSI_PARAM(i). */
#define R92C_LSSI_PARAM_DATA_M 0x000fffff
#define R92C_LSSI_PARAM_DATA_S 0
#define R92C_LSSI_PARAM_ADDR_M 0x03f00000
#define R92C_LSSI_PARAM_ADDR_S 20
#define R88E_LSSI_PARAM_ADDR_M 0x0ff00000
#define R88E_LSSI_PARAM_ADDR_S 20
/* Bits for R92C_FPGA0_ANAPARAM2. */
#define R92C_FPGA0_ANAPARAM2_CBW20 0x00000400
/* Bits for R92C_LSSI_READBACK(i). */
#define R92C_LSSI_READBACK_DATA_M 0x000fffff
#define R92C_LSSI_READBACK_DATA_S 0
/* Bits for R92C_OFDM0_AGCCORE1(i). */
#define R92C_OFDM0_AGCCORE1_GAIN_M 0x0000007f
#define R92C_OFDM0_AGCCORE1_GAIN_S 0
/*
* USB registers.
*/
#define R92C_USB_INFO 0xfe17
#define R92C_USB_SPECIAL_OPTION 0xfe55
#define R92C_USB_HCPWM 0xfe57
#define R92C_USB_HRPWM 0xfe58
#define R92C_USB_DMA_AGG_TO 0xfe5b
#define R92C_USB_AGG_TO 0xfe5c
#define R92C_USB_AGG_TH 0xfe5d
#define R92C_USB_VID 0xfe60
#define R92C_USB_PID 0xfe62
#define R92C_USB_OPTIONAL 0xfe64
#define R92C_USB_EP 0xfe65
#define R92C_USB_PHY 0xfe68
#define R92C_USB_MAC_ADDR 0xfe70
#define R92C_USB_STRING 0xfe80
/* Bits for R92C_USB_SPECIAL_OPTION. */
#define R92C_USB_SPECIAL_OPTION_AGG_EN 0x08
#define R92C_USB_SPECIAL_OPTION_INT_BULK_SEL 0x10
/* Bits for R92C_USB_EP. */
#define R92C_USB_EP_HQ_M 0x000f
#define R92C_USB_EP_HQ_S 0
#define R92C_USB_EP_NQ_M 0x00f0
#define R92C_USB_EP_NQ_S 4
#define R92C_USB_EP_LQ_M 0x0f00
#define R92C_USB_EP_LQ_S 8
/*
* Firmware base address.
*/
#define R92C_FW_START_ADDR 0x1000
#define R92C_FW_PAGE_SIZE 4096
/*
* RF (6052) registers.
*/
#define R92C_RF_AC 0x00
#define R92C_RF_IQADJ_G(i) (0x01 + (i))
#define R92C_RF_POW_TRSW 0x05
#define R92C_RF_GAIN_RX 0x06
#define R92C_RF_GAIN_TX 0x07
#define R92C_RF_TXM_IDAC 0x08
#define R92C_RF_BS_IQGEN 0x0f
#define R92C_RF_MODE1 0x10
#define R92C_RF_MODE2 0x11
#define R92C_RF_RX_AGC_HP 0x12
#define R92C_RF_TX_AGC 0x13
#define R92C_RF_BIAS 0x14
#define R92C_RF_IPA 0x15
#define R92C_RF_POW_ABILITY 0x17
#define R92C_RF_CHNLBW 0x18
#define R92C_RF_RX_G1 0x1a
#define R92C_RF_RX_G2 0x1b
#define R92C_RF_RX_BB2 0x1c
#define R92C_RF_RX_BB1 0x1d
#define R92C_RF_RCK1 0x1e
#define R92C_RF_RCK2 0x1f
#define R92C_RF_TX_G(i) (0x20 + (i))
#define R92C_RF_TX_BB1 0x23
#define R92C_RF_T_METER 0x24
#define R92C_RF_SYN_G(i) (0x25 + (i))
#define R92C_RF_RCK_OS 0x30
#define R92C_RF_TXPA_G(i) (0x31 + (i))
/* Bits for R92C_RF_AC. */
#define R92C_RF_AC_MODE_M 0x70000
#define R92C_RF_AC_MODE_S 16
#define R92C_RF_AC_MODE_STANDBY 1
/* Bits for R92C_RF_CHNLBW. */
#define R92C_RF_CHNLBW_CHNL_M 0x003ff
#define R92C_RF_CHNLBW_CHNL_S 0
#define R92C_RF_CHNLBW_BW20 0x00400
#define R88E_RF_CHNLBW_BW20 0x00c00
#define R92C_RF_CHNLBW_LCSTART 0x08000
/*
* CAM entries.
*/
#define R92C_CAM_ENTRY_COUNT 32
#define R92C_CAM_CTL0(entry) ((entry) * 8 + 0)
#define R92C_CAM_CTL1(entry) ((entry) * 8 + 1)
#define R92C_CAM_KEY(entry, i) ((entry) * 8 + 2 + (i))
/* Bits for R92C_CAM_CTL0(i). */
#define R92C_CAM_KEYID_M 0x00000003
#define R92C_CAM_KEYID_S 0
#define R92C_CAM_ALGO_M 0x0000001c
#define R92C_CAM_ALGO_S 2
#define R92C_CAM_ALGO_NONE 0
#define R92C_CAM_ALGO_WEP40 1
#define R92C_CAM_ALGO_TKIP 2
#define R92C_CAM_ALGO_AES 4
#define R92C_CAM_ALGO_WEP104 5
#define R92C_CAM_VALID 0x00008000
#define R92C_CAM_MACLO_M 0xffff0000
#define R92C_CAM_MACLO_S 16
/* Rate adaptation modes. */
#define R92C_RAID_11GN 1
#define R92C_RAID_11N 3
#define R92C_RAID_11BG 4
#define R92C_RAID_11G 5 /* "pure" 11g */
#define R92C_RAID_11B 6
/* Macros to access unaligned little-endian memory. */
#define LE_READ_2(x) ((x)[0] | (x)[1] << 8)
#define LE_READ_4(x) ((x)[0] | (x)[1] << 8 | (x)[2] << 16 | (x)[3] << 24)
/*
* Macros to access subfields in registers.
*/
/* Mask and Shift (getter). */
#define MS(val, field) \
(((val) & field##_M) >> field##_S)
/* Shift and Mask (setter). */
#define SM(field, val) \
(((val) << field##_S) & field##_M)
/* Rewrite. */
#define RW(var, field, val) \
(((var) & ~field##_M) | SM(field, val))
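/*
 * Example usage (as seen in the driver above):
 *   MS(reg, R92C_USB_EP_HQ)                          - extract a field
 *   SM(R92C_RQPN_PUBQ, R92C_PUBQ_NPAGES)             - build a field
 *   RW(reg, R92C_CR_NETTYPE, R92C_CR_NETTYPE_INFRA)  - replace a field
 * Each field name must have matching _M (mask) and _S (shift) macros.
 */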
/*
* Firmware image header.
*/
struct r92c_fw_hdr {
/* QWORD0 */
uint16_t signature;
uint8_t category;
uint8_t function;
uint16_t version;
uint16_t subversion;
/* QWORD1 */
uint8_t month;
uint8_t date;
uint8_t hour;
uint8_t minute;
uint16_t ramcodesize;
uint16_t reserved2;
/* QWORD2 */
uint32_t svnidx;
uint32_t reserved3;
/* QWORD3 */
uint32_t reserved4;
uint32_t reserved5;
} __packed;
/*
* Host to firmware commands.
*/
struct r92c_fw_cmd {
uint8_t id;
#define R92C_CMD_AP_OFFLOAD 0
#define R92C_CMD_SET_PWRMODE 1
#define R92C_CMD_JOINBSS_RPT 2
#define R92C_CMD_RSVD_PAGE 3
#define R92C_CMD_RSSI 4
#define R92C_CMD_RSSI_SETTING 5
#define R92C_CMD_MACID_CONFIG 6
#define R92C_CMD_MACID_PS_MODE 7
#define R92C_CMD_P2P_PS_OFFLOAD 8
#define R92C_CMD_SELECTIVE_SUSPEND 9
#define R92C_CMD_FLAG_EXT 0x80
uint8_t msg[5];
} __packed;
/* Structure for R92C_CMD_RSSI_SETTING. */
struct r92c_fw_cmd_rssi {
uint8_t macid;
uint8_t reserved;
uint8_t pwdb;
} __packed;
/* Structure for R92C_CMD_MACID_CONFIG. */
struct r92c_fw_cmd_macid_cfg {
uint32_t mask;
uint8_t macid;
#define URTWN_MACID_BSS 0
#define URTWN_MACID_BC 4 /* Broadcast. */
#define URTWN_MACID_VALID 0x80
} __packed;
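/*
 * Illustration only: a host-to-firmware command is an id byte plus up to
 * five payload bytes, so the small per-command structures above are simply
 * copied into msg[].  urtwn_fw_cmd_send() is a hypothetical helper and the
 * pwdb value is made up.
 */
#if 0	/* example sketch */
	struct r92c_fw_cmd cmd;
	struct r92c_fw_cmd_rssi rssi;

	memset(&cmd, 0, sizeof(cmd));
	memset(&rssi, 0, sizeof(rssi));
	rssi.macid = URTWN_MACID_BSS;
	rssi.pwdb = 60;				/* made-up average signal level */
	cmd.id = R92C_CMD_RSSI_SETTING;
	memcpy(cmd.msg, &rssi, sizeof(rssi));	/* 3 bytes, fits in msg[5] */
	(void)urtwn_fw_cmd_send(sc, &cmd);	/* hypothetical send routine */
#endif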
/*
* RTL8192CU ROM image.
*/
struct r92c_rom {
uint16_t id; /* 0x8192 */
uint8_t reserved1[5];
uint8_t dbg_sel;
uint16_t reserved2;
uint16_t vid;
uint16_t pid;
uint8_t usb_opt;
uint8_t ep_setting;
uint16_t reserved3;
uint8_t usb_phy;
uint8_t reserved4[3];
uint8_t macaddr[6];
uint8_t string[61]; /* "Realtek" */
uint8_t subcustomer_id;
uint8_t cck_tx_pwr[R92C_MAX_CHAINS][3];
uint8_t ht40_1s_tx_pwr[R92C_MAX_CHAINS][3];
uint8_t ht40_2s_tx_pwr_diff[3];
uint8_t ht20_tx_pwr_diff[3];
uint8_t ofdm_tx_pwr_diff[3];
uint8_t ht40_max_pwr[3];
uint8_t ht20_max_pwr[3];
uint8_t xtal_calib;
uint8_t tssi[R92C_MAX_CHAINS];
uint8_t thermal_meter;
uint8_t rf_opt1;
#define R92C_ROM_RF1_REGULATORY_M 0x07
#define R92C_ROM_RF1_REGULATORY_S 0
#define R92C_ROM_RF1_BOARD_TYPE_M 0xe0
#define R92C_ROM_RF1_BOARD_TYPE_S 5
#define R92C_BOARD_TYPE_DONGLE 0
#define R92C_BOARD_TYPE_HIGHPA 1
#define R92C_BOARD_TYPE_MINICARD 2
#define R92C_BOARD_TYPE_SOLO 3
#define R92C_BOARD_TYPE_COMBO 4
uint8_t rf_opt2;
uint8_t rf_opt3;
uint8_t rf_opt4;
uint8_t channel_plan;
uint8_t version;
uint8_t curstomer_id;
} __packed;
/* Rx MAC descriptor. */
struct r92c_rx_stat {
uint32_t rxdw0;
#define R92C_RXDW0_PKTLEN_M 0x00003fff
#define R92C_RXDW0_PKTLEN_S 0
#define R92C_RXDW0_CRCERR 0x00004000
#define R92C_RXDW0_ICVERR 0x00008000
#define R92C_RXDW0_INFOSZ_M 0x000f0000
#define R92C_RXDW0_INFOSZ_S 16
#define R92C_RXDW0_QOS 0x00800000
#define R92C_RXDW0_SHIFT_M 0x03000000
#define R92C_RXDW0_SHIFT_S 24
#define R92C_RXDW0_PHYST 0x04000000
#define R92C_RXDW0_DECRYPTED 0x08000000
uint32_t rxdw1;
uint32_t rxdw2;
#define R92C_RXDW2_PKTCNT_M 0x00ff0000
#define R92C_RXDW2_PKTCNT_S 16
uint32_t rxdw3;
#define R92C_RXDW3_RATE_M 0x0000003f
#define R92C_RXDW3_RATE_S 0
#define R92C_RXDW3_HT 0x00000040
#define R92C_RXDW3_HTC 0x00000400
uint32_t rxdw4;
uint32_t rxdw5;
} __packed __attribute__((aligned(4)));
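/*
 * Illustration only: the descriptor words arrive little-endian, so they are
 * converted with le32toh() before the MS() getters above are applied.
 */
#if 0	/* example sketch */
static void
urtwn_rx_desc_example(const uint8_t *buf)
{
	const struct r92c_rx_stat *stat = (const void *)buf;
	uint32_t rxdw0 = le32toh(stat->rxdw0);
	int pktlen, infosz, shift;

	if (rxdw0 & (R92C_RXDW0_CRCERR | R92C_RXDW0_ICVERR))
		return;					/* drop corrupted frames */
	pktlen = MS(rxdw0, R92C_RXDW0_PKTLEN);		/* payload length */
	infosz = MS(rxdw0, R92C_RXDW0_INFOSZ) * 8;	/* PHY status size (bytes) */
	shift = MS(rxdw0, R92C_RXDW0_SHIFT);		/* payload alignment shift */
	(void)pktlen; (void)infosz; (void)shift;	/* consumed by the real driver */
}
#endif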
/* Rx PHY descriptor. */
struct r92c_rx_phystat {
uint32_t phydw0;
uint32_t phydw1;
uint32_t phydw2;
uint32_t phydw3;
uint32_t phydw4;
uint32_t phydw5;
uint32_t phydw6;
uint32_t phydw7;
} __packed __attribute__((aligned(4)));
/* Rx PHY CCK descriptor. */
struct r92c_rx_cck {
uint8_t adc_pwdb[4];
uint8_t sq_rpt;
uint8_t agc_rpt;
} __packed;
struct r88e_rx_cck {
uint8_t path_agc[2];
uint8_t sig_qual;
uint8_t agc_rpt;
uint8_t rpt_b;
uint8_t reserved1;
uint8_t noise_power;
uint8_t path_cfotail[2];
uint8_t pcts_mask[2];
uint8_t stream_rxevm[2];
uint8_t path_rxsnr[2];
uint8_t noise_power_db_lsb;
uint8_t reserved2[3];
uint8_t stream_csi[2];
uint8_t stream_target_csi[2];
uint8_t sig_evm;
uint8_t reserved3;
uint8_t reserved4;
} __packed;
/* Tx MAC descriptor. */
struct r92c_tx_desc {
uint32_t txdw0;
#define R92C_TXDW0_PKTLEN_M 0x0000ffff
#define R92C_TXDW0_PKTLEN_S 0
#define R92C_TXDW0_OFFSET_M 0x00ff0000
#define R92C_TXDW0_OFFSET_S 16
#define R92C_TXDW0_BMCAST 0x01000000
#define R92C_TXDW0_LSG 0x04000000
#define R92C_TXDW0_FSG 0x08000000
#define R92C_TXDW0_OWN 0x80000000
uint32_t txdw1;
#define R92C_TXDW1_MACID_M 0x0000001f
#define R92C_TXDW1_MACID_S 0
#define R88E_TXDW1_MACID_M 0x0000003f
#define R88E_TXDW1_MACID_S 0
#define R92C_TXDW1_AGGEN 0x00000020
#define R92C_TXDW1_AGGBK 0x00000040
#define R92C_TXDW1_QSEL_M 0x00001f00
#define R92C_TXDW1_QSEL_S 8
#define R92C_TXDW1_QSEL_BE 0x00
#define R92C_TXDW1_QSEL_MGNT 0x12
#define R92C_TXDW1_RAID_M 0x000f0000
#define R92C_TXDW1_RAID_S 16
#define R92C_TXDW1_CIPHER_M 0x00c00000
#define R92C_TXDW1_CIPHER_S 22
#define R92C_TXDW1_CIPHER_NONE 0
#define R92C_TXDW1_CIPHER_RC4 1
#define R92C_TXDW1_CIPHER_AES 3
#define R92C_TXDW1_PKTOFF_M 0x7c000000
#define R92C_TXDW1_PKTOFF_S 26
uint32_t txdw2;
#define R88E_TXDW2_AGGBK 0x00010000
uint16_t txdw3;
uint16_t txdseq;
uint32_t txdw4;
#define R92C_TXDW4_RTSRATE_M 0x0000003f
#define R92C_TXDW4_RTSRATE_S 0
#define R92C_TXDW4_QOS 0x00000040
#define R92C_TXDW4_HWSEQ 0x00000080
#define R92C_TXDW4_DRVRATE 0x00000100
#define R92C_TXDW4_CTS2SELF 0x00000800
#define R92C_TXDW4_RTSEN 0x00001000
#define R92C_TXDW4_HWRTSEN 0x00002000
#define R92C_TXDW4_SCO_M 0x003f0000
#define R92C_TXDW4_SCO_S 20
#define R92C_TXDW4_SCO_SCA 1
#define R92C_TXDW4_SCO_SCB 2
#define R92C_TXDW4_40MHZ 0x02000000
uint32_t txdw5;
#define R92C_TXDW5_DATARATE_M 0x0000003f
#define R92C_TXDW5_DATARATE_S 0
#define R92C_TXDW5_SGI 0x00000040
#define R92C_TXDW5_AGGNUM_M 0xff000000
#define R92C_TXDW5_AGGNUM_S 24
uint32_t txdw6;
uint16_t txdsum;
uint16_t pad;
} __packed __attribute__((aligned(4)));
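/*
 * Illustration only: a minimal Tx descriptor fill using the fields above.
 * pktlen and ridx are hypothetical arguments, the MACID/QSEL/RAID choices
 * are just one plausible combination, and the words are written
 * little-endian with htole32().
 */
#if 0	/* example sketch */
static void
urtwn_tx_desc_example(struct r92c_tx_desc *txd, int pktlen, int ridx)
{
	memset(txd, 0, sizeof(*txd));
	txd->txdw0 = htole32(
	    SM(R92C_TXDW0_PKTLEN, pktlen) |
	    SM(R92C_TXDW0_OFFSET, sizeof(*txd)) |	/* descriptor precedes payload */
	    R92C_TXDW0_FSG | R92C_TXDW0_LSG | R92C_TXDW0_OWN);
	txd->txdw1 = htole32(
	    SM(R92C_TXDW1_MACID, URTWN_MACID_BSS) |
	    SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_BE) |
	    SM(R92C_TXDW1_RAID, R92C_RAID_11BG));
	txd->txdw5 = htole32(SM(R92C_TXDW5_DATARATE, ridx));
}
#endif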
/*
* Driver definitions.
*/
#define URTWN_RX_LIST_COUNT 1
#define URTWN_TX_LIST_COUNT 8
#define URTWN_HOST_CMD_RING_COUNT 32
#define URTWN_RXBUFSZ (16 * 1024)
#define URTWN_TXBUFSZ (sizeof(struct r92c_tx_desc) + IEEE80211_MAX_LEN)
#define URTWN_RX_DESC_SIZE (sizeof(struct r92c_rx_stat))
#define URTWN_TX_DESC_SIZE (sizeof(struct r92c_tx_desc))
#define URTWN_RIDX_COUNT 28
#define URTWN_TX_TIMEOUT 5000 /* ms */
#define URTWN_LED_LINK 0
#define URTWN_LED_DATA 1
struct urtwn_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
uint8_t wr_dbm_antsignal;
} __packed __aligned(8);
#define URTWN_RX_RADIOTAP_PRESENT \
(1 << IEEE80211_RADIOTAP_FLAGS | \
1 << IEEE80211_RADIOTAP_RATE | \
1 << IEEE80211_RADIOTAP_CHANNEL | \
1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL)
struct urtwn_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
} __packed __aligned(8);
#define URTWN_TX_RADIOTAP_PRESENT \
(1 << IEEE80211_RADIOTAP_FLAGS | \
1 << IEEE80211_RADIOTAP_CHANNEL)
struct urtwn_softc;
struct urtwn_data {
struct urtwn_softc *sc;
uint8_t *buf;
uint16_t buflen;
struct mbuf *m;
struct ieee80211_node *ni;
STAILQ_ENTRY(urtwn_data) next;
};
typedef STAILQ_HEAD(, urtwn_data) urtwn_datahead;
struct urtwn_cmdq {
void *arg0;
void *arg1;
void (*func)(void *);
struct ieee80211_key *k;
struct ieee80211_key key;
uint8_t mac[IEEE80211_ADDR_LEN];
uint8_t wcid;
};
struct urtwn_fw_info {
const uint8_t *data;
size_t size;
};
struct urtwn_vap {
struct ieee80211vap vap;
struct ieee80211_beacon_offsets bo;
int (*newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define URTWN_VAP(vap) ((struct urtwn_vap *)(vap))
struct urtwn_host_cmd {
void (*cb)(struct urtwn_softc *, void *);
uint8_t data[256];
};
struct urtwn_cmd_newstate {
enum ieee80211_state state;
int arg;
};
struct urtwn_cmd_key {
struct ieee80211_key key;
uint16_t associd;
};
enum {
URTWN_BULK_RX,
URTWN_BULK_TX_BE, /* = WME_AC_BE */
URTWN_BULK_TX_BK, /* = WME_AC_BK */
URTWN_BULK_TX_VI, /* = WME_AC_VI */
URTWN_BULK_TX_VO, /* = WME_AC_VO */
URTWN_N_TRANSFER = 5,
};
#define URTWN_EP_QUEUES URTWN_BULK_RX
struct urtwn_softc {
- struct ifnet *sc_ifp;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
device_t sc_dev;
struct usb_device *sc_udev;
int ac2idx[WME_NUM_AC];
u_int sc_flags;
#define URTWN_FLAG_CCK_HIPWR 0x01
#define URTWN_DETACHED 0x02
+#define URTWN_RUNNING 0x04
u_int chip;
#define URTWN_CHIP_92C 0x01
#define URTWN_CHIP_92C_1T2R 0x02
#define URTWN_CHIP_UMC 0x04
#define URTWN_CHIP_UMC_A_CUT 0x08
#define URTWN_CHIP_88E 0x10
void (*sc_rf_write)(struct urtwn_softc *,
int, uint8_t, uint32_t);
int (*sc_power_on)(struct urtwn_softc *);
int (*sc_dma_init)(struct urtwn_softc *);
uint8_t board_type;
uint8_t regulatory;
uint8_t pa_setting;
int avg_pwdb;
int thcal_state;
int thcal_lctemp;
int ntxchains;
int nrxchains;
int ledlink;
int sc_txtimer;
int fwcur;
struct urtwn_data sc_rx[URTWN_RX_LIST_COUNT];
urtwn_datahead sc_rx_active;
urtwn_datahead sc_rx_inactive;
struct urtwn_data sc_tx[URTWN_TX_LIST_COUNT];
urtwn_datahead sc_tx_active;
urtwn_datahead sc_tx_inactive;
urtwn_datahead sc_tx_pending;
const char *fwname;
const struct firmware *fw_fp;
struct urtwn_fw_info fw;
void *fw_virtaddr;
struct r92c_rom rom;
uint8_t r88e_rom[512];
uint8_t cck_tx_pwr[6];
uint8_t ht40_tx_pwr[5];
int8_t bw20_tx_pwr_diff;
int8_t ofdm_tx_pwr_diff;
- uint8_t sc_bssid[IEEE80211_ADDR_LEN];
struct callout sc_watchdog_ch;
struct mtx sc_mtx;
/* needs to be a power of 2, otherwise URTWN_CMDQ_GET fails */
#define URTWN_CMDQ_MAX 16
#define URTWN_CMDQ_MASQ (URTWN_CMDQ_MAX - 1)
struct urtwn_cmdq cmdq[URTWN_CMDQ_MAX];
struct task cmdq_task;
uint32_t cmdq_store;
uint8_t cmdq_exec;
uint8_t cmdq_run;
uint8_t cmdq_key_set;
#define URTWN_CMDQ_ABORT 0
#define URTWN_CMDQ_GO 1
uint32_t rf_chnlbw[R92C_MAX_CHAINS];
struct usb_xfer *sc_xfer[URTWN_N_TRANSFER];
struct urtwn_rx_radiotap_header sc_rxtap;
int sc_rxtap_len;
struct urtwn_tx_radiotap_header sc_txtap;
int sc_txtap_len;
};
#define URTWN_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define URTWN_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
#define URTWN_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
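/*
 * Illustration only: the "power of 2" requirement on URTWN_CMDQ_MAX exists
 * so that a free-running counter can be wrapped onto the command ring with
 * a cheap mask.  One plausible shape of that indexing (the driver's actual
 * URTWN_CMDQ_GET is not shown in this excerpt):
 */
#if 0	/* example sketch */
	struct urtwn_cmdq *item;

	item = &sc->cmdq[sc->cmdq_store++ & URTWN_CMDQ_MASQ];	/* producer slot */
	/* fill *item, then enqueue sc->cmdq_task; the consumer walks
	   sc->cmdq[sc->cmdq_exec & URTWN_CMDQ_MASQ] in the same fashion. */
#endif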
/*
* MAC initialization values.
*/
static const struct {
uint16_t reg;
uint8_t val;
} rtl8188eu_mac[] = {
{ 0x026, 0x41 }, { 0x027, 0x35 }, { 0x040, 0x00 }, { 0x428, 0x0a },
{ 0x429, 0x10 }, { 0x430, 0x00 }, { 0x431, 0x01 }, { 0x432, 0x02 },
{ 0x433, 0x04 }, { 0x434, 0x05 }, { 0x435, 0x06 }, { 0x436, 0x07 },
{ 0x437, 0x08 }, { 0x438, 0x00 }, { 0x439, 0x00 }, { 0x43a, 0x01 },
{ 0x43b, 0x02 }, { 0x43c, 0x04 }, { 0x43d, 0x05 }, { 0x43e, 0x06 },
{ 0x43f, 0x07 }, { 0x440, 0x5d }, { 0x441, 0x01 }, { 0x442, 0x00 },
{ 0x444, 0x15 }, { 0x445, 0xf0 }, { 0x446, 0x0f }, { 0x447, 0x00 },
{ 0x458, 0x41 }, { 0x459, 0xa8 }, { 0x45a, 0x72 }, { 0x45b, 0xb9 },
{ 0x460, 0x66 }, { 0x461, 0x66 }, { 0x480, 0x08 }, { 0x4c8, 0xff },
{ 0x4c9, 0x08 }, { 0x4cc, 0xff }, { 0x4cd, 0xff }, { 0x4ce, 0x01 },
{ 0x4d3, 0x01 }, { 0x500, 0x26 }, { 0x501, 0xa2 }, { 0x502, 0x2f },
{ 0x503, 0x00 }, { 0x504, 0x28 }, { 0x505, 0xa3 }, { 0x506, 0x5e },
{ 0x507, 0x00 }, { 0x508, 0x2b }, { 0x509, 0xa4 }, { 0x50a, 0x5e },
{ 0x50b, 0x00 }, { 0x50c, 0x4f }, { 0x50d, 0xa4 }, { 0x50e, 0x00 },
{ 0x50f, 0x00 }, { 0x512, 0x1c }, { 0x514, 0x0a }, { 0x516, 0x0a },
{ 0x525, 0x4f }, { 0x550, 0x10 }, { 0x551, 0x10 }, { 0x559, 0x02 },
{ 0x55d, 0xff }, { 0x605, 0x30 }, { 0x608, 0x0e }, { 0x609, 0x2a },
{ 0x620, 0xff }, { 0x621, 0xff }, { 0x622, 0xff }, { 0x623, 0xff },
{ 0x624, 0xff }, { 0x625, 0xff }, { 0x626, 0xff }, { 0x627, 0xff },
{ 0x652, 0x20 }, { 0x63c, 0x0a }, { 0x63d, 0x0a }, { 0x63e, 0x0e },
{ 0x63f, 0x0e }, { 0x640, 0x40 }, { 0x66e, 0x05 }, { 0x700, 0x21 },
{ 0x701, 0x43 }, { 0x702, 0x65 }, { 0x703, 0x87 }, { 0x708, 0x21 },
{ 0x709, 0x43 }, { 0x70a, 0x65 }, { 0x70b, 0x87 }
}, rtl8192cu_mac[] = {
{ 0x420, 0x80 }, { 0x423, 0x00 }, { 0x430, 0x00 }, { 0x431, 0x00 },
{ 0x432, 0x00 }, { 0x433, 0x01 }, { 0x434, 0x04 }, { 0x435, 0x05 },
{ 0x436, 0x06 }, { 0x437, 0x07 }, { 0x438, 0x00 }, { 0x439, 0x00 },
{ 0x43a, 0x00 }, { 0x43b, 0x01 }, { 0x43c, 0x04 }, { 0x43d, 0x05 },
{ 0x43e, 0x06 }, { 0x43f, 0x07 }, { 0x440, 0x5d }, { 0x441, 0x01 },
{ 0x442, 0x00 }, { 0x444, 0x15 }, { 0x445, 0xf0 }, { 0x446, 0x0f },
{ 0x447, 0x00 }, { 0x458, 0x41 }, { 0x459, 0xa8 }, { 0x45a, 0x72 },
{ 0x45b, 0xb9 }, { 0x460, 0x66 }, { 0x461, 0x66 }, { 0x462, 0x08 },
{ 0x463, 0x03 }, { 0x4c8, 0xff }, { 0x4c9, 0x08 }, { 0x4cc, 0xff },
{ 0x4cd, 0xff }, { 0x4ce, 0x01 }, { 0x500, 0x26 }, { 0x501, 0xa2 },
{ 0x502, 0x2f }, { 0x503, 0x00 }, { 0x504, 0x28 }, { 0x505, 0xa3 },
{ 0x506, 0x5e }, { 0x507, 0x00 }, { 0x508, 0x2b }, { 0x509, 0xa4 },
{ 0x50a, 0x5e }, { 0x50b, 0x00 }, { 0x50c, 0x4f }, { 0x50d, 0xa4 },
{ 0x50e, 0x00 }, { 0x50f, 0x00 }, { 0x512, 0x1c }, { 0x514, 0x0a },
{ 0x515, 0x10 }, { 0x516, 0x0a }, { 0x517, 0x10 }, { 0x51a, 0x16 },
{ 0x524, 0x0f }, { 0x525, 0x4f }, { 0x546, 0x40 }, { 0x547, 0x00 },
{ 0x550, 0x10 }, { 0x551, 0x10 }, { 0x559, 0x02 }, { 0x55a, 0x02 },
{ 0x55d, 0xff }, { 0x605, 0x30 }, { 0x608, 0x0e }, { 0x609, 0x2a },
{ 0x652, 0x20 }, { 0x63c, 0x0a }, { 0x63d, 0x0e }, { 0x63e, 0x0a },
{ 0x63f, 0x0e }, { 0x66e, 0x05 }, { 0x700, 0x21 }, { 0x701, 0x43 },
{ 0x702, 0x65 }, { 0x703, 0x87 }, { 0x708, 0x21 }, { 0x709, 0x43 },
{ 0x70a, 0x65 }, { 0x70b, 0x87 }
};
/*
* Baseband initialization values.
*/
struct urtwn_bb_prog {
int count;
const uint16_t *regs;
const uint32_t *vals;
int agccount;
const uint32_t *agcvals;
};
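/*
 * Illustration only: a table of this shape is usually walked pairwise,
 * writing vals[i] to regs[i], with the AGC values all going to the
 * baseband's AGC table register.  urtwn_bb_write() and
 * R92C_OFDM0_AGCRSSITABLE are assumptions not shown in this excerpt, and
 * prog stands for one of the tables below (e.g. &rtl8192ce_bb_prog).
 */
#if 0	/* example sketch */
	int i;

	for (i = 0; i < prog->count; i++)
		urtwn_bb_write(sc, prog->regs[i], prog->vals[i]);
	for (i = 0; i < prog->agccount; i++)
		urtwn_bb_write(sc, R92C_OFDM0_AGCRSSITABLE, prog->agcvals[i]);
#endif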
/*
* RTL8192CU and RTL8192CE-VAU.
*/
static const uint16_t rtl8192ce_bb_regs[] = {
0x024, 0x028, 0x800, 0x804, 0x808, 0x80c, 0x810, 0x814, 0x818,
0x81c, 0x820, 0x824, 0x828, 0x82c, 0x830, 0x834, 0x838, 0x83c,
0x840, 0x844, 0x848, 0x84c, 0x850, 0x854, 0x858, 0x85c, 0x860,
0x864, 0x868, 0x86c, 0x870, 0x874, 0x878, 0x87c, 0x880, 0x884,
0x888, 0x88c, 0x890, 0x894, 0x898, 0x89c, 0x900, 0x904, 0x908,
0x90c, 0xa00, 0xa04, 0xa08, 0xa0c, 0xa10, 0xa14, 0xa18, 0xa1c,
0xa20, 0xa24, 0xa28, 0xa2c, 0xa70, 0xa74, 0xc00, 0xc04, 0xc08,
0xc0c, 0xc10, 0xc14, 0xc18, 0xc1c, 0xc20, 0xc24, 0xc28, 0xc2c,
0xc30, 0xc34, 0xc38, 0xc3c, 0xc40, 0xc44, 0xc48, 0xc4c, 0xc50,
0xc54, 0xc58, 0xc5c, 0xc60, 0xc64, 0xc68, 0xc6c, 0xc70, 0xc74,
0xc78, 0xc7c, 0xc80, 0xc84, 0xc88, 0xc8c, 0xc90, 0xc94, 0xc98,
0xc9c, 0xca0, 0xca4, 0xca8, 0xcac, 0xcb0, 0xcb4, 0xcb8, 0xcbc,
0xcc0, 0xcc4, 0xcc8, 0xccc, 0xcd0, 0xcd4, 0xcd8, 0xcdc, 0xce0,
0xce4, 0xce8, 0xcec, 0xd00, 0xd04, 0xd08, 0xd0c, 0xd10, 0xd14,
0xd18, 0xd2c, 0xd30, 0xd34, 0xd38, 0xd3c, 0xd40, 0xd44, 0xd48,
0xd4c, 0xd50, 0xd54, 0xd58, 0xd5c, 0xd60, 0xd64, 0xd68, 0xd6c,
0xd70, 0xd74, 0xd78, 0xe00, 0xe04, 0xe08, 0xe10, 0xe14, 0xe18,
0xe1c, 0xe28, 0xe30, 0xe34, 0xe38, 0xe3c, 0xe40, 0xe44, 0xe48,
0xe4c, 0xe50, 0xe54, 0xe58, 0xe5c, 0xe60, 0xe68, 0xe6c, 0xe70,
0xe74, 0xe78, 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c, 0xed0, 0xed4,
0xed8, 0xedc, 0xee0, 0xeec, 0xf14, 0xf4c, 0xf00
};
static const uint32_t rtl8192ce_bb_vals[] = {
0x0011800d, 0x00ffdb83, 0x80040002, 0x00000003, 0x0000fc00,
0x0000000a, 0x10005388, 0x020c3d10, 0x02200385, 0x00000000,
0x01000100, 0x00390004, 0x01000100, 0x00390004, 0x27272727,
0x27272727, 0x27272727, 0x27272727, 0x00010000, 0x00010000,
0x27272727, 0x27272727, 0x00000000, 0x00000000, 0x569a569a,
0x0c1b25a4, 0x66e60230, 0x061f0130, 0x27272727, 0x2b2b2b27,
0x07000700, 0x22184000, 0x08080808, 0x00000000, 0xc0083070,
0x000004d5, 0x00000000, 0xcc0000c0, 0x00000800, 0xfffffffe,
0x40302010, 0x00706050, 0x00000000, 0x00000023, 0x00000000,
0x81121313, 0x00d047c8, 0x80ff000c, 0x8c838300, 0x2e68120f,
0x9500bb78, 0x11144028, 0x00881117, 0x89140f00, 0x1a1b0000,
0x090e1317, 0x00000204, 0x00d30000, 0x101fbf00, 0x00000007,
0x48071d40, 0x03a05633, 0x000000e4, 0x6c6c6c6c, 0x08800000,
0x40000100, 0x08800000, 0x40000100, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x69e9ac44, 0x469652cf, 0x49795994,
0x0a97971c, 0x1f7c403f, 0x000100b7, 0xec020107, 0x007f037f,
0x6954341e, 0x43bc0094, 0x6954341e, 0x433c0094, 0x00000000,
0x5116848b, 0x47c00bff, 0x00000036, 0x2c7f000d, 0x018610db,
0x0000001f, 0x00b91612, 0x40000100, 0x20f60000, 0x40000100,
0x20200000, 0x00121820, 0x00000000, 0x00121820, 0x00007f7f,
0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x28000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x64b22427,
0x00766932, 0x00222222, 0x00000000, 0x37644302, 0x2f97d40c,
0x00080740, 0x00020403, 0x0000907f, 0x20010201, 0xa0633333,
0x3333bc43, 0x7a8f5b6b, 0xcc979975, 0x00000000, 0x80608000,
0x00000000, 0x00027293, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x6437140a, 0x00000000, 0x00000000, 0x30032064,
0x4653de68, 0x04518a3c, 0x00002101, 0x2a201c16, 0x1812362e,
0x322c2220, 0x000e3c24, 0x2a2a2a2a, 0x2a2a2a2a, 0x03902a2a,
0x2a2a2a2a, 0x2a2a2a2a, 0x2a2a2a2a, 0x2a2a2a2a, 0x00000000,
0x1000dc1f, 0x10008c1f, 0x02140102, 0x681604c2, 0x01007c00,
0x01004800, 0xfb000000, 0x000028d1, 0x1000dc1f, 0x10008c1f,
0x02140102, 0x28160d05, 0x00000010, 0x001b25a4, 0x63db25a4,
0x63db25a4, 0x0c1b25a4, 0x0c1b25a4, 0x0c1b25a4, 0x0c1b25a4,
0x63db25a4, 0x0c1b25a4, 0x63db25a4, 0x63db25a4, 0x63db25a4,
0x63db25a4, 0x001b25a4, 0x001b25a4, 0x6fdb25a4, 0x00000003,
0x00000000, 0x00000300
};
static const uint32_t rtl8192ce_agc_vals[] = {
0x7b000001, 0x7b010001, 0x7b020001, 0x7b030001, 0x7b040001,
0x7b050001, 0x7a060001, 0x79070001, 0x78080001, 0x77090001,
0x760a0001, 0x750b0001, 0x740c0001, 0x730d0001, 0x720e0001,
0x710f0001, 0x70100001, 0x6f110001, 0x6e120001, 0x6d130001,
0x6c140001, 0x6b150001, 0x6a160001, 0x69170001, 0x68180001,
0x67190001, 0x661a0001, 0x651b0001, 0x641c0001, 0x631d0001,
0x621e0001, 0x611f0001, 0x60200001, 0x49210001, 0x48220001,
0x47230001, 0x46240001, 0x45250001, 0x44260001, 0x43270001,
0x42280001, 0x41290001, 0x402a0001, 0x262b0001, 0x252c0001,
0x242d0001, 0x232e0001, 0x222f0001, 0x21300001, 0x20310001,
0x06320001, 0x05330001, 0x04340001, 0x03350001, 0x02360001,
0x01370001, 0x00380001, 0x00390001, 0x003a0001, 0x003b0001,
0x003c0001, 0x003d0001, 0x003e0001, 0x003f0001, 0x7b400001,
0x7b410001, 0x7b420001, 0x7b430001, 0x7b440001, 0x7b450001,
0x7a460001, 0x79470001, 0x78480001, 0x77490001, 0x764a0001,
0x754b0001, 0x744c0001, 0x734d0001, 0x724e0001, 0x714f0001,
0x70500001, 0x6f510001, 0x6e520001, 0x6d530001, 0x6c540001,
0x6b550001, 0x6a560001, 0x69570001, 0x68580001, 0x67590001,
0x665a0001, 0x655b0001, 0x645c0001, 0x635d0001, 0x625e0001,
0x615f0001, 0x60600001, 0x49610001, 0x48620001, 0x47630001,
0x46640001, 0x45650001, 0x44660001, 0x43670001, 0x42680001,
0x41690001, 0x406a0001, 0x266b0001, 0x256c0001, 0x246d0001,
0x236e0001, 0x226f0001, 0x21700001, 0x20710001, 0x06720001,
0x05730001, 0x04740001, 0x03750001, 0x02760001, 0x01770001,
0x00780001, 0x00790001, 0x007a0001, 0x007b0001, 0x007c0001,
0x007d0001, 0x007e0001, 0x007f0001, 0x3800001e, 0x3801001e,
0x3802001e, 0x3803001e, 0x3804001e, 0x3805001e, 0x3806001e,
0x3807001e, 0x3808001e, 0x3c09001e, 0x3e0a001e, 0x400b001e,
0x440c001e, 0x480d001e, 0x4c0e001e, 0x500f001e, 0x5210001e,
0x5611001e, 0x5a12001e, 0x5e13001e, 0x6014001e, 0x6015001e,
0x6016001e, 0x6217001e, 0x6218001e, 0x6219001e, 0x621a001e,
0x621b001e, 0x621c001e, 0x621d001e, 0x621e001e, 0x621f001e
};
static const struct urtwn_bb_prog rtl8192ce_bb_prog = {
nitems(rtl8192ce_bb_regs),
rtl8192ce_bb_regs,
rtl8192ce_bb_vals,
nitems(rtl8192ce_agc_vals),
rtl8192ce_agc_vals
};
/*
* RTL8188CU.
*/
static const uint32_t rtl8192cu_bb_vals[] = {
0x0011800d, 0x00ffdb83, 0x80040002, 0x00000003, 0x0000fc00,
0x0000000a, 0x10005388, 0x020c3d10, 0x02200385, 0x00000000,
0x01000100, 0x00390004, 0x01000100, 0x00390004, 0x27272727,
0x27272727, 0x27272727, 0x27272727, 0x00010000, 0x00010000,
0x27272727, 0x27272727, 0x00000000, 0x00000000, 0x569a569a,
0x0c1b25a4, 0x66e60230, 0x061f0130, 0x27272727, 0x2b2b2b27,
0x07000700, 0x22184000, 0x08080808, 0x00000000, 0xc0083070,
0x000004d5, 0x00000000, 0xcc0000c0, 0x00000800, 0xfffffffe,
0x40302010, 0x00706050, 0x00000000, 0x00000023, 0x00000000,
0x81121313, 0x00d047c8, 0x80ff000c, 0x8c838300, 0x2e68120f,
0x9500bb78, 0x11144028, 0x00881117, 0x89140f00, 0x1a1b0000,
0x090e1317, 0x00000204, 0x00d30000, 0x101fbf00, 0x00000007,
0x48071d40, 0x03a05633, 0x000000e4, 0x6c6c6c6c, 0x08800000,
0x40000100, 0x08800000, 0x40000100, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x69e9ac44, 0x469652cf, 0x49795994,
0x0a97971c, 0x1f7c403f, 0x000100b7, 0xec020107, 0x007f037f,
0x6954341e, 0x43bc0094, 0x6954341e, 0x433c0094, 0x00000000,
0x5116848b, 0x47c00bff, 0x00000036, 0x2c7f000d, 0x0186115b,
0x0000001f, 0x00b99612, 0x40000100, 0x20f60000, 0x40000100,
0x20200000, 0x00121820, 0x00000000, 0x00121820, 0x00007f7f,
0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x28000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x64b22427,
0x00766932, 0x00222222, 0x00000000, 0x37644302, 0x2f97d40c,
0x00080740, 0x00020403, 0x0000907f, 0x20010201, 0xa0633333,
0x3333bc43, 0x7a8f5b6b, 0xcc979975, 0x00000000, 0x80608000,
0x00000000, 0x00027293, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x6437140a, 0x00000000, 0x00000000, 0x30032064,
0x4653de68, 0x04518a3c, 0x00002101, 0x2a201c16, 0x1812362e,
0x322c2220, 0x000e3c24, 0x2a2a2a2a, 0x2a2a2a2a, 0x03902a2a,
0x2a2a2a2a, 0x2a2a2a2a, 0x2a2a2a2a, 0x2a2a2a2a, 0x00000000,
0x1000dc1f, 0x10008c1f, 0x02140102, 0x681604c2, 0x01007c00,
0x01004800, 0xfb000000, 0x000028d1, 0x1000dc1f, 0x10008c1f,
0x02140102, 0x28160d05, 0x00000010, 0x001b25a4, 0x63db25a4,
0x63db25a4, 0x0c1b25a4, 0x0c1b25a4, 0x0c1b25a4, 0x0c1b25a4,
0x63db25a4, 0x0c1b25a4, 0x63db25a4, 0x63db25a4, 0x63db25a4,
0x63db25a4, 0x001b25a4, 0x001b25a4, 0x6fdb25a4, 0x00000003,
0x00000000, 0x00000300
};
static const struct urtwn_bb_prog rtl8192cu_bb_prog = {
nitems(rtl8192ce_bb_regs),
rtl8192ce_bb_regs,
rtl8192cu_bb_vals,
nitems(rtl8192ce_agc_vals),
rtl8192ce_agc_vals
};
/*
* RTL8188CE-VAU.
*/
static const uint32_t rtl8188ce_bb_vals[] = {
0x0011800d, 0x00ffdb83, 0x80040000, 0x00000001, 0x0000fc00,
0x0000000a, 0x10005388, 0x020c3d10, 0x02200385, 0x00000000,
0x01000100, 0x00390004, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00010000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x569a569a,
0x001b25a4, 0x66e60230, 0x061f0130, 0x00000000, 0x32323200,
0x07000700, 0x22004000, 0x00000808, 0x00000000, 0xc0083070,
0x000004d5, 0x00000000, 0xccc000c0, 0x00000800, 0xfffffffe,
0x40302010, 0x00706050, 0x00000000, 0x00000023, 0x00000000,
0x81121111, 0x00d047c8, 0x80ff000c, 0x8c838300, 0x2e68120f,
0x9500bb78, 0x11144028, 0x00881117, 0x89140f00, 0x1a1b0000,
0x090e1317, 0x00000204, 0x00d30000, 0x101fbf00, 0x00000007,
0x48071d40, 0x03a05611, 0x000000e4, 0x6c6c6c6c, 0x08800000,
0x40000100, 0x08800000, 0x40000100, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x69e9ac44, 0x469652cf, 0x49795994,
0x0a97971c, 0x1f7c403f, 0x000100b7, 0xec020107, 0x007f037f,
0x6954341e, 0x43bc0094, 0x6954341e, 0x433c0094, 0x00000000,
0x5116848b, 0x47c00bff, 0x00000036, 0x2c7f000d, 0x018610db,
0x0000001f, 0x00b91612, 0x40000100, 0x20f60000, 0x40000100,
0x20200000, 0x00121820, 0x00000000, 0x00121820, 0x00007f7f,
0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x28000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x64b22427,
0x00766932, 0x00222222, 0x00000000, 0x37644302, 0x2f97d40c,
0x00080740, 0x00020401, 0x0000907f, 0x20010201, 0xa0633333,
0x3333bc43, 0x7a8f5b6b, 0xcc979975, 0x00000000, 0x80608000,
0x00000000, 0x00027293, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x6437140a, 0x00000000, 0x00000000, 0x30032064,
0x4653de68, 0x04518a3c, 0x00002101, 0x2a201c16, 0x1812362e,
0x322c2220, 0x000e3c24, 0x2a2a2a2a, 0x2a2a2a2a, 0x03902a2a,
0x2a2a2a2a, 0x2a2a2a2a, 0x2a2a2a2a, 0x2a2a2a2a, 0x00000000,
0x1000dc1f, 0x10008c1f, 0x02140102, 0x681604c2, 0x01007c00,
0x01004800, 0xfb000000, 0x000028d1, 0x1000dc1f, 0x10008c1f,
0x02140102, 0x28160d05, 0x00000008, 0x001b25a4, 0x631b25a0,
0x631b25a0, 0x081b25a0, 0x081b25a0, 0x081b25a0, 0x081b25a0,
0x631b25a0, 0x081b25a0, 0x631b25a0, 0x631b25a0, 0x631b25a0,
0x631b25a0, 0x001b25a0, 0x001b25a0, 0x6b1b25a0, 0x00000003,
0x00000000, 0x00000300
};
static const uint32_t rtl8188ce_agc_vals[] = {
0x7b000001, 0x7b010001, 0x7b020001, 0x7b030001, 0x7b040001,
0x7b050001, 0x7a060001, 0x79070001, 0x78080001, 0x77090001,
0x760a0001, 0x750b0001, 0x740c0001, 0x730d0001, 0x720e0001,
0x710f0001, 0x70100001, 0x6f110001, 0x6e120001, 0x6d130001,
0x6c140001, 0x6b150001, 0x6a160001, 0x69170001, 0x68180001,
0x67190001, 0x661a0001, 0x651b0001, 0x641c0001, 0x631d0001,
0x621e0001, 0x611f0001, 0x60200001, 0x49210001, 0x48220001,
0x47230001, 0x46240001, 0x45250001, 0x44260001, 0x43270001,
0x42280001, 0x41290001, 0x402a0001, 0x262b0001, 0x252c0001,
0x242d0001, 0x232e0001, 0x222f0001, 0x21300001, 0x20310001,
0x06320001, 0x05330001, 0x04340001, 0x03350001, 0x02360001,
0x01370001, 0x00380001, 0x00390001, 0x003a0001, 0x003b0001,
0x003c0001, 0x003d0001, 0x003e0001, 0x003f0001, 0x7b400001,
0x7b410001, 0x7b420001, 0x7b430001, 0x7b440001, 0x7b450001,
0x7a460001, 0x79470001, 0x78480001, 0x77490001, 0x764a0001,
0x754b0001, 0x744c0001, 0x734d0001, 0x724e0001, 0x714f0001,
0x70500001, 0x6f510001, 0x6e520001, 0x6d530001, 0x6c540001,
0x6b550001, 0x6a560001, 0x69570001, 0x68580001, 0x67590001,
0x665a0001, 0x655b0001, 0x645c0001, 0x635d0001, 0x625e0001,
0x615f0001, 0x60600001, 0x49610001, 0x48620001, 0x47630001,
0x46640001, 0x45650001, 0x44660001, 0x43670001, 0x42680001,
0x41690001, 0x406a0001, 0x266b0001, 0x256c0001, 0x246d0001,
0x236e0001, 0x226f0001, 0x21700001, 0x20710001, 0x06720001,
0x05730001, 0x04740001, 0x03750001, 0x02760001, 0x01770001,
0x00780001, 0x00790001, 0x007a0001, 0x007b0001, 0x007c0001,
0x007d0001, 0x007e0001, 0x007f0001, 0x3800001e, 0x3801001e,
0x3802001e, 0x3803001e, 0x3804001e, 0x3805001e, 0x3806001e,
0x3807001e, 0x3808001e, 0x3c09001e, 0x3e0a001e, 0x400b001e,
0x440c001e, 0x480d001e, 0x4c0e001e, 0x500f001e, 0x5210001e,
0x5611001e, 0x5a12001e, 0x5e13001e, 0x6014001e, 0x6015001e,
0x6016001e, 0x6217001e, 0x6218001e, 0x6219001e, 0x621a001e,
0x621b001e, 0x621c001e, 0x621d001e, 0x621e001e, 0x621f001e
};
static const struct urtwn_bb_prog rtl8188ce_bb_prog = {
nitems(rtl8192ce_bb_regs),
rtl8192ce_bb_regs,
rtl8188ce_bb_vals,
nitems(rtl8188ce_agc_vals),
rtl8188ce_agc_vals
};
static const uint32_t rtl8188cu_bb_vals[] = {
0x0011800d, 0x00ffdb83, 0x80040000, 0x00000001, 0x0000fc00,
0x0000000a, 0x10005388, 0x020c3d10, 0x02200385, 0x00000000,
0x01000100, 0x00390004, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00010000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x569a569a,
0x001b25a4, 0x66e60230, 0x061f0130, 0x00000000, 0x32323200,
0x07000700, 0x22004000, 0x00000808, 0x00000000, 0xc0083070,
0x000004d5, 0x00000000, 0xccc000c0, 0x00000800, 0xfffffffe,
0x40302010, 0x00706050, 0x00000000, 0x00000023, 0x00000000,
0x81121111, 0x00d047c8, 0x80ff000c, 0x8c838300, 0x2e68120f,
0x9500bb78, 0x11144028, 0x00881117, 0x89140f00, 0x1a1b0000,
0x090e1317, 0x00000204, 0x00d30000, 0x101fbf00, 0x00000007,
0x48071d40, 0x03a05611, 0x000000e4, 0x6c6c6c6c, 0x08800000,
0x40000100, 0x08800000, 0x40000100, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x69e9ac44, 0x469652cf, 0x49795994,
0x0a97971c, 0x1f7c403f, 0x000100b7, 0xec020107, 0x007f037f,
0x6954341e, 0x43bc0094, 0x6954341e, 0x433c0094, 0x00000000,
0x5116848b, 0x47c00bff, 0x00000036, 0x2c7f000d, 0x018610db,
0x0000001f, 0x00b91612, 0x40000100, 0x20f60000, 0x40000100,
0x20200000, 0x00121820, 0x00000000, 0x00121820, 0x00007f7f,
0x00000000, 0x00000080, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x28000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x64b22427,
0x00766932, 0x00222222, 0x00000000, 0x37644302, 0x2f97d40c,
0x00080740, 0x00020401, 0x0000907f, 0x20010201, 0xa0633333,
0x3333bc43, 0x7a8f5b6b, 0xcc979975, 0x00000000, 0x80608000,
0x00000000, 0x00027293, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x6437140a, 0x00000000, 0x00000000, 0x30032064,
0x4653de68, 0x04518a3c, 0x00002101, 0x2a201c16, 0x1812362e,
0x322c2220, 0x000e3c24, 0x2a2a2a2a, 0x2a2a2a2a, 0x03902a2a,
0x2a2a2a2a, 0x2a2a2a2a, 0x2a2a2a2a, 0x2a2a2a2a, 0x00000000,
0x1000dc1f, 0x10008c1f, 0x02140102, 0x681604c2, 0x01007c00,
0x01004800, 0xfb000000, 0x000028d1, 0x1000dc1f, 0x10008c1f,
0x02140102, 0x28160d05, 0x00000008, 0x001b25a4, 0x631b25a0,
0x631b25a0, 0x081b25a0, 0x081b25a0, 0x081b25a0, 0x081b25a0,
0x631b25a0, 0x081b25a0, 0x631b25a0, 0x631b25a0, 0x631b25a0,
0x631b25a0, 0x001b25a0, 0x001b25a0, 0x6b1b25a0, 0x00000003,
0x00000000, 0x00000300
};
static const struct urtwn_bb_prog rtl8188cu_bb_prog = {
nitems(rtl8192ce_bb_regs),
rtl8192ce_bb_regs,
rtl8188cu_bb_vals,
nitems(rtl8188ce_agc_vals),
rtl8188ce_agc_vals
};
/*
* RTL8188EU.
*/
static const uint16_t rtl8188eu_bb_regs[] = {
0x800, 0x804, 0x808, 0x80c, 0x810, 0x814, 0x818, 0x81c,
0x820, 0x824, 0x828, 0x82c, 0x830, 0x834, 0x838, 0x83c,
0x840, 0x844, 0x848, 0x84c, 0x850, 0x854, 0x858, 0x85c,
0x860, 0x864, 0x868, 0x86c, 0x870, 0x874, 0x878, 0x87c,
0x880, 0x884, 0x888, 0x88c, 0x890, 0x894, 0x898, 0x89c,
0x900, 0x904, 0x908, 0x90c, 0x910, 0x914, 0xa00, 0xa04,
0xa08, 0xa0c, 0xa10, 0xa14, 0xa18, 0xa1c, 0xa20, 0xa24,
0xa28, 0xa2c, 0xa70, 0xa74, 0xa78, 0xa7c, 0xa80, 0xb2c,
0xc00, 0xc04, 0xc08, 0xc0c, 0xc10, 0xc14, 0xc18, 0xc1c,
0xc20, 0xc24, 0xc28, 0xc2c, 0xc30, 0xc34, 0xc38, 0xc3c,
0xc40, 0xc44, 0xc48, 0xc4c, 0xc50, 0xc54, 0xc58, 0xc5c,
0xc60, 0xc64, 0xc68, 0xc6c, 0xc70, 0xc74, 0xc78, 0xc7c,
0xc80, 0xc84, 0xc88, 0xc8c, 0xc90, 0xc94, 0xc98, 0xc9c,
0xca0, 0xca4, 0xca8, 0xcac, 0xcb0, 0xcb4, 0xcb8, 0xcbc,
0xcc0, 0xcc4, 0xcc8, 0xccc, 0xcd0, 0xcd4, 0xcd8, 0xcdc,
0xce0, 0xce4, 0xce8, 0xcec, 0xd00, 0xd04, 0xd08, 0xd0c,
0xd10, 0xd14, 0xd18, 0xd2c, 0xd30, 0xd34, 0xd38, 0xd3c,
0xd40, 0xd44, 0xd48, 0xd4c, 0xd50, 0xd54, 0xd58, 0xd5c,
0xd60, 0xd64, 0xd68, 0xd6c, 0xd70, 0xd74, 0xd78, 0xe00,
0xe04, 0xe08, 0xe10, 0xe14, 0xe18, 0xe1c, 0xe28, 0xe30,
0xe34, 0xe38, 0xe3c, 0xe40, 0xe44, 0xe48, 0xe4c, 0xe50,
0xe54, 0xe58, 0xe5c, 0xe60, 0xe68, 0xe6c, 0xe70, 0xe74,
0xe78, 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c, 0xed0, 0xed4,
0xed8, 0xedc, 0xee0, 0xee8, 0xeec, 0xf14, 0xf4c, 0xf00
};
static const uint32_t rtl8188eu_bb_vals[] = {
0x80040000, 0x00000003, 0x0000fc00, 0x0000000a, 0x10001331,
0x020c3d10, 0x02200385, 0x00000000, 0x01000100, 0x00390204,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00010000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x569a11a9, 0x01000014, 0x66f60110,
0x061f0649, 0x00000000, 0x27272700, 0x07000760, 0x25004000,
0x00000808, 0x00000000, 0xb0000c1c, 0x00000001, 0x00000000,
0xccc000c0, 0x00000800, 0xfffffffe, 0x40302010, 0x00706050,
0x00000000, 0x00000023, 0x00000000, 0x81121111, 0x00000002,
0x00000201, 0x00d047c8, 0x80ff000c, 0x8c838300, 0x2e7f120f,
0x9500bb78, 0x1114d028, 0x00881117, 0x89140f00, 0x1a1b0000,
0x090e1317, 0x00000204, 0x00d30000, 0x101fbf00, 0x00000007,
0x00000900, 0x225b0606, 0x218075b1, 0x80000000, 0x48071d40,
0x03a05611, 0x000000e4, 0x6c6c6c6c, 0x08800000, 0x40000100,
0x08800000, 0x40000100, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x69e9ac47, 0x469652af, 0x49795994, 0x0a97971c,
0x1f7c403f, 0x000100b7, 0xec020107, 0x007f037f, 0x69553420,
0x43bc0094, 0x00013169, 0x00250492, 0x00000000, 0x7112848b,
0x47c00bff, 0x00000036, 0x2c7f000d, 0x020610db, 0x0000001f,
0x00b91612, 0x390000e4, 0x20f60000, 0x40000100, 0x20200000,
0x00091521, 0x00000000, 0x00121820, 0x00007f7f, 0x00000000,
0x000300a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x28000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x64b22427, 0x00766932,
0x00222222, 0x00000000, 0x37644302, 0x2f97d40c, 0x00000740,
0x00020401, 0x0000907f, 0x20010201, 0xa0633333, 0x3333bc43,
0x7a8f5b6f, 0xcc979975, 0x00000000, 0x80608000, 0x00000000,
0x00127353, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x6437140a, 0x00000000, 0x00000282, 0x30032064, 0x4653de68,
0x04518a3c, 0x00002101, 0x2a201c16, 0x1812362e, 0x322c2220,
0x000e3c24, 0x2d2d2d2d, 0x2d2d2d2d, 0x0390272d, 0x2d2d2d2d,
0x2d2d2d2d, 0x2d2d2d2d, 0x2d2d2d2d, 0x00000000, 0x1000dc1f,
0x10008c1f, 0x02140102, 0x681604c2, 0x01007c00, 0x01004800,
0xfb000000, 0x000028d1, 0x1000dc1f, 0x10008c1f, 0x02140102,
0x28160d05, 0x00000008, 0x001b25a4, 0x00c00014, 0x00c00014,
0x01000014, 0x01000014, 0x01000014, 0x01000014, 0x00c00014,
0x01000014, 0x00c00014, 0x00c00014, 0x00c00014, 0x00c00014,
0x00000014, 0x00000014, 0x21555448, 0x01c00014, 0x00000003,
0x00000000, 0x00000300
};
static const uint32_t rtl8188eu_agc_vals[] = {
0xfb000001, 0xfb010001, 0xfb020001, 0xfb030001, 0xfb040001,
0xfb050001, 0xfa060001, 0xf9070001, 0xf8080001, 0xf7090001,
0xf60a0001, 0xf50b0001, 0xf40c0001, 0xf30d0001, 0xf20e0001,
0xf10f0001, 0xf0100001, 0xef110001, 0xee120001, 0xed130001,
0xec140001, 0xeb150001, 0xea160001, 0xe9170001, 0xe8180001,
0xe7190001, 0xe61a0001, 0xe51b0001, 0xe41c0001, 0xe31d0001,
0xe21e0001, 0xe11f0001, 0x8a200001, 0x89210001, 0x88220001,
0x87230001, 0x86240001, 0x85250001, 0x84260001, 0x83270001,
0x82280001, 0x6b290001, 0x6a2a0001, 0x692b0001, 0x682c0001,
0x672d0001, 0x662e0001, 0x652f0001, 0x64300001, 0x63310001,
0x62320001, 0x61330001, 0x46340001, 0x45350001, 0x44360001,
0x43370001, 0x42380001, 0x41390001, 0x403a0001, 0x403b0001,
0x403c0001, 0x403d0001, 0x403e0001, 0x403f0001, 0xfb400001,
0xfb410001, 0xfb420001, 0xfb430001, 0xfb440001, 0xfb450001,
0xfb460001, 0xfb470001, 0xfb480001, 0xfa490001, 0xf94a0001,
0xf84B0001, 0xf74c0001, 0xf64d0001, 0xf54e0001, 0xf44f0001,
0xf3500001, 0xf2510001, 0xf1520001, 0xf0530001, 0xef540001,
0xee550001, 0xed560001, 0xec570001, 0xeb580001, 0xea590001,
0xe95a0001, 0xe85b0001, 0xe75c0001, 0xe65d0001, 0xe55e0001,
0xe45f0001, 0xe3600001, 0xe2610001, 0xc3620001, 0xc2630001,
0xc1640001, 0x8b650001, 0x8a660001, 0x89670001, 0x88680001,
0x87690001, 0x866a0001, 0x856b0001, 0x846c0001, 0x676d0001,
0x666e0001, 0x656f0001, 0x64700001, 0x63710001, 0x62720001,
0x61730001, 0x60740001, 0x46750001, 0x45760001, 0x44770001,
0x43780001, 0x42790001, 0x417a0001, 0x407b0001, 0x407c0001,
0x407d0001, 0x407e0001, 0x407f0001
};
static const struct urtwn_bb_prog rtl8188eu_bb_prog = {
nitems(rtl8188eu_bb_regs),
rtl8188eu_bb_regs,
rtl8188eu_bb_vals,
nitems(rtl8188eu_agc_vals),
rtl8188eu_agc_vals
};
/*
* RTL8188RU.
*/
static const uint16_t rtl8188ru_bb_regs[] = {
0x024, 0x028, 0x040, 0x800, 0x804, 0x808, 0x80c, 0x810, 0x814,
0x818, 0x81c, 0x820, 0x824, 0x828, 0x82c, 0x830, 0x834, 0x838,
0x83c, 0x840, 0x844, 0x848, 0x84c, 0x850, 0x854, 0x858, 0x85c,
0x860, 0x864, 0x868, 0x86c, 0x870, 0x874, 0x878, 0x87c, 0x880,
0x884, 0x888, 0x88c, 0x890, 0x894, 0x898, 0x89c, 0x900, 0x904,
0x908, 0x90c, 0xa00, 0xa04, 0xa08, 0xa0c, 0xa10, 0xa14, 0xa18,
0xa1c, 0xa20, 0xa24, 0xa28, 0xa2c, 0xa70, 0xa74, 0xc00, 0xc04,
0xc08, 0xc0c, 0xc10, 0xc14, 0xc18, 0xc1c, 0xc20, 0xc24, 0xc28,
0xc2c, 0xc30, 0xc34, 0xc38, 0xc3c, 0xc40, 0xc44, 0xc48, 0xc4c,
0xc50, 0xc54, 0xc58, 0xc5c, 0xc60, 0xc64, 0xc68, 0xc6c, 0xc70,
0xc74, 0xc78, 0xc7c, 0xc80, 0xc84, 0xc88, 0xc8c, 0xc90, 0xc94,
0xc98, 0xc9c, 0xca0, 0xca4, 0xca8, 0xcac, 0xcb0, 0xcb4, 0xcb8,
0xcbc, 0xcc0, 0xcc4, 0xcc8, 0xccc, 0xcd0, 0xcd4, 0xcd8, 0xcdc,
0xce0, 0xce4, 0xce8, 0xcec, 0xd00, 0xd04, 0xd08, 0xd0c, 0xd10,
0xd14, 0xd18, 0xd2c, 0xd30, 0xd34, 0xd38, 0xd3c, 0xd40, 0xd44,
0xd48, 0xd4c, 0xd50, 0xd54, 0xd58, 0xd5c, 0xd60, 0xd64, 0xd68,
0xd6c, 0xd70, 0xd74, 0xd78, 0xe00, 0xe04, 0xe08, 0xe10, 0xe14,
0xe18, 0xe1c, 0xe28, 0xe30, 0xe34, 0xe38, 0xe3c, 0xe40, 0xe44,
0xe48, 0xe4c, 0xe50, 0xe54, 0xe58, 0xe5c, 0xe60, 0xe68, 0xe6c,
0xe70, 0xe74, 0xe78, 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c, 0xed0,
0xed4, 0xed8, 0xedc, 0xee0, 0xeec, 0xee8, 0xf14, 0xf4c, 0xf00
};
static const uint32_t rtl8188ru_bb_vals[] = {
0x0011800d, 0x00ffdb83, 0x000c0004, 0x80040000, 0x00000001,
0x0000fc00, 0x0000000a, 0x10005388, 0x020c3d10, 0x02200385,
0x00000000, 0x01000100, 0x00390204, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00010000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x569a569a, 0x001b25a4, 0x66e60230, 0x061f0130, 0x00000000,
0x32323200, 0x03000300, 0x22004000, 0x00000808, 0x00ffc3f1,
0xc0083070, 0x000004d5, 0x00000000, 0xccc000c0, 0x00000800,
0xfffffffe, 0x40302010, 0x00706050, 0x00000000, 0x00000023,
0x00000000, 0x81121111, 0x00d047c8, 0x80ff000c, 0x8c838300,
0x2e68120f, 0x9500bb78, 0x11144028, 0x00881117, 0x89140f00,
0x15160000, 0x070b0f12, 0x00000104, 0x00d30000, 0x101fbf00,
0x00000007, 0x48071d40, 0x03a05611, 0x000000e4, 0x6c6c6c6c,
0x08800000, 0x40000100, 0x08800000, 0x40000100, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x69e9ac44, 0x469652cf,
0x49795994, 0x0a97971c, 0x1f7c403f, 0x000100b7, 0xec020107,
0x007f037f, 0x6954342e, 0x43bc0094, 0x6954342f, 0x433c0094,
0x00000000, 0x5116848b, 0x47c00bff, 0x00000036, 0x2c56000d,
0x018610db, 0x0000001f, 0x00b91612, 0x24000090, 0x20f60000,
0x24000090, 0x20200000, 0x00121820, 0x00000000, 0x00121820,
0x00007f7f, 0x00000000, 0x00000080, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x28000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x64b22427, 0x00766932, 0x00222222, 0x00000000, 0x37644302,
0x2f97d40c, 0x00080740, 0x00020401, 0x0000907f, 0x20010201,
0xa0633333, 0x3333bc43, 0x7a8f5b6b, 0xcc979975, 0x00000000,
0x80608000, 0x00000000, 0x00027293, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x6437140a, 0x00000000, 0x00000000,
0x30032064, 0x4653de68, 0x04518a3c, 0x00002101, 0x2a201c16,
0x1812362e, 0x322c2220, 0x000e3c24, 0x2a2a2a2a, 0x2a2a2a2a,
0x03902a2a, 0x2a2a2a2a, 0x2a2a2a2a, 0x2a2a2a2a, 0x2a2a2a2a,
0x00000000, 0x1000dc1f, 0x10008c1f, 0x02140102, 0x681604c2,
0x01007c00, 0x01004800, 0xfb000000, 0x000028d1, 0x1000dc1f,
0x10008c1f, 0x02140102, 0x28160d05, 0x00000010, 0x001b25a4,
0x631b25a0, 0x631b25a0, 0x081b25a0, 0x081b25a0, 0x081b25a0,
0x081b25a0, 0x631b25a0, 0x081b25a0, 0x631b25a0, 0x631b25a0,
0x631b25a0, 0x631b25a0, 0x001b25a0, 0x001b25a0, 0x6b1b25a0,
0x31555448, 0x00000003, 0x00000000, 0x00000300
};
static const uint32_t rtl8188ru_agc_vals[] = {
0x7b000001, 0x7b010001, 0x7b020001, 0x7b030001, 0x7b040001,
0x7b050001, 0x7b060001, 0x7b070001, 0x7b080001, 0x7a090001,
0x790a0001, 0x780b0001, 0x770c0001, 0x760d0001, 0x750e0001,
0x740f0001, 0x73100001, 0x72110001, 0x71120001, 0x70130001,
0x6f140001, 0x6e150001, 0x6d160001, 0x6c170001, 0x6b180001,
0x6a190001, 0x691a0001, 0x681b0001, 0x671c0001, 0x661d0001,
0x651e0001, 0x641f0001, 0x63200001, 0x62210001, 0x61220001,
0x60230001, 0x46240001, 0x45250001, 0x44260001, 0x43270001,
0x42280001, 0x41290001, 0x402a0001, 0x262b0001, 0x252c0001,
0x242d0001, 0x232e0001, 0x222f0001, 0x21300001, 0x20310001,
0x06320001, 0x05330001, 0x04340001, 0x03350001, 0x02360001,
0x01370001, 0x00380001, 0x00390001, 0x003a0001, 0x003b0001,
0x003c0001, 0x003d0001, 0x003e0001, 0x003f0001, 0x7b400001,
0x7b410001, 0x7b420001, 0x7b430001, 0x7b440001, 0x7b450001,
0x7b460001, 0x7b470001, 0x7b480001, 0x7a490001, 0x794a0001,
0x784b0001, 0x774c0001, 0x764d0001, 0x754e0001, 0x744f0001,
0x73500001, 0x72510001, 0x71520001, 0x70530001, 0x6f540001,
0x6e550001, 0x6d560001, 0x6c570001, 0x6b580001, 0x6a590001,
0x695a0001, 0x685b0001, 0x675c0001, 0x665d0001, 0x655e0001,
0x645f0001, 0x63600001, 0x62610001, 0x61620001, 0x60630001,
0x46640001, 0x45650001, 0x44660001, 0x43670001, 0x42680001,
0x41690001, 0x406a0001, 0x266b0001, 0x256c0001, 0x246d0001,
0x236e0001, 0x226f0001, 0x21700001, 0x20710001, 0x06720001,
0x05730001, 0x04740001, 0x03750001, 0x02760001, 0x01770001,
0x00780001, 0x00790001, 0x007a0001, 0x007b0001, 0x007c0001,
0x007d0001, 0x007e0001, 0x007f0001, 0x3800001e, 0x3801001e,
0x3802001e, 0x3803001e, 0x3804001e, 0x3805001e, 0x3806001e,
0x3807001e, 0x3808001e, 0x3c09001e, 0x3e0a001e, 0x400b001e,
0x440c001e, 0x480d001e, 0x4c0e001e, 0x500f001e, 0x5210001e,
0x5611001e, 0x5a12001e, 0x5e13001e, 0x6014001e, 0x6015001e,
0x6016001e, 0x6217001e, 0x6218001e, 0x6219001e, 0x621a001e,
0x621b001e, 0x621c001e, 0x621d001e, 0x621e001e, 0x621f001e
};
static const struct urtwn_bb_prog rtl8188ru_bb_prog = {
nitems(rtl8188ru_bb_regs),
rtl8188ru_bb_regs,
rtl8188ru_bb_vals,
nitems(rtl8188ru_agc_vals),
rtl8188ru_agc_vals
};
/*
* RF initialization values.
*/
struct urtwn_rf_prog {
int count;
const uint8_t *regs;
const uint32_t *vals;
};
/*
* RTL8192CU and RTL8192CE-VAU.
*/
static const uint8_t rtl8192ce_rf1_regs[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22,
0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2a, 0x2b,
0x2a, 0x2b, 0x2b, 0x2c, 0x2a, 0x2b, 0x2b, 0x2c, 0x2a, 0x2b, 0x2b,
0x2c, 0x2a, 0x2b, 0x2b, 0x2c, 0x2a, 0x2b, 0x2b, 0x2c, 0x2a, 0x2b,
0x2b, 0x2c, 0x2a, 0x2b, 0x2b, 0x2c, 0x2a, 0x2b, 0x2b, 0x2c, 0x2a,
0x2b, 0x2b, 0x2c, 0x2a, 0x2b, 0x2b, 0x2c, 0x2a, 0x2b, 0x2b, 0x2c,
0x2a, 0x2b, 0x2b, 0x2c, 0x2a, 0x2b, 0x2b, 0x2c, 0x2a, 0x2b, 0x2b,
0x2c, 0x2a, 0x10, 0x11, 0x10, 0x11, 0x10, 0x11, 0x10, 0x11, 0x10,
0x11, 0x10, 0x11, 0x10, 0x11, 0x12, 0x12, 0x12, 0x12, 0x13, 0x13,
0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x14, 0x14,
0x14, 0x14, 0x15, 0x15, 0x15, 0x15, 0x16, 0x16, 0x16, 0x16, 0x00,
0x18, 0xfe, 0xfe, 0x1f, 0xfe, 0xfe, 0x1e, 0x1f, 0x00
};
static const uint32_t rtl8192ce_rf1_vals[] = {
0x30159, 0x31284, 0x98000, 0x18c63, 0x210e7, 0x2044f, 0x1adb1,
0x54867, 0x8992e, 0x0e52c, 0x39ce7, 0x00451, 0x00000, 0x10255,
0x60a00, 0xfc378, 0xa1250, 0x4445f, 0x80001, 0x0b614, 0x6c000,
0x00000, 0x01558, 0x00060, 0x00483, 0x4f000, 0xec7d9, 0x577c0,
0x04783, 0x00001, 0x21334, 0x00000, 0x00054, 0x00001, 0x00808,
0x53333, 0x0000c, 0x00002, 0x00808, 0x5b333, 0x0000d, 0x00003,
0x00808, 0x63333, 0x0000d, 0x00004, 0x00808, 0x6b333, 0x0000d,
0x00005, 0x00808, 0x73333, 0x0000d, 0x00006, 0x00709, 0x5b333,
0x0000d, 0x00007, 0x00709, 0x63333, 0x0000d, 0x00008, 0x0060a,
0x4b333, 0x0000d, 0x00009, 0x0060a, 0x53333, 0x0000d, 0x0000a,
0x0060a, 0x5b333, 0x0000d, 0x0000b, 0x0060a, 0x63333, 0x0000d,
0x0000c, 0x0060a, 0x6b333, 0x0000d, 0x0000d, 0x0060a, 0x73333,
0x0000d, 0x0000e, 0x0050b, 0x66666, 0x0001a, 0xe0000, 0x4000f,
0xe31fc, 0x6000f, 0xff9f8, 0x2000f, 0x203f9, 0x3000f, 0xff500,
0x00000, 0x00000, 0x8000f, 0x3f100, 0x9000f, 0x23100, 0x32000,
0x71000, 0xb0000, 0xfc000, 0x287af, 0x244b7, 0x204ab, 0x1c49f,
0x18493, 0x14297, 0x10295, 0x0c298, 0x0819c, 0x040a8, 0x0001c,
0x1944c, 0x59444, 0x9944c, 0xd9444, 0x0f424, 0x4f424, 0x8f424,
0xcf424, 0xe0330, 0xa0330, 0x60330, 0x20330, 0x10159, 0x0f401,
0x00000, 0x00000, 0x80003, 0x00000, 0x00000, 0x44457, 0x80000,
0x30159
};
static const uint8_t rtl8192ce_rf2_regs[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
0x0f, 0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
0x13, 0x13, 0x13, 0x13, 0x13, 0x14, 0x14, 0x14, 0x14, 0x15, 0x15,
0x15, 0x15, 0x16, 0x16, 0x16, 0x16
};
static const uint32_t rtl8192ce_rf2_vals[] = {
0x30159, 0x31284, 0x98000, 0x18c63, 0x210e7, 0x2044f, 0x1adb1,
0x54867, 0x8992e, 0x0e52c, 0x39ce7, 0x00451, 0x32000, 0x71000,
0xb0000, 0xfc000, 0x287af, 0x244b7, 0x204ab, 0x1c49f, 0x18493,
0x14297, 0x10295, 0x0c298, 0x0819c, 0x040a8, 0x0001c, 0x1944c,
0x59444, 0x9944c, 0xd9444, 0x0f424, 0x4f424, 0x8f424, 0xcf424,
0xe0330, 0xa0330, 0x60330, 0x20330
};
static const struct urtwn_rf_prog rtl8192ce_rf_prog[] = {
{
nitems(rtl8192ce_rf1_regs),
rtl8192ce_rf1_regs,
rtl8192ce_rf1_vals
},
{
nitems(rtl8192ce_rf2_regs),
rtl8192ce_rf2_regs,
rtl8192ce_rf2_vals
}
};
/*
* RTL8188CE-VAU.
*/
static const uint32_t rtl8188ce_rf_vals[] = {
0x30159, 0x31284, 0x98000, 0x18c63, 0x210e7, 0x2044f, 0x1adb1,
0x54867, 0x8992e, 0x0e52c, 0x39ce7, 0x00451, 0x00000, 0x10255,
0x60a00, 0xfc378, 0xa1250, 0x4445f, 0x80001, 0x0b614, 0x6c000,
0x00000, 0x01558, 0x00060, 0x00483, 0x4f200, 0xec7d9, 0x577c0,
0x04783, 0x00001, 0x21334, 0x00000, 0x00054, 0x00001, 0x00808,
0x53333, 0x0000c, 0x00002, 0x00808, 0x5b333, 0x0000d, 0x00003,
0x00808, 0x63333, 0x0000d, 0x00004, 0x00808, 0x6b333, 0x0000d,
0x00005, 0x00808, 0x73333, 0x0000d, 0x00006, 0x00709, 0x5b333,
0x0000d, 0x00007, 0x00709, 0x63333, 0x0000d, 0x00008, 0x0060a,
0x4b333, 0x0000d, 0x00009, 0x0060a, 0x53333, 0x0000d, 0x0000a,
0x0060a, 0x5b333, 0x0000d, 0x0000b, 0x0060a, 0x63333, 0x0000d,
0x0000c, 0x0060a, 0x6b333, 0x0000d, 0x0000d, 0x0060a, 0x73333,
0x0000d, 0x0000e, 0x0050b, 0x66666, 0x0001a, 0xe0000, 0x4000f,
0xe31fc, 0x6000f, 0xff9f8, 0x2000f, 0x203f9, 0x3000f, 0xff500,
0x00000, 0x00000, 0x8000f, 0x3f100, 0x9000f, 0x23100, 0x32000,
0x71000, 0xb0000, 0xfc000, 0x287b3, 0x244b7, 0x204ab, 0x1c49f,
0x18493, 0x1429b, 0x10299, 0x0c29c, 0x081a0, 0x040ac, 0x00020,
0x1944c, 0x59444, 0x9944c, 0xd9444, 0x0f424, 0x4f424, 0x8f424,
0xcf424, 0xe0330, 0xa0330, 0x60330, 0x20330, 0x10159, 0x0f401,
0x00000, 0x00000, 0x80003, 0x00000, 0x00000, 0x44457, 0x80000,
0x30159
};
static const struct urtwn_rf_prog rtl8188ce_rf_prog[] = {
{
nitems(rtl8192ce_rf1_regs),
rtl8192ce_rf1_regs,
rtl8188ce_rf_vals
}
};
/*
* RTL8188CU.
*/
static const uint32_t rtl8188cu_rf_vals[] = {
0x30159, 0x31284, 0x98000, 0x18c63, 0x210e7, 0x2044f, 0x1adb1,
0x54867, 0x8992e, 0x0e52c, 0x39ce7, 0x00451, 0x00000, 0x10255,
0x60a00, 0xfc378, 0xa1250, 0x4445f, 0x80001, 0x0b614, 0x6c000,
0x00000, 0x01558, 0x00060, 0x00483, 0x4f000, 0xec7d9, 0x577c0,
0x04783, 0x00001, 0x21334, 0x00000, 0x00054, 0x00001, 0x00808,
0x53333, 0x0000c, 0x00002, 0x00808, 0x5b333, 0x0000d, 0x00003,
0x00808, 0x63333, 0x0000d, 0x00004, 0x00808, 0x6b333, 0x0000d,
0x00005, 0x00808, 0x73333, 0x0000d, 0x00006, 0x00709, 0x5b333,
0x0000d, 0x00007, 0x00709, 0x63333, 0x0000d, 0x00008, 0x0060a,
0x4b333, 0x0000d, 0x00009, 0x0060a, 0x53333, 0x0000d, 0x0000a,
0x0060a, 0x5b333, 0x0000d, 0x0000b, 0x0060a, 0x63333, 0x0000d,
0x0000c, 0x0060a, 0x6b333, 0x0000d, 0x0000d, 0x0060a, 0x73333,
0x0000d, 0x0000e, 0x0050b, 0x66666, 0x0001a, 0xe0000, 0x4000f,
0xe31fc, 0x6000f, 0xff9f8, 0x2000f, 0x203f9, 0x3000f, 0xff500,
0x00000, 0x00000, 0x8000f, 0x3f100, 0x9000f, 0x23100, 0x32000,
0x71000, 0xb0000, 0xfc000, 0x287b3, 0x244b7, 0x204ab, 0x1c49f,
0x18493, 0x1429b, 0x10299, 0x0c29c, 0x081a0, 0x040ac, 0x00020,
0x1944c, 0x59444, 0x9944c, 0xd9444, 0x0f405, 0x4f405, 0x8f405,
0xcf405, 0xe0330, 0xa0330, 0x60330, 0x20330, 0x10159, 0x0f401,
0x00000, 0x00000, 0x80003, 0x00000, 0x00000, 0x44457, 0x80000,
0x30159
};
static const struct urtwn_rf_prog rtl8188cu_rf_prog[] = {
{
nitems(rtl8192ce_rf1_regs),
rtl8192ce_rf1_regs,
rtl8188cu_rf_vals
}
};
/*
* RTL8188EU.
*/
static const uint8_t rtl8188eu_rf_regs[] = {
0x00, 0x08, 0x18, 0x19, 0x1e, 0x1f, 0x2f, 0x3f, 0x42, 0x57,
0x58, 0x67, 0x83, 0xb0, 0xb1, 0xb2, 0xb4, 0xb6, 0xb7, 0xb8,
0xb9, 0xba, 0xbb, 0xbf, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
0xc8, 0xc9, 0xca, 0xdf, 0xef, 0x51, 0x52, 0x53, 0x56,
0x35, 0x35, 0x35, 0x36, 0x36, 0x36, 0x36, 0xb6, 0x18, 0x5a,
0x19, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34, 0x34,
0x34, 0x34, 0x00, 0x84, 0x86, 0x87, 0x8e, 0x8f, 0xef, 0x3b,
0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0x3b,
0x3b, 0x3b, 0x3b, 0x3b, 0x3b, 0xef, 0x00, 0x18, 0xfe, 0xfe,
0x1f, 0xfe, 0xfe, 0x1e, 0x1f, 0x00
};
static const uint32_t rtl8188eu_rf_vals[] = {
0x30000, 0x84000, 0x00407, 0x00012, 0x80009, 0x00880, 0x1a060,
0x00000, 0x060c0, 0xd0000, 0xbe180, 0x01552, 0x00000, 0xff8fc,
0x54400, 0xccc19, 0x43003, 0x4953e, 0x1c718, 0x060ff, 0x80001,
0x40000, 0x00400, 0xc0000, 0x02400, 0x00009, 0x40c91, 0x99999,
0x000a3, 0x88820, 0x76c06, 0x00000, 0x80000, 0x00180, 0x001a0,
0x6b27d, 0x7e49d, 0x00073, 0x51ff3, 0x00086, 0x00186,
0x00286, 0x01c25, 0x09c25, 0x11c25, 0x19c25, 0x48538, 0x00c07,
0x4bd00, 0x739d0, 0x0adf3, 0x09df0, 0x08ded, 0x07dea, 0x06de7,
0x054ee, 0x044eb, 0x034e8, 0x0246b, 0x01468, 0x0006d, 0x30159,
0x68200, 0x000ce, 0x48a00, 0x65540, 0x88000, 0x020a0, 0xf02b0,
0xef7b0, 0xd4fb0, 0xcf060, 0xb0090, 0xa0080, 0x90080, 0x8f780,
0x722b0, 0x6f7b0, 0x54fb0, 0x4f060, 0x30090, 0x20080, 0x10080,
0x0f780, 0x000a0, 0x10159, 0x0f407, 0x00000, 0x00000, 0x80003,
0x00000, 0x00000, 0x00001, 0x80000, 0x33e60
};
static const struct urtwn_rf_prog rtl8188eu_rf_prog[] = {
{
nitems(rtl8188eu_rf_regs),
rtl8188eu_rf_regs,
rtl8188eu_rf_vals
}
};
/*
* RTL8188RU.
*/
static const uint32_t rtl8188ru_rf_vals[] = {
0x30159, 0x31284, 0x98000, 0x18c63, 0x210e7, 0x2044f, 0x1adb0,
0x54867, 0x8992e, 0x0e529, 0x39ce7, 0x00451, 0x00000, 0x00255,
0x60a00, 0xfc378, 0xa1250, 0x4445f, 0x80001, 0x0b614, 0x6c000,
0x0083c, 0x01558, 0x00060, 0x00483, 0x4f000, 0xec7d9, 0x977c0,
0x04783, 0x00001, 0x21334, 0x00000, 0x00054, 0x00001, 0x00808,
0x53333, 0x0000c, 0x00002, 0x00808, 0x5b333, 0x0000d, 0x00003,
0x00808, 0x63333, 0x0000d, 0x00004, 0x00808, 0x6b333, 0x0000d,
0x00005, 0x00808, 0x73333, 0x0000d, 0x00006, 0x00709, 0x5b333,
0x0000d, 0x00007, 0x00709, 0x63333, 0x0000d, 0x00008, 0x0060a,
0x4b333, 0x0000d, 0x00009, 0x0060a, 0x53333, 0x0000d, 0x0000a,
0x0060a, 0x5b333, 0x0000d, 0x0000b, 0x0060a, 0x63333, 0x0000d,
0x0000c, 0x0060a, 0x6b333, 0x0000d, 0x0000d, 0x0060a, 0x73333,
0x0000d, 0x0000e, 0x0050b, 0x66666, 0x0001a, 0xe0000, 0x4000f,
0xe31fc, 0x6000f, 0xff9f8, 0x2000f, 0x203f9, 0x3000f, 0xff500,
0x00000, 0x00000, 0x8000f, 0x3f100, 0x9000f, 0x23100, 0xd8000,
0x90000, 0x51000, 0x12000, 0x28fb4, 0x24fa8, 0x207a4, 0x1c798,
0x183a4, 0x14398, 0x101a4, 0x0c198, 0x080a4, 0x04098, 0x00014,
0x1944c, 0x59444, 0x9944c, 0xd9444, 0x0f405, 0x4f405, 0x8f405,
0xcf405, 0xe0330, 0xa0330, 0x60330, 0x20330, 0x10159, 0x0f401,
0x00000, 0x00000, 0x80003, 0x00000, 0x00000, 0x44457, 0x80000,
0x30159
};
static const struct urtwn_rf_prog rtl8188ru_rf_prog[] = {
{
nitems(rtl8192ce_rf1_regs),
rtl8192ce_rf1_regs,
rtl8188ru_rf_vals
}
};
struct urtwn_txpwr {
uint8_t pwr[3][28];
};
struct urtwn_r88e_txpwr {
uint8_t pwr[6][28];
};
/*
* Per RF chain/group/rate Tx gain values.
*/
static const struct urtwn_txpwr rtl8192cu_txagc[] = {
{ { /* Chain 0. */
{ /* Group 0. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x0c, 0x0c, 0x0c, 0x0a, 0x08, 0x06, 0x04, 0x02, /* OFDM6~54. */
0x0e, 0x0d, 0x0c, 0x0a, 0x08, 0x06, 0x04, 0x02, /* MCS0~7. */
0x0e, 0x0d, 0x0c, 0x0a, 0x08, 0x06, 0x04, 0x02 /* MCS8~15. */
},
{ /* Group 1. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* OFDM6~54. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* MCS0~7. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* MCS8~15. */
},
{ /* Group 2. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x04, 0x04, 0x04, 0x04, 0x04, 0x02, 0x02, 0x00, /* OFDM6~54. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* MCS0~7. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* MCS8~15. */
}
} },
{ { /* Chain 1. */
{ /* Group 0. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* OFDM6~54. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* MCS0~7. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* MCS8~15. */
},
{ /* Group 1. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* OFDM6~54. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* MCS0~7. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* MCS8~15. */
},
{ /* Group 2. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x04, 0x04, 0x04, 0x04, 0x04, 0x02, 0x02, 0x00, /* OFDM6~54. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* MCS0~7. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* MCS8~15. */
}
} }
};
static const struct urtwn_txpwr rtl8188ru_txagc[] = {
{ { /* Chain 0. */
{ /* Group 0. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x08, 0x08, 0x08, 0x06, 0x06, 0x04, 0x04, 0x00, /* OFDM6~54. */
0x08, 0x06, 0x06, 0x04, 0x04, 0x02, 0x02, 0x00, /* MCS0~7. */
0x08, 0x06, 0x06, 0x04, 0x04, 0x02, 0x02, 0x00 /* MCS8~15. */
},
{ /* Group 1. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* OFDM6~54. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* MCS0~7. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* MCS8~15. */
},
{ /* Group 2. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* OFDM6~54. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* MCS0~7. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* MCS8~15. */
}
} }
};
static const struct urtwn_r88e_txpwr rtl8188eu_txagc[] = {
{ { /* Chain 0. */
{ /* Group 0. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* OFDM6~54. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* MCS0~7. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* MCS8~15. */
},
{ /* Group 1. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* OFDM6~54. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* MCS0~7. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* MCS8~15. */
},
{ /* Group 2. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* OFDM6~54. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* MCS0~7. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* MCS8~15. */
},
{ /* Group 3. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* OFDM6~54. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* MCS0~7. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* MCS8~15. */
},
{ /* Group 4. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* OFDM6~54. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* MCS0~7. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* MCS8~15. */
},
{ /* Group 5. */
0x00, 0x00, 0x00, 0x00, /* CCK1~11. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* OFDM6~54. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* MCS0~7. */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 /* MCS8~15. */
}
} }
};
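/*
 * Illustration only: the Tx AGC tables above are indexed per RF chain, then
 * by channel group and rate slot; the 28 rate slots are laid out as
 * CCK1~11, OFDM6~54, MCS0~7, MCS8~15 (see the inline comments).  chain,
 * group and ridx below are hypothetical locals.
 */
#if 0	/* example sketch */
	uint8_t gain;

	gain = rtl8192cu_txagc[chain].pwr[group][ridx];
#endif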
Index: head/sys/dev/usb/wlan/if_urtwvar.h
===================================================================
--- head/sys/dev/usb/wlan/if_urtwvar.h (revision 287196)
+++ head/sys/dev/usb/wlan/if_urtwvar.h (revision 287197)
@@ -1,188 +1,188 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2008 Weongyo Jeong <weongyo@FreeBSD.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
enum {
URTW_8187B_BULK_RX,
URTW_8187B_BULK_TX_STATUS,
URTW_8187B_BULK_TX_BE,
URTW_8187B_BULK_TX_BK,
URTW_8187B_BULK_TX_VI,
URTW_8187B_BULK_TX_VO,
URTW_8187B_BULK_TX_EP12,
URTW_8187B_N_XFERS = 7
};
enum {
URTW_8187L_BULK_RX,
URTW_8187L_BULK_TX_LOW,
URTW_8187L_BULK_TX_NORMAL,
URTW_8187L_N_XFERS = 3
};
/* XXX no definition in net80211? */
#define URTW_MAX_CHANNELS 15
struct urtw_data {
struct urtw_softc *sc;
uint8_t *buf;
uint16_t buflen;
struct mbuf *m;
struct ieee80211_node *ni; /* NB: tx only */
STAILQ_ENTRY(urtw_data) next;
};
typedef STAILQ_HEAD(, urtw_data) urtw_datahead;
/* XXX not correct.. */
#define URTW_MIN_RXBUFSZ \
(sizeof(struct ieee80211_frame_min))
#define URTW_RX_DATA_LIST_COUNT 4
#define URTW_TX_DATA_LIST_COUNT 16
#define URTW_RX_MAXSIZE 0x9c4
#define URTW_TX_MAXSIZE 0x9c4
#define URTW_TX_MAXRETRY 11
struct urtw_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint8_t wr_flags;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_dbm_antsignal;
} __packed __aligned(8);
#define URTW_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL))
struct urtw_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
} __packed __aligned(8);
#define URTW_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_CHANNEL))
struct urtw_stats {
unsigned int txrates[12];
};
struct urtw_vap {
struct ieee80211vap vap;
int (*newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define URTW_VAP(vap) ((struct urtw_vap *)(vap))
struct urtw_softc {
- struct ifnet *sc_ifp;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
device_t sc_dev;
struct usb_device *sc_udev;
struct mtx sc_mtx;
void *sc_tx_dma_buf;
int sc_debug;
- int sc_if_flags;
int sc_flags;
#define URTW_INIT_ONCE (1 << 1)
#define URTW_RTL8187B (1 << 2)
#define URTW_RTL8187B_REV_B (1 << 3)
#define URTW_RTL8187B_REV_D (1 << 4)
#define URTW_RTL8187B_REV_E (1 << 5)
#define URTW_DETACHED (1 << 6)
+#define URTW_RUNNING (1 << 7)
enum ieee80211_state sc_state;
int sc_epromtype;
#define URTW_EEPROM_93C46 0
#define URTW_EEPROM_93C56 1
uint8_t sc_crcmon;
- uint8_t sc_bssid[IEEE80211_ADDR_LEN];
struct ieee80211_channel *sc_curchan;
/* for RF */
usb_error_t (*sc_rf_init)(struct urtw_softc *);
usb_error_t (*sc_rf_set_chan)(struct urtw_softc *,
int);
usb_error_t (*sc_rf_set_sens)(struct urtw_softc *,
int);
usb_error_t (*sc_rf_stop)(struct urtw_softc *);
uint8_t sc_rfchip;
uint32_t sc_max_sens;
uint32_t sc_sens;
/* for LED */
struct usb_callout sc_led_ch;
struct task sc_led_task;
uint8_t sc_psr;
uint8_t sc_strategy;
#define URTW_LED_GPIO 1
uint8_t sc_gpio_ledon;
uint8_t sc_gpio_ledinprogress;
uint8_t sc_gpio_ledstate;
uint8_t sc_gpio_ledpin;
uint8_t sc_gpio_blinktime;
uint8_t sc_gpio_blinkstate;
/* RX/TX */
struct usb_xfer *sc_xfer[URTW_8187B_N_XFERS];
#define URTW_PRIORITY_LOW 0
#define URTW_PRIORITY_NORMAL 1
#define URTW_DATA_TIMEOUT 10000 /* 10 sec */
#define URTW_8187B_TXPIPE_BE 0x6 /* best effort */
#define URTW_8187B_TXPIPE_BK 0x7 /* background */
#define URTW_8187B_TXPIPE_VI 0x5 /* video */
#define URTW_8187B_TXPIPE_VO 0x4 /* voice */
#define URTW_8187B_TXPIPE_MAX 4
struct urtw_data sc_rx[URTW_RX_DATA_LIST_COUNT];
urtw_datahead sc_rx_active;
urtw_datahead sc_rx_inactive;
struct urtw_data sc_tx[URTW_TX_DATA_LIST_COUNT];
urtw_datahead sc_tx_active;
urtw_datahead sc_tx_inactive;
urtw_datahead sc_tx_pending;
uint8_t sc_rts_retry;
uint8_t sc_tx_retry;
uint8_t sc_preamble_mode;
#define URTW_PREAMBLE_MODE_SHORT 1
#define URTW_PREAMBLE_MODE_LONG 2
struct callout sc_watchdog_ch;
int sc_txtimer;
int sc_currate;
/* TX power */
uint8_t sc_txpwr_cck[URTW_MAX_CHANNELS];
uint8_t sc_txpwr_cck_base;
uint8_t sc_txpwr_ofdm[URTW_MAX_CHANNELS];
uint8_t sc_txpwr_ofdm_base;
uint8_t sc_acmctl;
uint64_t sc_txstatus; /* only for 8187B */
struct task sc_updateslot_task;
struct urtw_stats sc_stats;
struct urtw_rx_radiotap_header sc_rxtap;
int sc_rxtap_len;
struct urtw_tx_radiotap_header sc_txtap;
int sc_txtap_len;
};
#define URTW_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define URTW_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
#define URTW_ASSERT_LOCKED(sc) mtx_assert(&(sc)->sc_mtx, MA_OWNED)
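/*
 * A minimal sketch of how these lock macros pair with the sc_snd queue
 * added above in the new ic_transmit path (illustrative only; urtw_start()
 * is assumed to be the driver's start routine and m the mbuf handed in by
 * net80211):
 *
 *	URTW_LOCK(sc);
 *	if ((sc->sc_flags & URTW_RUNNING) == 0) {
 *		URTW_UNLOCK(sc);
 *		return (ENXIO);
 *	}
 *	error = mbufq_enqueue(&sc->sc_snd, m);
 *	if (error) {
 *		URTW_UNLOCK(sc);
 *		return (error);
 *	}
 *	urtw_start(sc);
 *	URTW_UNLOCK(sc);
 *	return (0);
 */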
Index: head/sys/dev/usb/wlan/if_zyd.c
===================================================================
--- head/sys/dev/usb/wlan/if_zyd.c (revision 287196)
+++ head/sys/dev/usb/wlan/if_zyd.c (revision 287197)
@@ -1,2977 +1,2912 @@
/* $OpenBSD: if_zyd.c,v 1.52 2007/02/11 00:08:04 jsg Exp $ */
/* $NetBSD: if_zyd.c,v 1.7 2007/06/21 04:04:29 kiyohara Exp $ */
/* $FreeBSD$ */
/*-
* Copyright (c) 2006 by Damien Bergamini <damien.bergamini@free.fr>
* Copyright (c) 2006 by Florian Stoehr <ich@florian-stoehr.de>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* ZyDAS ZD1211/ZD1211B USB WLAN driver.
*/
#include <sys/param.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kdb.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#endif
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_ratectl.h>
#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"
#include <dev/usb/wlan/if_zydreg.h>
#include <dev/usb/wlan/if_zydfw.h>
#ifdef USB_DEBUG
static int zyd_debug = 0;
static SYSCTL_NODE(_hw_usb, OID_AUTO, zyd, CTLFLAG_RW, 0, "USB zyd");
SYSCTL_INT(_hw_usb_zyd, OID_AUTO, debug, CTLFLAG_RWTUN, &zyd_debug, 0,
"zyd debug level");
enum {
ZYD_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
ZYD_DEBUG_RECV = 0x00000002, /* basic recv operation */
ZYD_DEBUG_RESET = 0x00000004, /* reset processing */
ZYD_DEBUG_INIT = 0x00000008, /* device init */
ZYD_DEBUG_TX_PROC = 0x00000010, /* tx ISR proc */
ZYD_DEBUG_RX_PROC = 0x00000020, /* rx ISR proc */
ZYD_DEBUG_STATE = 0x00000040, /* 802.11 state transitions */
ZYD_DEBUG_STAT = 0x00000080, /* statistic */
ZYD_DEBUG_FW = 0x00000100, /* firmware */
ZYD_DEBUG_CMD = 0x00000200, /* fw commands */
ZYD_DEBUG_ANY = 0xffffffff
};
#define DPRINTF(sc, m, fmt, ...) do { \
if (zyd_debug & (m)) \
printf("%s: " fmt, __func__, ## __VA_ARGS__); \
} while (0)
#else
#define DPRINTF(sc, m, fmt, ...) do { \
(void) sc; \
} while (0)
#endif
#define zyd_do_request(sc,req,data) \
usbd_do_request_flags((sc)->sc_udev, &(sc)->sc_mtx, req, data, 0, NULL, 5000)
static device_probe_t zyd_match;
static device_attach_t zyd_attach;
static device_detach_t zyd_detach;
static usb_callback_t zyd_intr_read_callback;
static usb_callback_t zyd_intr_write_callback;
static usb_callback_t zyd_bulk_read_callback;
static usb_callback_t zyd_bulk_write_callback;
static struct ieee80211vap *zyd_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void zyd_vap_delete(struct ieee80211vap *);
static void zyd_tx_free(struct zyd_tx_data *, int);
static void zyd_setup_tx_list(struct zyd_softc *);
static void zyd_unsetup_tx_list(struct zyd_softc *);
static int zyd_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static int zyd_cmd(struct zyd_softc *, uint16_t, const void *, int,
void *, int, int);
static int zyd_read16(struct zyd_softc *, uint16_t, uint16_t *);
static int zyd_read32(struct zyd_softc *, uint16_t, uint32_t *);
static int zyd_write16(struct zyd_softc *, uint16_t, uint16_t);
static int zyd_write32(struct zyd_softc *, uint16_t, uint32_t);
static int zyd_rfwrite(struct zyd_softc *, uint32_t);
static int zyd_lock_phy(struct zyd_softc *);
static int zyd_unlock_phy(struct zyd_softc *);
static int zyd_rf_attach(struct zyd_softc *, uint8_t);
static const char *zyd_rf_name(uint8_t);
static int zyd_hw_init(struct zyd_softc *);
static int zyd_read_pod(struct zyd_softc *);
static int zyd_read_eeprom(struct zyd_softc *);
static int zyd_get_macaddr(struct zyd_softc *);
static int zyd_set_macaddr(struct zyd_softc *, const uint8_t *);
static int zyd_set_bssid(struct zyd_softc *, const uint8_t *);
static int zyd_switch_radio(struct zyd_softc *, int);
static int zyd_set_led(struct zyd_softc *, int, int);
static void zyd_set_multi(struct zyd_softc *);
static void zyd_update_mcast(struct ieee80211com *);
static int zyd_set_rxfilter(struct zyd_softc *);
static void zyd_set_chan(struct zyd_softc *, struct ieee80211_channel *);
static int zyd_set_beacon_interval(struct zyd_softc *, int);
static void zyd_rx_data(struct usb_xfer *, int, uint16_t);
static int zyd_tx_start(struct zyd_softc *, struct mbuf *,
struct ieee80211_node *);
-static void zyd_start(struct ifnet *);
+static int zyd_transmit(struct ieee80211com *, struct mbuf *);
+static void zyd_start(struct zyd_softc *);
static int zyd_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
-static int zyd_ioctl(struct ifnet *, u_long, caddr_t);
+static void zyd_parent(struct ieee80211com *);
static void zyd_init_locked(struct zyd_softc *);
-static void zyd_init(void *);
static void zyd_stop(struct zyd_softc *);
static int zyd_loadfirmware(struct zyd_softc *);
static void zyd_scan_start(struct ieee80211com *);
static void zyd_scan_end(struct ieee80211com *);
static void zyd_set_channel(struct ieee80211com *);
static int zyd_rfmd_init(struct zyd_rf *);
static int zyd_rfmd_switch_radio(struct zyd_rf *, int);
static int zyd_rfmd_set_channel(struct zyd_rf *, uint8_t);
static int zyd_al2230_init(struct zyd_rf *);
static int zyd_al2230_switch_radio(struct zyd_rf *, int);
static int zyd_al2230_set_channel(struct zyd_rf *, uint8_t);
static int zyd_al2230_set_channel_b(struct zyd_rf *, uint8_t);
static int zyd_al2230_init_b(struct zyd_rf *);
static int zyd_al7230B_init(struct zyd_rf *);
static int zyd_al7230B_switch_radio(struct zyd_rf *, int);
static int zyd_al7230B_set_channel(struct zyd_rf *, uint8_t);
static int zyd_al2210_init(struct zyd_rf *);
static int zyd_al2210_switch_radio(struct zyd_rf *, int);
static int zyd_al2210_set_channel(struct zyd_rf *, uint8_t);
static int zyd_gct_init(struct zyd_rf *);
static int zyd_gct_switch_radio(struct zyd_rf *, int);
static int zyd_gct_set_channel(struct zyd_rf *, uint8_t);
static int zyd_gct_mode(struct zyd_rf *);
static int zyd_gct_set_channel_synth(struct zyd_rf *, int, int);
static int zyd_gct_write(struct zyd_rf *, uint16_t);
static int zyd_gct_txgain(struct zyd_rf *, uint8_t);
static int zyd_maxim2_init(struct zyd_rf *);
static int zyd_maxim2_switch_radio(struct zyd_rf *, int);
static int zyd_maxim2_set_channel(struct zyd_rf *, uint8_t);
static const struct zyd_phy_pair zyd_def_phy[] = ZYD_DEF_PHY;
static const struct zyd_phy_pair zyd_def_phyB[] = ZYD_DEF_PHYB;
/* various supported device vendors/products */
#define ZYD_ZD1211 0
#define ZYD_ZD1211B 1
#define ZYD_ZD1211_DEV(v,p) \
{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, ZYD_ZD1211) }
#define ZYD_ZD1211B_DEV(v,p) \
{ USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, ZYD_ZD1211B) }
static const STRUCT_USB_HOST_ID zyd_devs[] = {
/* ZYD_ZD1211 */
ZYD_ZD1211_DEV(3COM2, 3CRUSB10075),
ZYD_ZD1211_DEV(ABOCOM, WL54),
ZYD_ZD1211_DEV(ASUS, WL159G),
ZYD_ZD1211_DEV(CYBERTAN, TG54USB),
ZYD_ZD1211_DEV(DRAYTEK, VIGOR550),
ZYD_ZD1211_DEV(PLANEX2, GWUS54GD),
ZYD_ZD1211_DEV(PLANEX2, GWUS54GZL),
ZYD_ZD1211_DEV(PLANEX3, GWUS54GZ),
ZYD_ZD1211_DEV(PLANEX3, GWUS54MINI),
ZYD_ZD1211_DEV(SAGEM, XG760A),
ZYD_ZD1211_DEV(SENAO, NUB8301),
ZYD_ZD1211_DEV(SITECOMEU, WL113),
ZYD_ZD1211_DEV(SWEEX, ZD1211),
ZYD_ZD1211_DEV(TEKRAM, QUICKWLAN),
ZYD_ZD1211_DEV(TEKRAM, ZD1211_1),
ZYD_ZD1211_DEV(TEKRAM, ZD1211_2),
ZYD_ZD1211_DEV(TWINMOS, G240),
ZYD_ZD1211_DEV(UMEDIA, ALL0298V2),
ZYD_ZD1211_DEV(UMEDIA, TEW429UB_A),
ZYD_ZD1211_DEV(UMEDIA, TEW429UB),
ZYD_ZD1211_DEV(WISTRONNEWEB, UR055G),
ZYD_ZD1211_DEV(ZCOM, ZD1211),
ZYD_ZD1211_DEV(ZYDAS, ZD1211),
ZYD_ZD1211_DEV(ZYXEL, AG225H),
ZYD_ZD1211_DEV(ZYXEL, ZYAIRG220),
ZYD_ZD1211_DEV(ZYXEL, G200V2),
/* ZYD_ZD1211B */
ZYD_ZD1211B_DEV(ACCTON, SMCWUSBG_NF),
ZYD_ZD1211B_DEV(ACCTON, SMCWUSBG),
ZYD_ZD1211B_DEV(ACCTON, ZD1211B),
ZYD_ZD1211B_DEV(ASUS, A9T_WIFI),
ZYD_ZD1211B_DEV(BELKIN, F5D7050_V4000),
ZYD_ZD1211B_DEV(BELKIN, ZD1211B),
ZYD_ZD1211B_DEV(CISCOLINKSYS, WUSBF54G),
ZYD_ZD1211B_DEV(FIBERLINE, WL430U),
ZYD_ZD1211B_DEV(MELCO, KG54L),
ZYD_ZD1211B_DEV(PHILIPS, SNU5600),
ZYD_ZD1211B_DEV(PLANEX2, GW_US54GXS),
ZYD_ZD1211B_DEV(SAGEM, XG76NA),
ZYD_ZD1211B_DEV(SITECOMEU, ZD1211B),
ZYD_ZD1211B_DEV(UMEDIA, TEW429UBC1),
ZYD_ZD1211B_DEV(USR, USR5423),
ZYD_ZD1211B_DEV(VTECH, ZD1211B),
ZYD_ZD1211B_DEV(ZCOM, ZD1211B),
ZYD_ZD1211B_DEV(ZYDAS, ZD1211B),
ZYD_ZD1211B_DEV(ZYXEL, M202),
ZYD_ZD1211B_DEV(ZYXEL, G202),
ZYD_ZD1211B_DEV(ZYXEL, G220V2)
};
static const struct usb_config zyd_config[ZYD_N_TRANSFER] = {
[ZYD_BULK_WR] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.bufsize = ZYD_MAX_TXBUFSZ,
.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
.callback = zyd_bulk_write_callback,
.ep_index = 0,
.timeout = 10000, /* 10 seconds */
},
[ZYD_BULK_RD] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_IN,
.bufsize = ZYX_MAX_RXBUFSZ,
.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
.callback = zyd_bulk_read_callback,
.ep_index = 0,
},
[ZYD_INTR_WR] = {
.type = UE_BULK_INTR,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.bufsize = sizeof(struct zyd_cmd),
.flags = {.pipe_bof = 1,.force_short_xfer = 1,},
.callback = zyd_intr_write_callback,
.timeout = 1000, /* 1 second */
.ep_index = 1,
},
[ZYD_INTR_RD] = {
.type = UE_INTERRUPT,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_IN,
.bufsize = sizeof(struct zyd_cmd),
.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
.callback = zyd_intr_read_callback,
},
};
#define zyd_read16_m(sc, val, data) do { \
error = zyd_read16(sc, val, data); \
if (error != 0) \
goto fail; \
} while (0)
#define zyd_write16_m(sc, val, data) do { \
error = zyd_write16(sc, val, data); \
if (error != 0) \
goto fail; \
} while (0)
#define zyd_read32_m(sc, val, data) do { \
error = zyd_read32(sc, val, data); \
if (error != 0) \
goto fail; \
} while (0)
#define zyd_write32_m(sc, val, data) do { \
error = zyd_write32(sc, val, data); \
if (error != 0) \
goto fail; \
} while (0)
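/*
 * The *_m accessor macros above assign to a local "error" variable and
 * jump to a local "fail" label on failure, so register sequences need only
 * a single error path.  Illustrative use (not taken from the driver):
 *
 *	int error;
 *	uint16_t tmp;
 *
 *	zyd_read16_m(sc, ZYD_CR203, &tmp);
 *	zyd_write16_m(sc, ZYD_CR203, tmp | (1 << 4));
 * fail:
 *	return (error);
 */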
static int
zyd_match(device_t dev)
{
struct usb_attach_arg *uaa = device_get_ivars(dev);
if (uaa->usb_mode != USB_MODE_HOST)
return (ENXIO);
if (uaa->info.bConfigIndex != ZYD_CONFIG_INDEX)
return (ENXIO);
if (uaa->info.bIfaceIndex != ZYD_IFACE_INDEX)
return (ENXIO);
return (usbd_lookup_id_by_uaa(zyd_devs, sizeof(zyd_devs), uaa));
}
static int
zyd_attach(device_t dev)
{
struct usb_attach_arg *uaa = device_get_ivars(dev);
struct zyd_softc *sc = device_get_softc(dev);
- struct ifnet *ifp;
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
uint8_t iface_index, bands;
int error;
if (uaa->info.bcdDevice < 0x4330) {
device_printf(dev, "device version mismatch: 0x%X "
"(only >= 43.30 supported)\n",
uaa->info.bcdDevice);
return (EINVAL);
}
device_set_usb_desc(dev);
sc->sc_dev = dev;
sc->sc_udev = uaa->device;
sc->sc_macrev = USB_GET_DRIVER_INFO(uaa);
mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev),
MTX_NETWORK_LOCK, MTX_DEF);
STAILQ_INIT(&sc->sc_rqh);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
iface_index = ZYD_IFACE_INDEX;
error = usbd_transfer_setup(uaa->device,
&iface_index, sc->sc_xfer, zyd_config,
ZYD_N_TRANSFER, sc, &sc->sc_mtx);
if (error) {
device_printf(dev, "could not allocate USB transfers, "
"err=%s\n", usbd_errstr(error));
goto detach;
}
ZYD_LOCK(sc);
if ((error = zyd_get_macaddr(sc)) != 0) {
device_printf(sc->sc_dev, "could not read EEPROM\n");
ZYD_UNLOCK(sc);
goto detach;
}
ZYD_UNLOCK(sc);
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "can not if_alloc()\n");
- goto detach;
- }
- ifp->if_softc = sc;
- if_initname(ifp, "zyd", device_get_unit(sc->sc_dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = zyd_init;
- ifp->if_ioctl = zyd_ioctl;
- ifp->if_start = zyd_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- IFQ_SET_READY(&ifp->if_snd);
-
- ic = ifp->if_l2com;
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
ic->ic_opmode = IEEE80211_M_STA;
/* set device capabilities */
ic->ic_caps =
IEEE80211_C_STA /* station mode */
| IEEE80211_C_MONITOR /* monitor mode */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_BGSCAN /* capable of bg scanning */
| IEEE80211_C_WPA /* 802.11i */
;
bands = 0;
setbit(&bands, IEEE80211_MODE_11B);
setbit(&bands, IEEE80211_MODE_11G);
ieee80211_init_channels(ic, NULL, &bands);
- ieee80211_ifattach(ic, sc->sc_bssid);
+ ieee80211_ifattach(ic);
ic->ic_raw_xmit = zyd_raw_xmit;
ic->ic_scan_start = zyd_scan_start;
ic->ic_scan_end = zyd_scan_end;
ic->ic_set_channel = zyd_set_channel;
-
ic->ic_vap_create = zyd_vap_create;
ic->ic_vap_delete = zyd_vap_delete;
ic->ic_update_mcast = zyd_update_mcast;
ic->ic_update_promisc = zyd_update_mcast;
+ ic->ic_parent = zyd_parent;
+ ic->ic_transmit = zyd_transmit;
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
ZYD_TX_RADIOTAP_PRESENT,
&sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
ZYD_RX_RADIOTAP_PRESENT);
if (bootverbose)
ieee80211_announce(ic);
return (0);
detach:
zyd_detach(dev);
return (ENXIO); /* failure */
}
static int
zyd_detach(device_t dev)
{
struct zyd_softc *sc = device_get_softc(dev);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
unsigned int x;
/*
* Prevent further allocations from RX/TX data
* lists and ioctls:
*/
ZYD_LOCK(sc);
sc->sc_flags |= ZYD_FLAG_DETACHED;
STAILQ_INIT(&sc->tx_q);
STAILQ_INIT(&sc->tx_free);
ZYD_UNLOCK(sc);
/* drain USB transfers */
for (x = 0; x != ZYD_N_TRANSFER; x++)
usbd_transfer_drain(sc->sc_xfer[x]);
/* free TX list, if any */
ZYD_LOCK(sc);
zyd_unsetup_tx_list(sc);
ZYD_UNLOCK(sc);
/* free USB transfers and some data buffers */
usbd_transfer_unsetup(sc->sc_xfer, ZYD_N_TRANSFER);
- if (ifp) {
- ic = ifp->if_l2com;
+ if (ic->ic_softc == sc)
ieee80211_ifdetach(ic);
- if_free(ifp);
- }
+ mbufq_drain(&sc->sc_snd);
mtx_destroy(&sc->sc_mtx);
return (0);
}
static struct ieee80211vap *
zyd_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct zyd_vap *zvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return (NULL);
- zvp = (struct zyd_vap *) malloc(sizeof(struct zyd_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (zvp == NULL)
- return (NULL);
+ zvp = malloc(sizeof(struct zyd_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &zvp->vap;
/* enable s/w bmiss handling for sta mode */
if (ieee80211_vap_setup(ic, vap, name, unit, opmode,
- flags | IEEE80211_CLONE_NOBEACONS, bssid, mac) != 0) {
+ flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) {
/* out of memory */
free(zvp, M_80211_VAP);
return (NULL);
}
/* override state transition machine */
zvp->newstate = vap->iv_newstate;
vap->iv_newstate = zyd_newstate;
ieee80211_ratectl_init(vap);
ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */);
/* complete setup */
ieee80211_vap_attach(vap, ieee80211_media_change,
- ieee80211_media_status);
+ ieee80211_media_status, mac);
ic->ic_opmode = opmode;
return (vap);
}
static void
zyd_vap_delete(struct ieee80211vap *vap)
{
struct zyd_vap *zvp = ZYD_VAP(vap);
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
free(zvp, M_80211_VAP);
}
static void
zyd_tx_free(struct zyd_tx_data *data, int txerr)
{
struct zyd_softc *sc = data->sc;
if (data->m != NULL) {
- if (data->m->m_flags & M_TXCB)
- ieee80211_process_callback(data->ni, data->m,
- txerr ? ETIMEDOUT : 0);
- m_freem(data->m);
+ ieee80211_tx_complete(data->ni, data->m, txerr);
data->m = NULL;
-
- ieee80211_free_node(data->ni);
data->ni = NULL;
}
STAILQ_INSERT_TAIL(&sc->tx_free, data, next);
sc->tx_nfree++;
}
static void
zyd_setup_tx_list(struct zyd_softc *sc)
{
struct zyd_tx_data *data;
int i;
sc->tx_nfree = 0;
STAILQ_INIT(&sc->tx_q);
STAILQ_INIT(&sc->tx_free);
for (i = 0; i < ZYD_TX_LIST_CNT; i++) {
data = &sc->tx_data[i];
data->sc = sc;
STAILQ_INSERT_TAIL(&sc->tx_free, data, next);
sc->tx_nfree++;
}
}
static void
zyd_unsetup_tx_list(struct zyd_softc *sc)
{
struct zyd_tx_data *data;
int i;
/* make sure any subsequent use of the queues will fail */
sc->tx_nfree = 0;
STAILQ_INIT(&sc->tx_q);
STAILQ_INIT(&sc->tx_free);
/* free up all node references and mbufs */
for (i = 0; i < ZYD_TX_LIST_CNT; i++) {
data = &sc->tx_data[i];
if (data->m != NULL) {
m_freem(data->m);
data->m = NULL;
}
if (data->ni != NULL) {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
}
}
static int
zyd_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct zyd_vap *zvp = ZYD_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct zyd_softc *sc = ic->ic_softc;
int error;
DPRINTF(sc, ZYD_DEBUG_STATE, "%s: %s -> %s\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate]);
IEEE80211_UNLOCK(ic);
ZYD_LOCK(sc);
switch (nstate) {
case IEEE80211_S_AUTH:
zyd_set_chan(sc, ic->ic_curchan);
break;
case IEEE80211_S_RUN:
if (vap->iv_opmode == IEEE80211_M_MONITOR)
break;
/* turn link LED on */
error = zyd_set_led(sc, ZYD_LED1, 1);
if (error != 0)
break;
/* make data LED blink upon Tx */
zyd_write32_m(sc, sc->sc_fwbase + ZYD_FW_LINK_STATUS, 1);
- IEEE80211_ADDR_COPY(sc->sc_bssid, vap->iv_bss->ni_bssid);
- zyd_set_bssid(sc, sc->sc_bssid);
+ IEEE80211_ADDR_COPY(ic->ic_macaddr, vap->iv_bss->ni_bssid);
+ zyd_set_bssid(sc, ic->ic_macaddr);
break;
default:
break;
}
fail:
ZYD_UNLOCK(sc);
IEEE80211_LOCK(ic);
return (zvp->newstate(vap, nstate, arg));
}
/*
* Callback handler for interrupt transfer
*/
static void
zyd_intr_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct zyd_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ieee80211_node *ni;
struct zyd_cmd *cmd = &sc->sc_ibuf;
struct usb_page_cache *pc;
int datalen;
int actlen;
usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_out(pc, 0, cmd, sizeof(*cmd));
switch (le16toh(cmd->code)) {
case ZYD_NOTIF_RETRYSTATUS:
{
struct zyd_notif_retry *retry =
(struct zyd_notif_retry *)cmd->data;
DPRINTF(sc, ZYD_DEBUG_TX_PROC,
"retry intr: rate=0x%x addr=%s count=%d (0x%x)\n",
le16toh(retry->rate), ether_sprintf(retry->macaddr),
le16toh(retry->count)&0xff, le16toh(retry->count));
/*
* Find the node to which the packet was sent and
* update its retry statistics. In BSS mode, this node
* is the AP we are associated with, so no lookup is
* actually needed.
*/
ni = ieee80211_find_txnode(vap, retry->macaddr);
if (ni != NULL) {
int retrycnt =
(int)(le16toh(retry->count) & 0xff);
ieee80211_ratectl_tx_complete(vap, ni,
IEEE80211_RATECTL_TX_FAILURE,
&retrycnt, NULL);
ieee80211_free_node(ni);
}
if (le16toh(retry->count) & 0x100)
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* too many retries */
+ /* too many retries */
+ if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS,
+ 1);
break;
}
case ZYD_NOTIF_IORD:
{
struct zyd_rq *rqp;
if (le16toh(*(uint16_t *)cmd->data) == ZYD_CR_INTERRUPT)
break; /* HMAC interrupt */
datalen = actlen - sizeof(cmd->code);
datalen -= 2; /* XXX: padding? */
STAILQ_FOREACH(rqp, &sc->sc_rqh, rq) {
int i;
int count;
if (rqp->olen != datalen)
continue;
count = rqp->olen / sizeof(struct zyd_pair);
for (i = 0; i < count; i++) {
if (*(((const uint16_t *)rqp->idata) + i) !=
(((struct zyd_pair *)cmd->data) + i)->reg)
break;
}
if (i != count)
continue;
/* copy answer into caller-supplied buffer */
memcpy(rqp->odata, cmd->data, rqp->olen);
DPRINTF(sc, ZYD_DEBUG_CMD,
"command %p complete, data = %*D \n",
rqp, rqp->olen, (char *)rqp->odata, ":");
wakeup(rqp); /* wakeup caller */
break;
}
if (rqp == NULL) {
device_printf(sc->sc_dev,
"unexpected IORD notification %*D\n",
datalen, cmd->data, ":");
}
break;
}
default:
device_printf(sc->sc_dev, "unknown notification %x\n",
le16toh(cmd->code));
}
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
usbd_transfer_submit(xfer);
break;
default: /* Error */
DPRINTF(sc, ZYD_DEBUG_CMD, "error = %s\n",
usbd_errstr(error));
if (error != USB_ERR_CANCELLED) {
/* try to clear stall first */
usbd_xfer_set_stall(xfer);
goto tr_setup;
}
break;
}
}
static void
zyd_intr_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct zyd_softc *sc = usbd_xfer_softc(xfer);
struct zyd_rq *rqp, *cmd;
struct usb_page_cache *pc;
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
cmd = usbd_xfer_get_priv(xfer);
DPRINTF(sc, ZYD_DEBUG_CMD, "command %p transferred\n", cmd);
STAILQ_FOREACH(rqp, &sc->sc_rqh, rq) {
/* Ensure the cached rq pointer is still valid */
if (rqp == cmd &&
(rqp->flags & ZYD_CMD_FLAG_READ) == 0)
wakeup(rqp); /* wakeup caller */
}
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
STAILQ_FOREACH(rqp, &sc->sc_rqh, rq) {
if (rqp->flags & ZYD_CMD_FLAG_SENT)
continue;
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_in(pc, 0, rqp->cmd, rqp->ilen);
usbd_xfer_set_frame_len(xfer, 0, rqp->ilen);
usbd_xfer_set_priv(xfer, rqp);
rqp->flags |= ZYD_CMD_FLAG_SENT;
usbd_transfer_submit(xfer);
break;
}
break;
default: /* Error */
DPRINTF(sc, ZYD_DEBUG_ANY, "error = %s\n",
usbd_errstr(error));
if (error != USB_ERR_CANCELLED) {
/* try to clear stall first */
usbd_xfer_set_stall(xfer);
goto tr_setup;
}
break;
}
}
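/*
 * zyd_cmd() links a request onto sc_rqh, kicks the interrupt transfers
 * and sleeps on the request (one second timeout).  Write commands are
 * woken up by zyd_intr_write_callback() once sent; read commands are
 * woken up by zyd_intr_read_callback() when the matching ZYD_NOTIF_IORD
 * reply has been copied into the caller's buffer.
 */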
static int
zyd_cmd(struct zyd_softc *sc, uint16_t code, const void *idata, int ilen,
void *odata, int olen, int flags)
{
struct zyd_cmd cmd;
struct zyd_rq rq;
int error;
if (ilen > (int)sizeof(cmd.data))
return (EINVAL);
cmd.code = htole16(code);
memcpy(cmd.data, idata, ilen);
DPRINTF(sc, ZYD_DEBUG_CMD, "sending cmd %p = %*D\n",
&rq, ilen, idata, ":");
rq.cmd = &cmd;
rq.idata = idata;
rq.odata = odata;
rq.ilen = sizeof(uint16_t) + ilen;
rq.olen = olen;
rq.flags = flags;
STAILQ_INSERT_TAIL(&sc->sc_rqh, &rq, rq);
usbd_transfer_start(sc->sc_xfer[ZYD_INTR_RD]);
usbd_transfer_start(sc->sc_xfer[ZYD_INTR_WR]);
/* wait at most one second for command reply */
error = mtx_sleep(&rq, &sc->sc_mtx, 0, "zydcmd", hz);
if (error)
device_printf(sc->sc_dev, "command timeout\n");
STAILQ_REMOVE(&sc->sc_rqh, &rq, zyd_rq, rq);
DPRINTF(sc, ZYD_DEBUG_CMD, "finished cmd %p, error = %d\n",
&rq, error);
return (error);
}
static int
zyd_read16(struct zyd_softc *sc, uint16_t reg, uint16_t *val)
{
struct zyd_pair tmp;
int error;
reg = htole16(reg);
error = zyd_cmd(sc, ZYD_CMD_IORD, &reg, sizeof(reg), &tmp, sizeof(tmp),
ZYD_CMD_FLAG_READ);
if (error == 0)
*val = le16toh(tmp.val);
return (error);
}
static int
zyd_read32(struct zyd_softc *sc, uint16_t reg, uint32_t *val)
{
struct zyd_pair tmp[2];
uint16_t regs[2];
int error;
regs[0] = htole16(ZYD_REG32_HI(reg));
regs[1] = htole16(ZYD_REG32_LO(reg));
error = zyd_cmd(sc, ZYD_CMD_IORD, regs, sizeof(regs), tmp, sizeof(tmp),
ZYD_CMD_FLAG_READ);
if (error == 0)
*val = le16toh(tmp[0].val) << 16 | le16toh(tmp[1].val);
return (error);
}
static int
zyd_write16(struct zyd_softc *sc, uint16_t reg, uint16_t val)
{
struct zyd_pair pair;
pair.reg = htole16(reg);
pair.val = htole16(val);
return zyd_cmd(sc, ZYD_CMD_IOWR, &pair, sizeof(pair), NULL, 0, 0);
}
static int
zyd_write32(struct zyd_softc *sc, uint16_t reg, uint32_t val)
{
struct zyd_pair pair[2];
pair[0].reg = htole16(ZYD_REG32_HI(reg));
pair[0].val = htole16(val >> 16);
pair[1].reg = htole16(ZYD_REG32_LO(reg));
pair[1].val = htole16(val & 0xffff);
return zyd_cmd(sc, ZYD_CMD_IOWR, pair, sizeof(pair), NULL, 0, 0);
}
static int
zyd_rfwrite(struct zyd_softc *sc, uint32_t val)
{
struct zyd_rf *rf = &sc->sc_rf;
struct zyd_rfwrite_cmd req;
uint16_t cr203;
int error, i;
zyd_read16_m(sc, ZYD_CR203, &cr203);
cr203 &= ~(ZYD_RF_IF_LE | ZYD_RF_CLK | ZYD_RF_DATA);
req.code = htole16(2);
req.width = htole16(rf->width);
for (i = 0; i < rf->width; i++) {
req.bit[i] = htole16(cr203);
if (val & (1 << (rf->width - 1 - i)))
req.bit[i] |= htole16(ZYD_RF_DATA);
}
error = zyd_cmd(sc, ZYD_CMD_RFCFG, &req, 4 + 2 * rf->width, NULL, 0, 0);
fail:
return (error);
}
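/*
 * zyd_rfwrite() above bit-bangs an RF register value: each of the
 * rf->width bits, most significant first, becomes one 16-bit CR203 word
 * with ZYD_RF_DATA set or cleared, and the ZYD_CMD_RFCFG command clocks
 * the whole sequence out to the radio.
 */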
static int
zyd_rfwrite_cr(struct zyd_softc *sc, uint32_t val)
{
int error;
zyd_write16_m(sc, ZYD_CR244, (val >> 16) & 0xff);
zyd_write16_m(sc, ZYD_CR243, (val >> 8) & 0xff);
zyd_write16_m(sc, ZYD_CR242, (val >> 0) & 0xff);
fail:
return (error);
}
static int
zyd_lock_phy(struct zyd_softc *sc)
{
int error;
uint32_t tmp;
zyd_read32_m(sc, ZYD_MAC_MISC, &tmp);
tmp &= ~ZYD_UNLOCK_PHY_REGS;
zyd_write32_m(sc, ZYD_MAC_MISC, tmp);
fail:
return (error);
}
static int
zyd_unlock_phy(struct zyd_softc *sc)
{
int error;
uint32_t tmp;
zyd_read32_m(sc, ZYD_MAC_MISC, &tmp);
tmp |= ZYD_UNLOCK_PHY_REGS;
zyd_write32_m(sc, ZYD_MAC_MISC, tmp);
fail:
return (error);
}
/*
* RFMD RF methods.
*/
static int
zyd_rfmd_init(struct zyd_rf *rf)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
struct zyd_softc *sc = rf->rf_sc;
static const struct zyd_phy_pair phyini[] = ZYD_RFMD_PHY;
static const uint32_t rfini[] = ZYD_RFMD_RF;
int i, error;
/* init RF-dependent PHY registers */
for (i = 0; i < N(phyini); i++) {
zyd_write16_m(sc, phyini[i].reg, phyini[i].val);
}
/* init RFMD radio */
for (i = 0; i < N(rfini); i++) {
if ((error = zyd_rfwrite(sc, rfini[i])) != 0)
return (error);
}
fail:
return (error);
#undef N
}
static int
zyd_rfmd_switch_radio(struct zyd_rf *rf, int on)
{
int error;
struct zyd_softc *sc = rf->rf_sc;
zyd_write16_m(sc, ZYD_CR10, on ? 0x89 : 0x15);
zyd_write16_m(sc, ZYD_CR11, on ? 0x00 : 0x81);
fail:
return (error);
}
static int
zyd_rfmd_set_channel(struct zyd_rf *rf, uint8_t chan)
{
int error;
struct zyd_softc *sc = rf->rf_sc;
static const struct {
uint32_t r1, r2;
} rfprog[] = ZYD_RFMD_CHANTABLE;
error = zyd_rfwrite(sc, rfprog[chan - 1].r1);
if (error != 0)
goto fail;
error = zyd_rfwrite(sc, rfprog[chan - 1].r2);
if (error != 0)
goto fail;
fail:
return (error);
}
/*
* AL2230 RF methods.
*/
static int
zyd_al2230_init(struct zyd_rf *rf)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
struct zyd_softc *sc = rf->rf_sc;
static const struct zyd_phy_pair phyini[] = ZYD_AL2230_PHY;
static const struct zyd_phy_pair phy2230s[] = ZYD_AL2230S_PHY_INIT;
static const struct zyd_phy_pair phypll[] = {
{ ZYD_CR251, 0x2f }, { ZYD_CR251, 0x3f },
{ ZYD_CR138, 0x28 }, { ZYD_CR203, 0x06 }
};
static const uint32_t rfini1[] = ZYD_AL2230_RF_PART1;
static const uint32_t rfini2[] = ZYD_AL2230_RF_PART2;
static const uint32_t rfini3[] = ZYD_AL2230_RF_PART3;
int i, error;
/* init RF-dependent PHY registers */
for (i = 0; i < N(phyini); i++)
zyd_write16_m(sc, phyini[i].reg, phyini[i].val);
if (sc->sc_rfrev == ZYD_RF_AL2230S || sc->sc_al2230s != 0) {
for (i = 0; i < N(phy2230s); i++)
zyd_write16_m(sc, phy2230s[i].reg, phy2230s[i].val);
}
/* init AL2230 radio */
for (i = 0; i < N(rfini1); i++) {
error = zyd_rfwrite(sc, rfini1[i]);
if (error != 0)
goto fail;
}
if (sc->sc_rfrev == ZYD_RF_AL2230S || sc->sc_al2230s != 0)
error = zyd_rfwrite(sc, 0x000824);
else
error = zyd_rfwrite(sc, 0x0005a4);
if (error != 0)
goto fail;
for (i = 0; i < N(rfini2); i++) {
error = zyd_rfwrite(sc, rfini2[i]);
if (error != 0)
goto fail;
}
for (i = 0; i < N(phypll); i++)
zyd_write16_m(sc, phypll[i].reg, phypll[i].val);
for (i = 0; i < N(rfini3); i++) {
error = zyd_rfwrite(sc, rfini3[i]);
if (error != 0)
goto fail;
}
fail:
return (error);
#undef N
}
static int
zyd_al2230_fini(struct zyd_rf *rf)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
int error, i;
struct zyd_softc *sc = rf->rf_sc;
static const struct zyd_phy_pair phy[] = ZYD_AL2230_PHY_FINI_PART1;
for (i = 0; i < N(phy); i++)
zyd_write16_m(sc, phy[i].reg, phy[i].val);
if (sc->sc_newphy != 0)
zyd_write16_m(sc, ZYD_CR9, 0xe1);
zyd_write16_m(sc, ZYD_CR203, 0x6);
fail:
return (error);
#undef N
}
static int
zyd_al2230_init_b(struct zyd_rf *rf)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
struct zyd_softc *sc = rf->rf_sc;
static const struct zyd_phy_pair phy1[] = ZYD_AL2230_PHY_PART1;
static const struct zyd_phy_pair phy2[] = ZYD_AL2230_PHY_PART2;
static const struct zyd_phy_pair phy3[] = ZYD_AL2230_PHY_PART3;
static const struct zyd_phy_pair phy2230s[] = ZYD_AL2230S_PHY_INIT;
static const struct zyd_phy_pair phyini[] = ZYD_AL2230_PHY_B;
static const uint32_t rfini_part1[] = ZYD_AL2230_RF_B_PART1;
static const uint32_t rfini_part2[] = ZYD_AL2230_RF_B_PART2;
static const uint32_t rfini_part3[] = ZYD_AL2230_RF_B_PART3;
static const uint32_t zyd_al2230_chtable[][3] = ZYD_AL2230_CHANTABLE;
int i, error;
for (i = 0; i < N(phy1); i++)
zyd_write16_m(sc, phy1[i].reg, phy1[i].val);
/* init RF-dependent PHY registers */
for (i = 0; i < N(phyini); i++)
zyd_write16_m(sc, phyini[i].reg, phyini[i].val);
if (sc->sc_rfrev == ZYD_RF_AL2230S || sc->sc_al2230s != 0) {
for (i = 0; i < N(phy2230s); i++)
zyd_write16_m(sc, phy2230s[i].reg, phy2230s[i].val);
}
for (i = 0; i < 3; i++) {
error = zyd_rfwrite_cr(sc, zyd_al2230_chtable[0][i]);
if (error != 0)
return (error);
}
for (i = 0; i < N(rfini_part1); i++) {
error = zyd_rfwrite_cr(sc, rfini_part1[i]);
if (error != 0)
return (error);
}
if (sc->sc_rfrev == ZYD_RF_AL2230S || sc->sc_al2230s != 0)
error = zyd_rfwrite(sc, 0x241000);
else
error = zyd_rfwrite(sc, 0x25a000);
if (error != 0)
goto fail;
for (i = 0; i < N(rfini_part2); i++) {
error = zyd_rfwrite_cr(sc, rfini_part2[i]);
if (error != 0)
return (error);
}
for (i = 0; i < N(phy2); i++)
zyd_write16_m(sc, phy2[i].reg, phy2[i].val);
for (i = 0; i < N(rfini_part3); i++) {
error = zyd_rfwrite_cr(sc, rfini_part3[i]);
if (error != 0)
return (error);
}
for (i = 0; i < N(phy3); i++)
zyd_write16_m(sc, phy3[i].reg, phy3[i].val);
error = zyd_al2230_fini(rf);
fail:
return (error);
#undef N
}
static int
zyd_al2230_switch_radio(struct zyd_rf *rf, int on)
{
struct zyd_softc *sc = rf->rf_sc;
int error, on251 = (sc->sc_macrev == ZYD_ZD1211) ? 0x3f : 0x7f;
zyd_write16_m(sc, ZYD_CR11, on ? 0x00 : 0x04);
zyd_write16_m(sc, ZYD_CR251, on ? on251 : 0x2f);
fail:
return (error);
}
static int
zyd_al2230_set_channel(struct zyd_rf *rf, uint8_t chan)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
int error, i;
struct zyd_softc *sc = rf->rf_sc;
static const struct zyd_phy_pair phy1[] = {
{ ZYD_CR138, 0x28 }, { ZYD_CR203, 0x06 },
};
static const struct {
uint32_t r1, r2, r3;
} rfprog[] = ZYD_AL2230_CHANTABLE;
error = zyd_rfwrite(sc, rfprog[chan - 1].r1);
if (error != 0)
goto fail;
error = zyd_rfwrite(sc, rfprog[chan - 1].r2);
if (error != 0)
goto fail;
error = zyd_rfwrite(sc, rfprog[chan - 1].r3);
if (error != 0)
goto fail;
for (i = 0; i < N(phy1); i++)
zyd_write16_m(sc, phy1[i].reg, phy1[i].val);
fail:
return (error);
#undef N
}
static int
zyd_al2230_set_channel_b(struct zyd_rf *rf, uint8_t chan)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
int error, i;
struct zyd_softc *sc = rf->rf_sc;
static const struct zyd_phy_pair phy1[] = ZYD_AL2230_PHY_PART1;
static const struct {
uint32_t r1, r2, r3;
} rfprog[] = ZYD_AL2230_CHANTABLE_B;
for (i = 0; i < N(phy1); i++)
zyd_write16_m(sc, phy1[i].reg, phy1[i].val);
error = zyd_rfwrite_cr(sc, rfprog[chan - 1].r1);
if (error != 0)
goto fail;
error = zyd_rfwrite_cr(sc, rfprog[chan - 1].r2);
if (error != 0)
goto fail;
error = zyd_rfwrite_cr(sc, rfprog[chan - 1].r3);
if (error != 0)
goto fail;
error = zyd_al2230_fini(rf);
fail:
return (error);
#undef N
}
#define ZYD_AL2230_PHY_BANDEDGE6 \
{ \
{ ZYD_CR128, 0x14 }, { ZYD_CR129, 0x12 }, { ZYD_CR130, 0x10 }, \
{ ZYD_CR47, 0x1e } \
}
static int
zyd_al2230_bandedge6(struct zyd_rf *rf, struct ieee80211_channel *c)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
int error = 0, i;
struct zyd_softc *sc = rf->rf_sc;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct zyd_phy_pair r[] = ZYD_AL2230_PHY_BANDEDGE6;
int chan = ieee80211_chan2ieee(ic, c);
if (chan == 1 || chan == 11)
r[0].val = 0x12;
for (i = 0; i < N(r); i++)
zyd_write16_m(sc, r[i].reg, r[i].val);
fail:
return (error);
#undef N
}
/*
* AL7230B RF methods.
*/
static int
zyd_al7230B_init(struct zyd_rf *rf)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
struct zyd_softc *sc = rf->rf_sc;
static const struct zyd_phy_pair phyini_1[] = ZYD_AL7230B_PHY_1;
static const struct zyd_phy_pair phyini_2[] = ZYD_AL7230B_PHY_2;
static const struct zyd_phy_pair phyini_3[] = ZYD_AL7230B_PHY_3;
static const uint32_t rfini_1[] = ZYD_AL7230B_RF_1;
static const uint32_t rfini_2[] = ZYD_AL7230B_RF_2;
int i, error;
/* for AL7230B, PHY and RF need to be initialized in "phases" */
/* init RF-dependent PHY registers, part one */
for (i = 0; i < N(phyini_1); i++)
zyd_write16_m(sc, phyini_1[i].reg, phyini_1[i].val);
/* init AL7230B radio, part one */
for (i = 0; i < N(rfini_1); i++) {
if ((error = zyd_rfwrite(sc, rfini_1[i])) != 0)
return (error);
}
/* init RF-dependent PHY registers, part two */
for (i = 0; i < N(phyini_2); i++)
zyd_write16_m(sc, phyini_2[i].reg, phyini_2[i].val);
/* init AL7230B radio, part two */
for (i = 0; i < N(rfini_2); i++) {
if ((error = zyd_rfwrite(sc, rfini_2[i])) != 0)
return (error);
}
/* init RF-dependent PHY registers, part three */
for (i = 0; i < N(phyini_3); i++)
zyd_write16_m(sc, phyini_3[i].reg, phyini_3[i].val);
fail:
return (error);
#undef N
}
static int
zyd_al7230B_switch_radio(struct zyd_rf *rf, int on)
{
int error;
struct zyd_softc *sc = rf->rf_sc;
zyd_write16_m(sc, ZYD_CR11, on ? 0x00 : 0x04);
zyd_write16_m(sc, ZYD_CR251, on ? 0x3f : 0x2f);
fail:
return (error);
}
static int
zyd_al7230B_set_channel(struct zyd_rf *rf, uint8_t chan)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
struct zyd_softc *sc = rf->rf_sc;
static const struct {
uint32_t r1, r2;
} rfprog[] = ZYD_AL7230B_CHANTABLE;
static const uint32_t rfsc[] = ZYD_AL7230B_RF_SETCHANNEL;
int i, error;
zyd_write16_m(sc, ZYD_CR240, 0x57);
zyd_write16_m(sc, ZYD_CR251, 0x2f);
for (i = 0; i < N(rfsc); i++) {
if ((error = zyd_rfwrite(sc, rfsc[i])) != 0)
return (error);
}
zyd_write16_m(sc, ZYD_CR128, 0x14);
zyd_write16_m(sc, ZYD_CR129, 0x12);
zyd_write16_m(sc, ZYD_CR130, 0x10);
zyd_write16_m(sc, ZYD_CR38, 0x38);
zyd_write16_m(sc, ZYD_CR136, 0xdf);
error = zyd_rfwrite(sc, rfprog[chan - 1].r1);
if (error != 0)
goto fail;
error = zyd_rfwrite(sc, rfprog[chan - 1].r2);
if (error != 0)
goto fail;
error = zyd_rfwrite(sc, 0x3c9000);
if (error != 0)
goto fail;
zyd_write16_m(sc, ZYD_CR251, 0x3f);
zyd_write16_m(sc, ZYD_CR203, 0x06);
zyd_write16_m(sc, ZYD_CR240, 0x08);
fail:
return (error);
#undef N
}
/*
* AL2210 RF methods.
*/
static int
zyd_al2210_init(struct zyd_rf *rf)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
struct zyd_softc *sc = rf->rf_sc;
static const struct zyd_phy_pair phyini[] = ZYD_AL2210_PHY;
static const uint32_t rfini[] = ZYD_AL2210_RF;
uint32_t tmp;
int i, error;
zyd_write32_m(sc, ZYD_CR18, 2);
/* init RF-dependent PHY registers */
for (i = 0; i < N(phyini); i++)
zyd_write16_m(sc, phyini[i].reg, phyini[i].val);
/* init AL2210 radio */
for (i = 0; i < N(rfini); i++) {
if ((error = zyd_rfwrite(sc, rfini[i])) != 0)
return (error);
}
zyd_write16_m(sc, ZYD_CR47, 0x1e);
zyd_read32_m(sc, ZYD_CR_RADIO_PD, &tmp);
zyd_write32_m(sc, ZYD_CR_RADIO_PD, tmp & ~1);
zyd_write32_m(sc, ZYD_CR_RADIO_PD, tmp | 1);
zyd_write32_m(sc, ZYD_CR_RFCFG, 0x05);
zyd_write32_m(sc, ZYD_CR_RFCFG, 0x00);
zyd_write16_m(sc, ZYD_CR47, 0x1e);
zyd_write32_m(sc, ZYD_CR18, 3);
fail:
return (error);
#undef N
}
static int
zyd_al2210_switch_radio(struct zyd_rf *rf, int on)
{
/* vendor driver does nothing for this RF chip */
return (0);
}
static int
zyd_al2210_set_channel(struct zyd_rf *rf, uint8_t chan)
{
int error;
struct zyd_softc *sc = rf->rf_sc;
static const uint32_t rfprog[] = ZYD_AL2210_CHANTABLE;
uint32_t tmp;
zyd_write32_m(sc, ZYD_CR18, 2);
zyd_write16_m(sc, ZYD_CR47, 0x1e);
zyd_read32_m(sc, ZYD_CR_RADIO_PD, &tmp);
zyd_write32_m(sc, ZYD_CR_RADIO_PD, tmp & ~1);
zyd_write32_m(sc, ZYD_CR_RADIO_PD, tmp | 1);
zyd_write32_m(sc, ZYD_CR_RFCFG, 0x05);
zyd_write32_m(sc, ZYD_CR_RFCFG, 0x00);
zyd_write16_m(sc, ZYD_CR47, 0x1e);
/* actually set the channel */
error = zyd_rfwrite(sc, rfprog[chan - 1]);
if (error != 0)
goto fail;
zyd_write32_m(sc, ZYD_CR18, 3);
fail:
return (error);
}
/*
* GCT RF methods.
*/
static int
zyd_gct_init(struct zyd_rf *rf)
{
#define ZYD_GCT_INTR_REG 0x85c1
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
struct zyd_softc *sc = rf->rf_sc;
static const struct zyd_phy_pair phyini[] = ZYD_GCT_PHY;
static const uint32_t rfini[] = ZYD_GCT_RF;
static const uint16_t vco[11][7] = ZYD_GCT_VCO;
int i, idx = -1, error;
uint16_t data;
/* init RF-dependent PHY registers */
for (i = 0; i < N(phyini); i++)
zyd_write16_m(sc, phyini[i].reg, phyini[i].val);
/* init GCT radio */
for (i = 0; i < N(rfini); i++) {
if ((error = zyd_rfwrite(sc, rfini[i])) != 0)
return (error);
}
error = zyd_gct_mode(rf);
if (error != 0)
return (error);
for (i = 0; i < (int)(N(vco) - 1); i++) {
error = zyd_gct_set_channel_synth(rf, 1, 0);
if (error != 0)
goto fail;
error = zyd_gct_write(rf, vco[i][0]);
if (error != 0)
goto fail;
zyd_write16_m(sc, ZYD_GCT_INTR_REG, 0xf);
zyd_read16_m(sc, ZYD_GCT_INTR_REG, &data);
if ((data & 0xf) == 0) {
idx = i;
break;
}
}
if (idx == -1) {
error = zyd_gct_set_channel_synth(rf, 1, 1);
if (error != 0)
goto fail;
error = zyd_gct_write(rf, 0x6662);
if (error != 0)
goto fail;
}
rf->idx = idx;
zyd_write16_m(sc, ZYD_CR203, 0x6);
fail:
return (error);
#undef N
#undef ZYD_GCT_INTR_REG
}
static int
zyd_gct_mode(struct zyd_rf *rf)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
struct zyd_softc *sc = rf->rf_sc;
static const uint32_t mode[] = {
0x25f98, 0x25f9a, 0x25f94, 0x27fd4
};
int i, error;
for (i = 0; i < N(mode); i++) {
if ((error = zyd_rfwrite(sc, mode[i])) != 0)
break;
}
return (error);
#undef N
}
static int
zyd_gct_set_channel_synth(struct zyd_rf *rf, int chan, int acal)
{
int error, idx = chan - 1;
struct zyd_softc *sc = rf->rf_sc;
static uint32_t acal_synth[] = ZYD_GCT_CHANNEL_ACAL;
static uint32_t std_synth[] = ZYD_GCT_CHANNEL_STD;
static uint32_t div_synth[] = ZYD_GCT_CHANNEL_DIV;
error = zyd_rfwrite(sc,
(acal == 1) ? acal_synth[idx] : std_synth[idx]);
if (error != 0)
return (error);
return zyd_rfwrite(sc, div_synth[idx]);
}
static int
zyd_gct_write(struct zyd_rf *rf, uint16_t value)
{
struct zyd_softc *sc = rf->rf_sc;
return zyd_rfwrite(sc, 0x300000 | 0x40000 | value);
}
static int
zyd_gct_switch_radio(struct zyd_rf *rf, int on)
{
int error;
struct zyd_softc *sc = rf->rf_sc;
error = zyd_rfwrite(sc, on ? 0x25f94 : 0x25f90);
if (error != 0)
return (error);
zyd_write16_m(sc, ZYD_CR11, on ? 0x00 : 0x04);
zyd_write16_m(sc, ZYD_CR251,
on ? ((sc->sc_macrev == ZYD_ZD1211B) ? 0x7f : 0x3f) : 0x2f);
fail:
return (error);
}
static int
zyd_gct_set_channel(struct zyd_rf *rf, uint8_t chan)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
int error, i;
struct zyd_softc *sc = rf->rf_sc;
static const struct zyd_phy_pair cmd[] = {
{ ZYD_CR80, 0x30 }, { ZYD_CR81, 0x30 }, { ZYD_CR79, 0x58 },
{ ZYD_CR12, 0xf0 }, { ZYD_CR77, 0x1b }, { ZYD_CR78, 0x58 },
};
static const uint16_t vco[11][7] = ZYD_GCT_VCO;
error = zyd_gct_set_channel_synth(rf, chan, 0);
if (error != 0)
goto fail;
error = zyd_gct_write(rf, (rf->idx == -1) ? 0x6662 :
vco[rf->idx][((chan - 1) / 2)]);
if (error != 0)
goto fail;
error = zyd_gct_mode(rf);
if (error != 0)
return (error);
for (i = 0; i < N(cmd); i++)
zyd_write16_m(sc, cmd[i].reg, cmd[i].val);
error = zyd_gct_txgain(rf, chan);
if (error != 0)
return (error);
zyd_write16_m(sc, ZYD_CR203, 0x6);
fail:
return (error);
#undef N
}
static int
zyd_gct_txgain(struct zyd_rf *rf, uint8_t chan)
{
#define N(a) (sizeof(a) / sizeof((a)[0]))
struct zyd_softc *sc = rf->rf_sc;
static uint32_t txgain[] = ZYD_GCT_TXGAIN;
uint8_t idx = sc->sc_pwrint[chan - 1];
if (idx >= N(txgain)) {
device_printf(sc->sc_dev, "could not set TX gain (%d %#x)\n",
chan, idx);
return 0;
}
return zyd_rfwrite(sc, 0x700000 | txgain[idx]);
#undef N
}
/*
* Maxim2 RF methods.
*/
static int
zyd_maxim2_init(struct zyd_rf *rf)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
struct zyd_softc *sc = rf->rf_sc;
static const struct zyd_phy_pair phyini[] = ZYD_MAXIM2_PHY;
static const uint32_t rfini[] = ZYD_MAXIM2_RF;
uint16_t tmp;
int i, error;
/* init RF-dependent PHY registers */
for (i = 0; i < N(phyini); i++)
zyd_write16_m(sc, phyini[i].reg, phyini[i].val);
zyd_read16_m(sc, ZYD_CR203, &tmp);
zyd_write16_m(sc, ZYD_CR203, tmp & ~(1 << 4));
/* init maxim2 radio */
for (i = 0; i < N(rfini); i++) {
if ((error = zyd_rfwrite(sc, rfini[i])) != 0)
return (error);
}
zyd_read16_m(sc, ZYD_CR203, &tmp);
zyd_write16_m(sc, ZYD_CR203, tmp | (1 << 4));
fail:
return (error);
#undef N
}
static int
zyd_maxim2_switch_radio(struct zyd_rf *rf, int on)
{
/* vendor driver does nothing for this RF chip */
return (0);
}
static int
zyd_maxim2_set_channel(struct zyd_rf *rf, uint8_t chan)
{
#define N(a) ((int)(sizeof(a) / sizeof((a)[0])))
struct zyd_softc *sc = rf->rf_sc;
static const struct zyd_phy_pair phyini[] = ZYD_MAXIM2_PHY;
static const uint32_t rfini[] = ZYD_MAXIM2_RF;
static const struct {
uint32_t r1, r2;
} rfprog[] = ZYD_MAXIM2_CHANTABLE;
uint16_t tmp;
int i, error;
/*
* Do the same as we do when initializing it, except for the channel
* values coming from the two channel tables.
*/
/* init RF-dependent PHY registers */
for (i = 0; i < N(phyini); i++)
zyd_write16_m(sc, phyini[i].reg, phyini[i].val);
zyd_read16_m(sc, ZYD_CR203, &tmp);
zyd_write16_m(sc, ZYD_CR203, tmp & ~(1 << 4));
/* first two values taken from the chantables */
error = zyd_rfwrite(sc, rfprog[chan - 1].r1);
if (error != 0)
goto fail;
error = zyd_rfwrite(sc, rfprog[chan - 1].r2);
if (error != 0)
goto fail;
/* init maxim2 radio - skipping the first two values */
for (i = 2; i < N(rfini); i++) {
if ((error = zyd_rfwrite(sc, rfini[i])) != 0)
return (error);
}
zyd_read16_m(sc, ZYD_CR203, &tmp);
zyd_write16_m(sc, ZYD_CR203, tmp | (1 << 4));
fail:
return (error);
#undef N
}
static int
zyd_rf_attach(struct zyd_softc *sc, uint8_t type)
{
struct zyd_rf *rf = &sc->sc_rf;
rf->rf_sc = sc;
rf->update_pwr = 1;
switch (type) {
case ZYD_RF_RFMD:
rf->init = zyd_rfmd_init;
rf->switch_radio = zyd_rfmd_switch_radio;
rf->set_channel = zyd_rfmd_set_channel;
rf->width = 24; /* 24-bit RF values */
break;
case ZYD_RF_AL2230:
case ZYD_RF_AL2230S:
if (sc->sc_macrev == ZYD_ZD1211B) {
rf->init = zyd_al2230_init_b;
rf->set_channel = zyd_al2230_set_channel_b;
} else {
rf->init = zyd_al2230_init;
rf->set_channel = zyd_al2230_set_channel;
}
rf->switch_radio = zyd_al2230_switch_radio;
rf->bandedge6 = zyd_al2230_bandedge6;
rf->width = 24; /* 24-bit RF values */
break;
case ZYD_RF_AL7230B:
rf->init = zyd_al7230B_init;
rf->switch_radio = zyd_al7230B_switch_radio;
rf->set_channel = zyd_al7230B_set_channel;
rf->width = 24; /* 24-bit RF values */
break;
case ZYD_RF_AL2210:
rf->init = zyd_al2210_init;
rf->switch_radio = zyd_al2210_switch_radio;
rf->set_channel = zyd_al2210_set_channel;
rf->width = 24; /* 24-bit RF values */
break;
case ZYD_RF_MAXIM_NEW:
case ZYD_RF_GCT:
rf->init = zyd_gct_init;
rf->switch_radio = zyd_gct_switch_radio;
rf->set_channel = zyd_gct_set_channel;
rf->width = 24; /* 24-bit RF values */
rf->update_pwr = 0;
break;
case ZYD_RF_MAXIM_NEW2:
rf->init = zyd_maxim2_init;
rf->switch_radio = zyd_maxim2_switch_radio;
rf->set_channel = zyd_maxim2_set_channel;
rf->width = 18; /* 18-bit RF values */
break;
default:
device_printf(sc->sc_dev,
"sorry, radio \"%s\" is not supported yet\n",
zyd_rf_name(type));
return (EINVAL);
}
return (0);
}
static const char *
zyd_rf_name(uint8_t type)
{
static const char * const zyd_rfs[] = {
"unknown", "unknown", "UW2451", "UCHIP", "AL2230",
"AL7230B", "THETA", "AL2210", "MAXIM_NEW", "GCT",
"AL2230S", "RALINK", "INTERSIL", "RFMD", "MAXIM_NEW2",
"PHILIPS"
};
return zyd_rfs[(type > 15) ? 0 : type];
}
static int
zyd_hw_init(struct zyd_softc *sc)
{
int error;
const struct zyd_phy_pair *phyp;
struct zyd_rf *rf = &sc->sc_rf;
uint16_t val;
/* specify that the plug and play is finished */
zyd_write32_m(sc, ZYD_MAC_AFTER_PNP, 1);
zyd_read16_m(sc, ZYD_FIRMWARE_BASE_ADDR, &sc->sc_fwbase);
DPRINTF(sc, ZYD_DEBUG_FW, "firmware base address=0x%04x\n",
sc->sc_fwbase);
/* retrieve firmware revision number */
zyd_read16_m(sc, sc->sc_fwbase + ZYD_FW_FIRMWARE_REV, &sc->sc_fwrev);
zyd_write32_m(sc, ZYD_CR_GPI_EN, 0);
zyd_write32_m(sc, ZYD_MAC_CONT_WIN_LIMIT, 0x7f043f);
/* set mandatory rates - XXX assumes 802.11b/g */
zyd_write32_m(sc, ZYD_MAC_MAN_RATE, 0x150f);
/* disable interrupts */
zyd_write32_m(sc, ZYD_CR_INTERRUPT, 0);
if ((error = zyd_read_pod(sc)) != 0) {
device_printf(sc->sc_dev, "could not read EEPROM\n");
goto fail;
}
/* PHY init (resetting) */
error = zyd_lock_phy(sc);
if (error != 0)
goto fail;
phyp = (sc->sc_macrev == ZYD_ZD1211B) ? zyd_def_phyB : zyd_def_phy;
for (; phyp->reg != 0; phyp++)
zyd_write16_m(sc, phyp->reg, phyp->val);
if (sc->sc_macrev == ZYD_ZD1211 && sc->sc_fix_cr157 != 0) {
zyd_read16_m(sc, ZYD_EEPROM_PHY_REG, &val);
zyd_write32_m(sc, ZYD_CR157, val >> 8);
}
error = zyd_unlock_phy(sc);
if (error != 0)
goto fail;
/* HMAC init */
zyd_write32_m(sc, ZYD_MAC_ACK_EXT, 0x00000020);
zyd_write32_m(sc, ZYD_CR_ADDA_MBIAS_WT, 0x30000808);
zyd_write32_m(sc, ZYD_MAC_SNIFFER, 0x00000000);
zyd_write32_m(sc, ZYD_MAC_RXFILTER, 0x00000000);
zyd_write32_m(sc, ZYD_MAC_GHTBL, 0x00000000);
zyd_write32_m(sc, ZYD_MAC_GHTBH, 0x80000000);
zyd_write32_m(sc, ZYD_MAC_MISC, 0x000000a4);
zyd_write32_m(sc, ZYD_CR_ADDA_PWR_DWN, 0x0000007f);
zyd_write32_m(sc, ZYD_MAC_BCNCFG, 0x00f00401);
zyd_write32_m(sc, ZYD_MAC_PHY_DELAY2, 0x00000000);
zyd_write32_m(sc, ZYD_MAC_ACK_EXT, 0x00000080);
zyd_write32_m(sc, ZYD_CR_ADDA_PWR_DWN, 0x00000000);
zyd_write32_m(sc, ZYD_MAC_SIFS_ACK_TIME, 0x00000100);
zyd_write32_m(sc, ZYD_CR_RX_PE_DELAY, 0x00000070);
zyd_write32_m(sc, ZYD_CR_PS_CTRL, 0x10000000);
zyd_write32_m(sc, ZYD_MAC_RTSCTSRATE, 0x02030203);
zyd_write32_m(sc, ZYD_MAC_AFTER_PNP, 1);
zyd_write32_m(sc, ZYD_MAC_BACKOFF_PROTECT, 0x00000114);
zyd_write32_m(sc, ZYD_MAC_DIFS_EIFS_SIFS, 0x0a47c032);
zyd_write32_m(sc, ZYD_MAC_CAM_MODE, 0x3);
if (sc->sc_macrev == ZYD_ZD1211) {
zyd_write32_m(sc, ZYD_MAC_RETRY, 0x00000002);
zyd_write32_m(sc, ZYD_MAC_RX_THRESHOLD, 0x000c0640);
} else {
zyd_write32_m(sc, ZYD_MACB_MAX_RETRY, 0x02020202);
zyd_write32_m(sc, ZYD_MACB_TXPWR_CTL4, 0x007f003f);
zyd_write32_m(sc, ZYD_MACB_TXPWR_CTL3, 0x007f003f);
zyd_write32_m(sc, ZYD_MACB_TXPWR_CTL2, 0x003f001f);
zyd_write32_m(sc, ZYD_MACB_TXPWR_CTL1, 0x001f000f);
zyd_write32_m(sc, ZYD_MACB_AIFS_CTL1, 0x00280028);
zyd_write32_m(sc, ZYD_MACB_AIFS_CTL2, 0x008C003C);
zyd_write32_m(sc, ZYD_MACB_TXOP, 0x01800824);
zyd_write32_m(sc, ZYD_MAC_RX_THRESHOLD, 0x000c0eff);
}
/* init beacon interval to 100ms */
if ((error = zyd_set_beacon_interval(sc, 100)) != 0)
goto fail;
if ((error = zyd_rf_attach(sc, sc->sc_rfrev)) != 0) {
device_printf(sc->sc_dev, "could not attach RF, rev 0x%x\n",
sc->sc_rfrev);
goto fail;
}
/* RF chip init */
error = zyd_lock_phy(sc);
if (error != 0)
goto fail;
error = (*rf->init)(rf);
if (error != 0) {
device_printf(sc->sc_dev,
"radio initialization failed, error %d\n", error);
goto fail;
}
error = zyd_unlock_phy(sc);
if (error != 0)
goto fail;
if ((error = zyd_read_eeprom(sc)) != 0) {
device_printf(sc->sc_dev, "could not read EEPROM\n");
goto fail;
}
fail:
	return (error);
}
static int
zyd_read_pod(struct zyd_softc *sc)
{
int error;
uint32_t tmp;
zyd_read32_m(sc, ZYD_EEPROM_POD, &tmp);
sc->sc_rfrev = tmp & 0x0f;
sc->sc_ledtype = (tmp >> 4) & 0x01;
sc->sc_al2230s = (tmp >> 7) & 0x01;
sc->sc_cckgain = (tmp >> 8) & 0x01;
sc->sc_fix_cr157 = (tmp >> 13) & 0x01;
sc->sc_parev = (tmp >> 16) & 0x0f;
sc->sc_bandedge6 = (tmp >> 21) & 0x01;
sc->sc_newphy = (tmp >> 31) & 0x01;
sc->sc_txled = ((tmp & (1 << 24)) && (tmp & (1 << 29))) ? 0 : 1;
fail:
return (error);
}
static int
zyd_read_eeprom(struct zyd_softc *sc)
{
uint16_t val;
int error, i;
/* read Tx power calibration tables */
for (i = 0; i < 7; i++) {
zyd_read16_m(sc, ZYD_EEPROM_PWR_CAL + i, &val);
sc->sc_pwrcal[i * 2] = val >> 8;
sc->sc_pwrcal[i * 2 + 1] = val & 0xff;
zyd_read16_m(sc, ZYD_EEPROM_PWR_INT + i, &val);
sc->sc_pwrint[i * 2] = val >> 8;
sc->sc_pwrint[i * 2 + 1] = val & 0xff;
zyd_read16_m(sc, ZYD_EEPROM_36M_CAL + i, &val);
sc->sc_ofdm36_cal[i * 2] = val >> 8;
sc->sc_ofdm36_cal[i * 2 + 1] = val & 0xff;
zyd_read16_m(sc, ZYD_EEPROM_48M_CAL + i, &val);
sc->sc_ofdm48_cal[i * 2] = val >> 8;
sc->sc_ofdm48_cal[i * 2 + 1] = val & 0xff;
zyd_read16_m(sc, ZYD_EEPROM_54M_CAL + i, &val);
sc->sc_ofdm54_cal[i * 2] = val >> 8;
sc->sc_ofdm54_cal[i * 2 + 1] = val & 0xff;
}
fail:
return (error);
}
static int
zyd_get_macaddr(struct zyd_softc *sc)
{
struct usb_device_request req;
usb_error_t error;
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = ZYD_READFWDATAREQ;
USETW(req.wValue, ZYD_EEPROM_MAC_ADDR_P1);
USETW(req.wIndex, 0);
USETW(req.wLength, IEEE80211_ADDR_LEN);
- error = zyd_do_request(sc, &req, sc->sc_bssid);
+ error = zyd_do_request(sc, &req, sc->sc_ic.ic_macaddr);
if (error != 0) {
device_printf(sc->sc_dev, "could not read EEPROM: %s\n",
usbd_errstr(error));
}
return (error);
}
static int
zyd_set_macaddr(struct zyd_softc *sc, const uint8_t *addr)
{
int error;
uint32_t tmp;
tmp = addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0];
zyd_write32_m(sc, ZYD_MAC_MACADRL, tmp);
tmp = addr[5] << 8 | addr[4];
zyd_write32_m(sc, ZYD_MAC_MACADRH, tmp);
fail:
return (error);
}
static int
zyd_set_bssid(struct zyd_softc *sc, const uint8_t *addr)
{
int error;
uint32_t tmp;
tmp = addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0];
zyd_write32_m(sc, ZYD_MAC_BSSADRL, tmp);
tmp = addr[5] << 8 | addr[4];
zyd_write32_m(sc, ZYD_MAC_BSSADRH, tmp);
fail:
return (error);
}
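/*
 * Both zyd_set_macaddr() and zyd_set_bssid() pack the six address octets
 * little-endian into a 32-bit "low" and a 16-bit "high" register, e.g.
 * for 00:11:22:33:44:55:
 *
 *	ZYD_MAC_MACADRL / ZYD_MAC_BSSADRL = 0x33221100
 *	ZYD_MAC_MACADRH / ZYD_MAC_BSSADRH = 0x00005544
 */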
static int
zyd_switch_radio(struct zyd_softc *sc, int on)
{
struct zyd_rf *rf = &sc->sc_rf;
int error;
error = zyd_lock_phy(sc);
if (error != 0)
goto fail;
error = (*rf->switch_radio)(rf, on);
if (error != 0)
goto fail;
error = zyd_unlock_phy(sc);
fail:
return (error);
}
static int
zyd_set_led(struct zyd_softc *sc, int which, int on)
{
int error;
uint32_t tmp;
zyd_read32_m(sc, ZYD_MAC_TX_PE_CONTROL, &tmp);
tmp &= ~which;
if (on)
tmp |= which;
zyd_write32_m(sc, ZYD_MAC_TX_PE_CONTROL, tmp);
fail:
return (error);
}
static void
zyd_set_multi(struct zyd_softc *sc)
{
- int error;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifmultiaddr *ifma;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t low, high;
- uint8_t v;
+ int error;
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ if ((sc->sc_flags & ZYD_FLAG_RUNNING) == 0)
return;
low = 0x00000000;
high = 0x80000000;
- if (ic->ic_opmode == IEEE80211_M_MONITOR ||
- (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC))) {
+ if (ic->ic_opmode == IEEE80211_M_MONITOR || ic->ic_allmulti > 0 ||
+ ic->ic_promisc > 0) {
low = 0xffffffff;
high = 0xffffffff;
} else {
- if_maddr_rlock(ifp);
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- v = ((uint8_t *)LLADDR((struct sockaddr_dl *)
- ifma->ifma_addr))[5] >> 2;
- if (v < 32)
- low |= 1 << v;
- else
- high |= 1 << (v - 32);
+ struct ieee80211vap *vap;
+ struct ifnet *ifp;
+ struct ifmultiaddr *ifma;
+ uint8_t v;
+
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ ifp = vap->iv_ifp;
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ v = ((uint8_t *)LLADDR((struct sockaddr_dl *)
+ ifma->ifma_addr))[5] >> 2;
+ if (v < 32)
+ low |= 1 << v;
+ else
+ high |= 1 << (v - 32);
+ }
+ if_maddr_runlock(ifp);
}
- if_maddr_runlock(ifp);
}
/* reprogram multicast global hash table */
zyd_write32_m(sc, ZYD_MAC_GHTBL, low);
zyd_write32_m(sc, ZYD_MAC_GHTBH, high);
fail:
if (error != 0)
device_printf(sc->sc_dev,
"could not set multicast hash table\n");
}
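/*
 * zyd_set_multi() above hashes each multicast address by its last octet:
 * v = octet >> 2 selects one of 64 bits split across GHTBL (bits 0-31)
 * and GHTBH (bits 32-63).  For example, 01:00:5e:00:00:fb (mDNS) gives
 * v = 0xfb >> 2 = 62, i.e. bit 30 of ZYD_MAC_GHTBH.
 */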
static void
zyd_update_mcast(struct ieee80211com *ic)
{
struct zyd_softc *sc = ic->ic_softc;
- if ((ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
-
ZYD_LOCK(sc);
zyd_set_multi(sc);
ZYD_UNLOCK(sc);
}
static int
zyd_set_rxfilter(struct zyd_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
uint32_t rxfilter;
switch (ic->ic_opmode) {
case IEEE80211_M_STA:
rxfilter = ZYD_FILTER_BSS;
break;
case IEEE80211_M_IBSS:
case IEEE80211_M_HOSTAP:
rxfilter = ZYD_FILTER_HOSTAP;
break;
case IEEE80211_M_MONITOR:
rxfilter = ZYD_FILTER_MONITOR;
break;
default:
/* should not get here */
return (EINVAL);
}
return zyd_write32(sc, ZYD_MAC_RXFILTER, rxfilter);
}
static void
zyd_set_chan(struct zyd_softc *sc, struct ieee80211_channel *c)
{
int error;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct zyd_rf *rf = &sc->sc_rf;
uint32_t tmp;
int chan;
chan = ieee80211_chan2ieee(ic, c);
if (chan == 0 || chan == IEEE80211_CHAN_ANY) {
/* XXX should NEVER happen */
device_printf(sc->sc_dev,
"%s: invalid channel %x\n", __func__, chan);
return;
}
error = zyd_lock_phy(sc);
if (error != 0)
goto fail;
error = (*rf->set_channel)(rf, chan);
if (error != 0)
goto fail;
if (rf->update_pwr) {
/* update Tx power */
zyd_write16_m(sc, ZYD_CR31, sc->sc_pwrint[chan - 1]);
if (sc->sc_macrev == ZYD_ZD1211B) {
zyd_write16_m(sc, ZYD_CR67,
sc->sc_ofdm36_cal[chan - 1]);
zyd_write16_m(sc, ZYD_CR66,
sc->sc_ofdm48_cal[chan - 1]);
zyd_write16_m(sc, ZYD_CR65,
sc->sc_ofdm54_cal[chan - 1]);
zyd_write16_m(sc, ZYD_CR68, sc->sc_pwrcal[chan - 1]);
zyd_write16_m(sc, ZYD_CR69, 0x28);
zyd_write16_m(sc, ZYD_CR69, 0x2a);
}
}
if (sc->sc_cckgain) {
/* set CCK baseband gain from EEPROM */
if (zyd_read32(sc, ZYD_EEPROM_PHY_REG, &tmp) == 0)
zyd_write16_m(sc, ZYD_CR47, tmp & 0xff);
}
if (sc->sc_bandedge6 && rf->bandedge6 != NULL) {
error = (*rf->bandedge6)(rf, c);
if (error != 0)
goto fail;
}
zyd_write32_m(sc, ZYD_CR_CONFIG_PHILIPS, 0);
error = zyd_unlock_phy(sc);
if (error != 0)
goto fail;
sc->sc_rxtap.wr_chan_freq = sc->sc_txtap.wt_chan_freq =
htole16(c->ic_freq);
sc->sc_rxtap.wr_chan_flags = sc->sc_txtap.wt_chan_flags =
htole16(c->ic_flags);
fail:
return;
}
static int
zyd_set_beacon_interval(struct zyd_softc *sc, int bintval)
{
int error;
uint32_t val;
zyd_read32_m(sc, ZYD_CR_ATIM_WND_PERIOD, &val);
sc->sc_atim_wnd = val;
zyd_read32_m(sc, ZYD_CR_PRE_TBTT, &val);
sc->sc_pre_tbtt = val;
sc->sc_bcn_int = bintval;
if (sc->sc_bcn_int <= 5)
sc->sc_bcn_int = 5;
if (sc->sc_pre_tbtt < 4 || sc->sc_pre_tbtt >= sc->sc_bcn_int)
sc->sc_pre_tbtt = sc->sc_bcn_int - 1;
if (sc->sc_atim_wnd >= sc->sc_pre_tbtt)
sc->sc_atim_wnd = sc->sc_pre_tbtt - 1;
zyd_write32_m(sc, ZYD_CR_ATIM_WND_PERIOD, sc->sc_atim_wnd);
zyd_write32_m(sc, ZYD_CR_PRE_TBTT, sc->sc_pre_tbtt);
zyd_write32_m(sc, ZYD_CR_BCN_INTERVAL, sc->sc_bcn_int);
fail:
return (error);
}
static void
zyd_rx_data(struct usb_xfer *xfer, int offset, uint16_t len)
{
struct zyd_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct zyd_plcphdr plcp;
struct zyd_rx_stat stat;
struct usb_page_cache *pc;
struct mbuf *m;
int rlen, rssi;
if (len < ZYD_MIN_FRAGSZ) {
DPRINTF(sc, ZYD_DEBUG_RECV, "%s: frame too short (length=%d)\n",
device_get_nameunit(sc->sc_dev), len);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return;
}
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_out(pc, offset, &plcp, sizeof(plcp));
usbd_copy_out(pc, offset + len - sizeof(stat), &stat, sizeof(stat));
if (stat.flags & ZYD_RX_ERROR) {
DPRINTF(sc, ZYD_DEBUG_RECV,
"%s: RX status indicated error (%x)\n",
device_get_nameunit(sc->sc_dev), stat.flags);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return;
}
/* compute actual frame length */
rlen = len - sizeof(struct zyd_plcphdr) -
sizeof(struct zyd_rx_stat) - IEEE80211_CRC_LEN;
/* allocate a mbuf to store the frame */
if (rlen > (int)MCLBYTES) {
DPRINTF(sc, ZYD_DEBUG_RECV, "%s: frame too long (length=%d)\n",
device_get_nameunit(sc->sc_dev), rlen);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return;
} else if (rlen > (int)MHLEN)
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
else
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL) {
DPRINTF(sc, ZYD_DEBUG_RECV, "%s: could not allocate rx mbuf\n",
device_get_nameunit(sc->sc_dev));
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
return;
}
- m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = rlen;
usbd_copy_out(pc, offset + sizeof(plcp), mtod(m, uint8_t *), rlen);
if (ieee80211_radiotap_active(ic)) {
struct zyd_rx_radiotap_header *tap = &sc->sc_rxtap;
tap->wr_flags = 0;
if (stat.flags & (ZYD_RX_BADCRC16 | ZYD_RX_BADCRC32))
tap->wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
/* XXX toss, no way to express errors */
if (stat.flags & ZYD_RX_DECRYPTERR)
tap->wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
tap->wr_rate = ieee80211_plcp2rate(plcp.signal,
(stat.flags & ZYD_RX_OFDM) ?
IEEE80211_T_OFDM : IEEE80211_T_CCK);
tap->wr_antsignal = stat.rssi + -95;
tap->wr_antnoise = -95; /* XXX */
}
rssi = (stat.rssi > 63) ? 127 : 2 * stat.rssi;
sc->sc_rx_data[sc->sc_rx_count].rssi = rssi;
sc->sc_rx_data[sc->sc_rx_count].m = m;
sc->sc_rx_count++;
}
static void
zyd_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct zyd_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni;
struct zyd_rx_desc desc;
struct mbuf *m;
struct usb_page_cache *pc;
uint32_t offset;
uint8_t rssi;
int8_t nf;
int i;
int actlen;
usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
sc->sc_rx_count = 0;
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_out(pc, actlen - sizeof(desc), &desc, sizeof(desc));
offset = 0;
if (UGETW(desc.tag) == ZYD_TAG_MULTIFRAME) {
DPRINTF(sc, ZYD_DEBUG_RECV,
"%s: received multi-frame transfer\n", __func__);
for (i = 0; i < ZYD_MAX_RXFRAMECNT; i++) {
uint16_t len16 = UGETW(desc.len[i]);
if (len16 == 0 || len16 > actlen)
break;
zyd_rx_data(xfer, offset, len16);
/* next frame is aligned on a 32-bit boundary */
len16 = (len16 + 3) & ~3;
offset += len16;
if (len16 > actlen)
break;
actlen -= len16;
}
} else {
DPRINTF(sc, ZYD_DEBUG_RECV,
"%s: received single-frame transfer\n", __func__);
zyd_rx_data(xfer, 0, actlen);
}
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
usbd_transfer_submit(xfer);
/*
* At the end of a USB callback it is always safe to unlock
* the device's private mutex; that is why the deferred
* ieee80211_input() calls are made here rather than earlier,
* while the lock was still held.
*/
ZYD_UNLOCK(sc);
for (i = 0; i < sc->sc_rx_count; i++) {
rssi = sc->sc_rx_data[i].rssi;
m = sc->sc_rx_data[i].m;
sc->sc_rx_data[i].m = NULL;
nf = -95; /* XXX */
ni = ieee80211_find_rxnode(ic,
mtod(m, struct ieee80211_frame_min *));
if (ni != NULL) {
(void)ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else
(void)ieee80211_input_all(ic, m, rssi, nf);
}
- if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
- !IFQ_IS_EMPTY(&ifp->if_snd))
- zyd_start(ifp);
ZYD_LOCK(sc);
+ zyd_start(sc);
break;
default: /* Error */
DPRINTF(sc, ZYD_DEBUG_ANY, "frame error: %s\n", usbd_errstr(error));
if (error != USB_ERR_CANCELLED) {
/* try to clear stall first */
usbd_xfer_set_stall(xfer);
goto tr_setup;
}
break;
}
}
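The RX completion path above stages received frames in sc_rx_data[] while the driver mutex is held and only hands them to ieee80211_input()/ieee80211_input_all() after dropping the lock, as the in-code comment explains. Below is a minimal userland sketch of that collect-under-lock, dispatch-after-unlock pattern using pthreads; all names are invented, and the copy into a local batch is a small safety variation (the driver iterates its staging array directly because its RX callback is serialized).

#include <pthread.h>
#include <stdio.h>

#define MAXBATCH 8

struct frame { int id; };

static pthread_mutex_t softc_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct frame rx_batch[MAXBATCH];
static int rx_count;

/* Called with softc_mtx held: only stage the frame, do not process it. */
static void
rx_stage(struct frame f)
{
	if (rx_count < MAXBATCH)
		rx_batch[rx_count++] = f;
}

/* May sleep or re-enter the "driver"; must run without softc_mtx held. */
static void
rx_dispatch(struct frame f)
{
	printf("delivering frame %d to the stack\n", f.id);
}

static void
rx_complete(void)
{
	struct frame local[MAXBATCH];
	int i, n;

	pthread_mutex_lock(&softc_mtx);
	rx_stage((struct frame){1});	/* normally done by the USB callback */
	rx_stage((struct frame){2});
	n = rx_count;
	for (i = 0; i < n; i++)
		local[i] = rx_batch[i];
	rx_count = 0;
	pthread_mutex_unlock(&softc_mtx);

	/* dispatch outside the lock, mirroring the driver's RX callback */
	for (i = 0; i < n; i++)
		rx_dispatch(local[i]);
}

int
main(void)
{
	rx_complete();
	return (0);
}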
static uint8_t
zyd_plcp_signal(struct zyd_softc *sc, int rate)
{
switch (rate) {
/* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */
case 12:
return (0xb);
case 18:
return (0xf);
case 24:
return (0xa);
case 36:
return (0xe);
case 48:
return (0x9);
case 72:
return (0xd);
case 96:
return (0x8);
case 108:
return (0xc);
/* CCK rates (NB: not IEEE std, device-specific) */
case 2:
return (0x0);
case 4:
return (0x1);
case 11:
return (0x2);
case 22:
return (0x3);
}
device_printf(sc->sc_dev, "unsupported rate %d\n", rate);
return (0x0);
}
static void
zyd_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct zyd_softc *sc = usbd_xfer_softc(xfer);
- struct ifnet *ifp = sc->sc_ifp;
struct ieee80211vap *vap;
struct zyd_tx_data *data;
struct mbuf *m;
struct usb_page_cache *pc;
int actlen;
usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
DPRINTF(sc, ZYD_DEBUG_ANY, "transfer complete, %u bytes\n",
actlen);
/* free resources */
data = usbd_xfer_get_priv(xfer);
zyd_tx_free(data, 0);
usbd_xfer_set_priv(xfer, NULL);
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
-
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
data = STAILQ_FIRST(&sc->tx_q);
if (data) {
STAILQ_REMOVE_HEAD(&sc->tx_q, next);
m = data->m;
if (m->m_pkthdr.len > (int)ZYD_MAX_TXBUFSZ) {
DPRINTF(sc, ZYD_DEBUG_ANY, "data overflow, %u bytes\n",
m->m_pkthdr.len);
m->m_pkthdr.len = ZYD_MAX_TXBUFSZ;
}
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_in(pc, 0, &data->desc, ZYD_TX_DESC_SIZE);
usbd_m_copy_in(pc, ZYD_TX_DESC_SIZE, m, 0,
m->m_pkthdr.len);
vap = data->ni->ni_vap;
if (ieee80211_radiotap_active_vap(vap)) {
struct zyd_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = data->rate;
ieee80211_radiotap_tx(vap, m);
}
usbd_xfer_set_frame_len(xfer, 0, ZYD_TX_DESC_SIZE + m->m_pkthdr.len);
usbd_xfer_set_priv(xfer, data);
usbd_transfer_submit(xfer);
}
- ZYD_UNLOCK(sc);
- zyd_start(ifp);
- ZYD_LOCK(sc);
+ zyd_start(sc);
break;
default: /* Error */
DPRINTF(sc, ZYD_DEBUG_ANY, "transfer error, %s\n",
usbd_errstr(error));
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
data = usbd_xfer_get_priv(xfer);
usbd_xfer_set_priv(xfer, NULL);
if (data != NULL)
zyd_tx_free(data, error);
if (error != USB_ERR_CANCELLED) {
if (error == USB_ERR_TIMEOUT)
device_printf(sc->sc_dev, "device timeout\n");
/*
* Try to clear the stall first; this is also done for
* other errors, even though clearing the stall
* introduces a 50 ms delay:
*/
usbd_xfer_set_stall(xfer);
goto tr_setup;
}
break;
}
}
static int
zyd_tx_start(struct zyd_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct zyd_tx_desc *desc;
struct zyd_tx_data *data;
struct ieee80211_frame *wh;
const struct ieee80211_txparam *tp;
struct ieee80211_key *k;
int rate, totlen;
static const uint8_t ratediv[] = ZYD_TX_RATEDIV;
uint8_t phy;
uint16_t pktlen;
uint32_t bits;
wh = mtod(m0, struct ieee80211_frame *);
data = STAILQ_FIRST(&sc->tx_free);
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT ||
(wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
rate = tp->mgmtrate;
} else {
tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)];
/* for data frames */
if (IEEE80211_IS_MULTICAST(wh->i_addr1))
rate = tp->mcastrate;
else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
rate = tp->ucastrate;
else {
(void) ieee80211_ratectl_rate(ni, NULL, 0);
rate = ni->ni_txrate;
}
}
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
m_freem(m0);
return (ENOBUFS);
}
/* packet header may have moved, reset our local pointer */
wh = mtod(m0, struct ieee80211_frame *);
}
data->ni = ni;
data->m = m0;
data->rate = rate;
/* fill Tx descriptor */
desc = &data->desc;
phy = zyd_plcp_signal(sc, rate);
desc->phy = phy;
if (ZYD_RATE_IS_OFDM(rate)) {
desc->phy |= ZYD_TX_PHY_OFDM;
if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
desc->phy |= ZYD_TX_PHY_5GHZ;
} else if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
desc->phy |= ZYD_TX_PHY_SHPREAMBLE;
totlen = m0->m_pkthdr.len + IEEE80211_CRC_LEN;
desc->len = htole16(totlen);
desc->flags = ZYD_TX_FLAG_BACKOFF;
if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
/* multicast frames are not sent at OFDM rates in 802.11b/g */
if (totlen > vap->iv_rtsthreshold) {
desc->flags |= ZYD_TX_FLAG_RTS;
} else if (ZYD_RATE_IS_OFDM(rate) &&
(ic->ic_flags & IEEE80211_F_USEPROT)) {
if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
desc->flags |= ZYD_TX_FLAG_CTS_TO_SELF;
else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
desc->flags |= ZYD_TX_FLAG_RTS;
}
} else
desc->flags |= ZYD_TX_FLAG_MULTICAST;
if ((wh->i_fc[0] &
(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
(IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_PS_POLL))
desc->flags |= ZYD_TX_FLAG_TYPE(ZYD_TX_TYPE_PS_POLL);
/* actual transmit length (XXX why +10?) */
pktlen = ZYD_TX_DESC_SIZE + 10;
if (sc->sc_macrev == ZYD_ZD1211)
pktlen += totlen;
desc->pktlen = htole16(pktlen);
bits = (rate == 11) ? (totlen * 16) + 10 :
((rate == 22) ? (totlen * 8) + 10 : (totlen * 8));
desc->plcp_length = htole16(bits / ratediv[phy]);
desc->plcp_service = 0;
if (rate == 22 && (bits % 11) > 0 && (bits % 11) <= 3)
desc->plcp_service |= ZYD_PLCP_LENGEXT;
desc->nextlen = 0;
if (ieee80211_radiotap_active_vap(vap)) {
struct zyd_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
ieee80211_radiotap_tx(vap, m0);
}
DPRINTF(sc, ZYD_DEBUG_XMIT,
"%s: sending data frame len=%zu rate=%u\n",
device_get_nameunit(sc->sc_dev), (size_t)m0->m_pkthdr.len,
rate);
STAILQ_INSERT_TAIL(&sc->tx_q, data, next);
usbd_transfer_start(sc->sc_xfer[ZYD_BULK_WR]);
return (0);
}
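In zyd_tx_start() the PLCP LENGTH value is derived from the on-air frame length: rates are in 500 kbps units, so rate 11 is 5.5 Mbps and rate 22 is 11 Mbps (the two cases that get the +10 rounding bias), and the bit count is divided by the per-PLCP-signal divisor from ZYD_TX_RATEDIV. The standalone re-computation below covers the CCK rates only; the rate-to-signal mapping is copied from zyd_plcp_signal() above, while main() and its sample frame length are illustrative.

#include <stdint.h>
#include <stdio.h>

/* per-PLCP-signal divisors, copied from ZYD_TX_RATEDIV */
static const uint8_t ratediv[] = {
	0x1, 0x2, 0xb, 0xb, 0x1, 0x1, 0x1, 0x1,
	0x30, 0x18, 0xc, 0x6, 0x36, 0x24, 0x12, 0x9
};

/* CCK rate (500 kbps units) -> PLCP signal nibble, as in zyd_plcp_signal() */
static uint8_t
cck_signal(int rate)
{
	switch (rate) {
	case 2:  return (0x0);	/*  1 Mbps */
	case 4:  return (0x1);	/*  2 Mbps */
	case 11: return (0x2);	/*  5.5 Mbps */
	case 22: return (0x3);	/* 11 Mbps */
	default: return (0x0);
	}
}

/* PLCP length as computed in zyd_tx_start() for a frame of totlen bytes */
static uint16_t
plcp_length(int rate, int totlen)
{
	uint32_t bits;

	bits = (rate == 11) ? (totlen * 16) + 10 :
	    ((rate == 22) ? (totlen * 8) + 10 : (totlen * 8));
	return (bits / ratediv[cck_signal(rate)]);
}

int
main(void)
{
	const int rates[] = { 2, 4, 11, 22 };
	int i, totlen = 1504;	/* sample payload + CRC, illustrative */

	for (i = 0; i < 4; i++)
		printf("rate %2d (x500kbps): PLCP length %u\n",
		    rates[i], (unsigned)plcp_length(rates[i], totlen));
	return (0);
}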
+static int
+zyd_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct zyd_softc *sc = ic->ic_softc;
+ int error;
+
+ ZYD_LOCK(sc);
+ if ((sc->sc_flags & ZYD_FLAG_RUNNING) == 0) {
+ ZYD_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ ZYD_UNLOCK(sc);
+ return (error);
+ }
+ zyd_start(sc);
+ ZYD_UNLOCK(sc);
+
+ return (0);
+}
+
static void
-zyd_start(struct ifnet *ifp)
+zyd_start(struct zyd_softc *sc)
{
- struct zyd_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct mbuf *m;
- ZYD_LOCK(sc);
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
- if (sc->tx_nfree == 0) {
- IFQ_DRV_PREPEND(&ifp->if_snd, m);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- break;
- }
+ ZYD_LOCK_ASSERT(sc, MA_OWNED);
+
+ while (sc->tx_nfree > 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
if (zyd_tx_start(sc, m, ni) != 0) {
ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ if_inc_counter(ni->ni_vap->iv_ifp,
+ IFCOUNTER_OERRORS, 1);
break;
}
}
- ZYD_UNLOCK(sc);
}
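With the ifnet send queue gone, the new transmit path is: zyd_transmit() enqueues the mbuf on sc_snd under the driver lock and calls zyd_start(), which drains the queue only while free Tx slots (tx_nfree) remain, and the bulk-write completion calls zyd_start() again to resume. A toy model of that enqueue/drain flow with a fixed-size FIFO and a slot counter; every name here is invented and the locking is omitted for brevity.

#include <stdio.h>

#define QLEN	16	/* software queue depth */
#define NSLOTS	4	/* hardware Tx slots, like tx_nfree */

static int queue[QLEN];
static int q_head, q_tail, q_len;
static int tx_nfree = NSLOTS;

/* ic_transmit analogue: enqueue, or fail when the queue is full */
static int
xmit_enqueue(int pkt)
{
	if (q_len == QLEN)
		return (-1);
	queue[q_tail] = pkt;
	q_tail = (q_tail + 1) % QLEN;
	q_len++;
	return (0);
}

/* zyd_start analogue: hand packets to "hardware" while slots remain */
static void
start(void)
{
	while (tx_nfree > 0 && q_len > 0) {
		int pkt = queue[q_head];

		q_head = (q_head + 1) % QLEN;
		q_len--;
		tx_nfree--;
		printf("submitting packet %d (%d slots left)\n", pkt, tx_nfree);
	}
}

/* Tx-complete analogue: free a slot and resume draining the queue */
static void
tx_complete(void)
{
	tx_nfree++;
	start();
}

int
main(void)
{
	int i;

	for (i = 1; i <= 6; i++)
		xmit_enqueue(i);
	start();		/* submits 1..4, queue keeps 5 and 6 */
	tx_complete();		/* submits 5 */
	tx_complete();		/* submits 6 */
	return (0);
}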
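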
static int
zyd_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct zyd_softc *sc = ic->ic_softc;
ZYD_LOCK(sc);
/* prevent management frames from being sent if we're not ready */
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (!(sc->sc_flags & ZYD_FLAG_RUNNING)) {
ZYD_UNLOCK(sc);
m_freem(m);
ieee80211_free_node(ni);
return (ENETDOWN);
}
if (sc->tx_nfree == 0) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
ZYD_UNLOCK(sc);
m_freem(m);
ieee80211_free_node(ni);
return (ENOBUFS); /* XXX */
}
/*
* Legacy path; interpret frame contents to decide
* precisely how to send the frame.
* XXX raw path
*/
if (zyd_tx_start(sc, m, ni) != 0) {
ZYD_UNLOCK(sc);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
return (EIO);
}
ZYD_UNLOCK(sc);
return (0);
}
-static int
-zyd_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+zyd_parent(struct ieee80211com *ic)
{
- struct ieee80211com *ic = ifp->if_l2com;
struct zyd_softc *sc = ic->ic_softc;
- struct ifreq *ifr = (struct ifreq *) data;
- int error;
int startall = 0;
ZYD_LOCK(sc);
- error = (sc->sc_flags & ZYD_FLAG_DETACHED) ? ENXIO : 0;
- ZYD_UNLOCK(sc);
- if (error)
- return (error);
-
- switch (cmd) {
- case SIOCSIFFLAGS:
- ZYD_LOCK(sc);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
- zyd_init_locked(sc);
- startall = 1;
- } else
- zyd_set_multi(sc);
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- zyd_stop(sc);
- }
+ if (sc->sc_flags & ZYD_FLAG_DETACHED) {
ZYD_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
+ return;
}
- return (error);
+ if (ic->ic_nrunning > 0) {
+ if ((sc->sc_flags & ZYD_FLAG_RUNNING) == 0) {
+ zyd_init_locked(sc);
+ startall = 1;
+ } else
+ zyd_set_multi(sc);
+ } else if (sc->sc_flags & ZYD_FLAG_RUNNING)
+ zyd_stop(sc);
+ ZYD_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
}
static void
zyd_init_locked(struct zyd_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct usb_config_descriptor *cd;
int error;
uint32_t val;
ZYD_LOCK_ASSERT(sc, MA_OWNED);
if (!(sc->sc_flags & ZYD_FLAG_INITONCE)) {
error = zyd_loadfirmware(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"could not load firmware (error=%d)\n", error);
goto fail;
}
/* reset device */
cd = usbd_get_config_descriptor(sc->sc_udev);
error = usbd_req_set_config(sc->sc_udev, &sc->sc_mtx,
cd->bConfigurationValue);
if (error)
device_printf(sc->sc_dev, "reset failed, continuing\n");
error = zyd_hw_init(sc);
if (error) {
device_printf(sc->sc_dev,
"hardware initialization failed\n");
goto fail;
}
device_printf(sc->sc_dev,
"HMAC ZD1211%s, FW %02x.%02x, RF %s S%x, PA%x LED %x "
"BE%x NP%x Gain%x F%x\n",
(sc->sc_macrev == ZYD_ZD1211) ? "": "B",
sc->sc_fwrev >> 8, sc->sc_fwrev & 0xff,
zyd_rf_name(sc->sc_rfrev), sc->sc_al2230s, sc->sc_parev,
sc->sc_ledtype, sc->sc_bandedge6, sc->sc_newphy,
sc->sc_cckgain, sc->sc_fix_cr157);
/* read regulatory domain (currently unused) */
zyd_read32_m(sc, ZYD_EEPROM_SUBID, &val);
sc->sc_regdomain = val >> 16;
DPRINTF(sc, ZYD_DEBUG_INIT, "regulatory domain %x\n",
sc->sc_regdomain);
/* we'll do software WEP decryption for now */
DPRINTF(sc, ZYD_DEBUG_INIT, "%s: setting encryption type\n",
__func__);
zyd_write32_m(sc, ZYD_MAC_ENCRYPTION_TYPE, ZYD_ENC_SNIFFER);
sc->sc_flags |= ZYD_FLAG_INITONCE;
}
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+ if (sc->sc_flags & ZYD_FLAG_RUNNING)
zyd_stop(sc);
DPRINTF(sc, ZYD_DEBUG_INIT, "setting MAC address to %6D\n",
- IF_LLADDR(ifp), ":");
- error = zyd_set_macaddr(sc, IF_LLADDR(ifp));
+ vap ? vap->iv_myaddr : ic->ic_macaddr, ":");
+ error = zyd_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);
if (error != 0)
return;
/* set basic rates */
if (ic->ic_curmode == IEEE80211_MODE_11B)
zyd_write32_m(sc, ZYD_MAC_BAS_RATE, 0x0003);
else if (ic->ic_curmode == IEEE80211_MODE_11A)
zyd_write32_m(sc, ZYD_MAC_BAS_RATE, 0x1500);
else /* assumes 802.11b/g */
zyd_write32_m(sc, ZYD_MAC_BAS_RATE, 0xff0f);
/* promiscuous mode */
zyd_write32_m(sc, ZYD_MAC_SNIFFER, 0);
/* multicast setup */
zyd_set_multi(sc);
/* set RX filter */
error = zyd_set_rxfilter(sc);
if (error != 0)
goto fail;
/* switch radio transmitter ON */
error = zyd_switch_radio(sc, 1);
if (error != 0)
goto fail;
/* set default BSS channel */
zyd_set_chan(sc, ic->ic_curchan);
/*
* Allocate Tx and Rx xfer queues.
*/
zyd_setup_tx_list(sc);
/* enable interrupts */
zyd_write32_m(sc, ZYD_CR_INTERRUPT, ZYD_HWINT_MASK);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ sc->sc_flags |= ZYD_FLAG_RUNNING;
usbd_xfer_set_stall(sc->sc_xfer[ZYD_BULK_WR]);
usbd_transfer_start(sc->sc_xfer[ZYD_BULK_RD]);
usbd_transfer_start(sc->sc_xfer[ZYD_INTR_RD]);
return;
fail: zyd_stop(sc);
return;
}
static void
-zyd_init(void *priv)
-{
- struct zyd_softc *sc = priv;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
-
- ZYD_LOCK(sc);
- zyd_init_locked(sc);
- ZYD_UNLOCK(sc);
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ieee80211_start_all(ic); /* start all vap's */
-}
-
-static void
zyd_stop(struct zyd_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
int error;
ZYD_LOCK_ASSERT(sc, MA_OWNED);
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ sc->sc_flags &= ~ZYD_FLAG_RUNNING;
/*
* Drain all the transfers, if not already drained:
*/
ZYD_UNLOCK(sc);
usbd_transfer_drain(sc->sc_xfer[ZYD_BULK_WR]);
usbd_transfer_drain(sc->sc_xfer[ZYD_BULK_RD]);
ZYD_LOCK(sc);
zyd_unsetup_tx_list(sc);
/* Stop now if the device was never set up */
if (!(sc->sc_flags & ZYD_FLAG_INITONCE))
return;
/* switch radio transmitter OFF */
error = zyd_switch_radio(sc, 0);
if (error != 0)
goto fail;
/* disable Rx */
zyd_write32_m(sc, ZYD_MAC_RXFILTER, 0);
/* disable interrupts */
zyd_write32_m(sc, ZYD_CR_INTERRUPT, 0);
fail:
return;
}
static int
zyd_loadfirmware(struct zyd_softc *sc)
{
struct usb_device_request req;
size_t size;
u_char *fw;
uint8_t stat;
uint16_t addr;
if (sc->sc_flags & ZYD_FLAG_FWLOADED)
return (0);
if (sc->sc_macrev == ZYD_ZD1211) {
fw = (u_char *)zd1211_firmware;
size = sizeof(zd1211_firmware);
} else {
fw = (u_char *)zd1211b_firmware;
size = sizeof(zd1211b_firmware);
}
req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
req.bRequest = ZYD_DOWNLOADREQ;
USETW(req.wIndex, 0);
addr = ZYD_FIRMWARE_START_ADDR;
while (size > 0) {
/*
* Transfers of 4096 bytes are unlikely to succeed; it is
* unclear whether the cause is the USB port, the host
* machine, or the chip, so upload in small blocks.
*/
const int mlen = min(size, 64);
DPRINTF(sc, ZYD_DEBUG_FW,
"loading firmware block: len=%d, addr=0x%x\n", mlen, addr);
USETW(req.wValue, addr);
USETW(req.wLength, mlen);
if (zyd_do_request(sc, &req, fw) != 0)
return (EIO);
addr += mlen / 2;
fw += mlen;
size -= mlen;
}
/* check whether the upload succeeded */
req.bmRequestType = UT_READ_VENDOR_DEVICE;
req.bRequest = ZYD_DOWNLOADSTS;
USETW(req.wValue, 0);
USETW(req.wIndex, 0);
USETW(req.wLength, sizeof(stat));
if (zyd_do_request(sc, &req, &stat) != 0)
return (EIO);
sc->sc_flags |= ZYD_FLAG_FWLOADED;
return (stat & 0x80) ? (EIO) : (0);
}
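zyd_loadfirmware() pushes the image to the device in 64-byte vendor control transfers and advances the target address by mlen/2, which suggests the firmware address space is 16-bit-word addressed. A small standalone walk-through of just that chunking arithmetic; the dummy image and the printf stand in for the real ZYD_DOWNLOADREQ control transfers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FW_START_ADDR	0xee00	/* same value as ZYD_FIRMWARE_START_ADDR */
#define FW_BLOCKSZ	64	/* same 64-byte blocks as the driver */

static size_t
min_sz(size_t a, size_t b)
{
	return (a < b ? a : b);
}

int
main(void)
{
	uint8_t image[150];	/* dummy firmware image, illustrative only */
	const uint8_t *fw = image;
	size_t size = sizeof(image);
	uint16_t addr = FW_START_ADDR;

	memset(image, 0xff, sizeof(image));
	while (size > 0) {
		const size_t mlen = min_sz(size, FW_BLOCKSZ);

		/* here the driver issues a ZYD_DOWNLOADREQ control transfer */
		printf("block: len=%zu addr=0x%04x off=%td\n",
		    mlen, (unsigned)addr, fw - image);
		addr += mlen / 2;	/* word-addressed firmware space */
		fw += mlen;
		size -= mlen;
	}
	return (0);
}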
static void
zyd_scan_start(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
struct zyd_softc *sc = ic->ic_softc;
ZYD_LOCK(sc);
/* want broadcast address while scanning */
- zyd_set_bssid(sc, ifp->if_broadcastaddr);
+ zyd_set_bssid(sc, ieee80211broadcastaddr);
ZYD_UNLOCK(sc);
}
static void
zyd_scan_end(struct ieee80211com *ic)
{
struct zyd_softc *sc = ic->ic_softc;
ZYD_LOCK(sc);
/* restore previous bssid */
- zyd_set_bssid(sc, sc->sc_bssid);
+ zyd_set_bssid(sc, ic->ic_macaddr);
ZYD_UNLOCK(sc);
}
static void
zyd_set_channel(struct ieee80211com *ic)
{
struct zyd_softc *sc = ic->ic_softc;
ZYD_LOCK(sc);
zyd_set_chan(sc, ic->ic_curchan);
ZYD_UNLOCK(sc);
}
static device_method_t zyd_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, zyd_match),
DEVMETHOD(device_attach, zyd_attach),
DEVMETHOD(device_detach, zyd_detach),
DEVMETHOD_END
};
static driver_t zyd_driver = {
.name = "zyd",
.methods = zyd_methods,
.size = sizeof(struct zyd_softc)
};
static devclass_t zyd_devclass;
DRIVER_MODULE(zyd, uhub, zyd_driver, zyd_devclass, NULL, 0);
MODULE_DEPEND(zyd, usb, 1, 1, 1);
MODULE_DEPEND(zyd, wlan, 1, 1, 1);
MODULE_VERSION(zyd, 1);
Index: head/sys/dev/usb/wlan/if_zydreg.h
===================================================================
--- head/sys/dev/usb/wlan/if_zydreg.h (revision 287196)
+++ head/sys/dev/usb/wlan/if_zydreg.h (revision 287197)
@@ -1,1314 +1,1315 @@
/* $OpenBSD: if_zydreg.h,v 1.19 2006/11/30 19:28:07 damien Exp $ */
/* $NetBSD: if_zydreg.h,v 1.2 2007/06/16 11:18:45 kiyohara Exp $ */
/* $FreeBSD$ */
/*-
* Copyright (c) 2006 by Damien Bergamini <damien.bergamini@free.fr>
* Copyright (c) 2006 by Florian Stoehr <ich@florian-stoehr.de>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
* ZyDAS ZD1211/ZD1211B USB WLAN driver.
*/
#define ZYD_CR_GPI_EN 0x9418
#define ZYD_CR_RADIO_PD 0x942c
#define ZYD_CR_RF2948_PD 0x942c
#define ZYD_CR_EN_PS_MANUAL_AGC 0x943c
#define ZYD_CR_CONFIG_PHILIPS 0x9440
#define ZYD_CR_I2C_WRITE 0x9444
#define ZYD_CR_SA2400_SER_RP 0x9448
#define ZYD_CR_RADIO_PE 0x9458
#define ZYD_CR_RST_BUS_MASTER 0x945c
#define ZYD_CR_RFCFG 0x9464
#define ZYD_CR_HSTSCHG 0x946c
#define ZYD_CR_PHY_ON 0x9474
#define ZYD_CR_RX_DELAY 0x9478
#define ZYD_CR_RX_PE_DELAY 0x947c
#define ZYD_CR_GPIO_1 0x9490
#define ZYD_CR_GPIO_2 0x9494
#define ZYD_CR_EnZYD_CRyBufMux 0x94a8
#define ZYD_CR_PS_CTRL 0x9500
#define ZYD_CR_ADDA_PWR_DWN 0x9504
#define ZYD_CR_ADDA_MBIAS_WT 0x9508
#define ZYD_CR_INTERRUPT 0x9510
#define ZYD_CR_MAC_PS_STATE 0x950c
#define ZYD_CR_ATIM_WND_PERIOD 0x951c
#define ZYD_CR_BCN_INTERVAL 0x9520
#define ZYD_CR_PRE_TBTT 0x9524
/*
* MAC registers.
*/
#define ZYD_MAC_MACADRL 0x9610 /* MAC address (low) */
#define ZYD_MAC_MACADRH 0x9614 /* MAC address (high) */
#define ZYD_MAC_BSSADRL 0x9618 /* BSS address (low) */
#define ZYD_MAC_BSSADRH 0x961c /* BSS address (high) */
#define ZYD_MAC_BCNCFG 0x9620 /* BCN configuration */
#define ZYD_MAC_GHTBL 0x9624 /* Group hash table (low) */
#define ZYD_MAC_GHTBH 0x9628 /* Group hash table (high) */
#define ZYD_MAC_RX_TIMEOUT 0x962c /* Rx timeout value */
#define ZYD_MAC_BAS_RATE 0x9630 /* Basic rate setting */
#define ZYD_MAC_MAN_RATE 0x9634 /* Mandatory rate setting */
#define ZYD_MAC_RTSCTSRATE 0x9638 /* RTS CTS rate */
#define ZYD_MAC_BACKOFF_PROTECT 0x963c /* Backoff protection */
#define ZYD_MAC_RX_THRESHOLD 0x9640 /* Rx threshold */
#define ZYD_MAC_TX_PE_CONTROL 0x9644 /* Tx_PE control */
#define ZYD_MAC_AFTER_PNP 0x9648 /* After PnP */
#define ZYD_MAC_RX_PE_DELAY 0x964c /* Rx_pe delay */
#define ZYD_MAC_RX_ADDR2_L 0x9650 /* RX address2 (low) */
#define ZYD_MAC_RX_ADDR2_H 0x9654 /* RX address2 (high) */
#define ZYD_MAC_SIFS_ACK_TIME 0x9658 /* Dynamic SIFS ack time */
#define ZYD_MAC_PHY_DELAY 0x9660 /* PHY delay */
#define ZYD_MAC_PHY_DELAY2 0x966c /* PHY delay */
#define ZYD_MAC_BCNFIFO 0x9670 /* Beacon FIFO I/O port */
#define ZYD_MAC_SNIFFER 0x9674 /* Sniffer on/off */
#define ZYD_MAC_ENCRYPTION_TYPE 0x9678 /* Encryption type */
#define ZYD_MAC_RETRY 0x967c /* Retry time */
#define ZYD_MAC_MISC 0x9680 /* Misc */
#define ZYD_MAC_STMACHINESTAT 0x9684 /* State machine status */
#define ZYD_MAC_TX_UNDERRUN_CNT 0x9688 /* TX underrun counter */
#define ZYD_MAC_RXFILTER 0x968c /* Send to host settings */
#define ZYD_MAC_ACK_EXT 0x9690 /* Acknowledge extension */
#define ZYD_MAC_BCNFIFOST 0x9694 /* BCN FIFO set and status */
#define ZYD_MAC_DIFS_EIFS_SIFS 0x9698 /* DIFS, EIFS & SIFS settings */
#define ZYD_MAC_RX_TIMEOUT_CNT 0x969c /* RX timeout count */
#define ZYD_MAC_RX_TOTAL_FRAME 0x96a0 /* RX total frame count */
#define ZYD_MAC_RX_CRC32_CNT 0x96a4 /* RX CRC32 frame count */
#define ZYD_MAC_RX_CRC16_CNT 0x96a8 /* RX CRC16 frame count */
#define ZYD_MAC_RX_UDEC 0x96ac /* RX unicast decr. error count */
#define ZYD_MAC_RX_OVERRUN_CNT 0x96b0 /* RX FIFO overrun count */
#define ZYD_MAC_RX_MDEC 0x96bc /* RX multicast decr. err. cnt. */
#define ZYD_MAC_NAV_TCR 0x96c4 /* NAV timer count read */
#define ZYD_MAC_BACKOFF_ST_RD 0x96c8 /* Backoff status read */
#define ZYD_MAC_DM_RETRY_CNT_RD 0x96cc /* DM retry count read */
#define ZYD_MAC_RX_ACR 0x96d0 /* RX arbitration count read */
#define ZYD_MAC_TX_CCR 0x96d4 /* Tx complete count read */
#define ZYD_MAC_TCB_ADDR 0x96e8 /* Current PCI process TCP addr */
#define ZYD_MAC_RCB_ADDR 0x96ec /* Next RCB address */
#define ZYD_MAC_CONT_WIN_LIMIT 0x96f0 /* Contention window limit */
#define ZYD_MAC_TX_PKT 0x96f4 /* Tx total packet count read */
#define ZYD_MAC_DL_CTRL 0x96f8 /* Download control */
#define ZYD_MAC_CAM_MODE 0x9700 /* CAM: Continuous Access Mode */
#define ZYD_MACB_TXPWR_CTL1 0x9b00
#define ZYD_MACB_TXPWR_CTL2 0x9b04
#define ZYD_MACB_TXPWR_CTL3 0x9b08
#define ZYD_MACB_TXPWR_CTL4 0x9b0c
#define ZYD_MACB_AIFS_CTL1 0x9b10
#define ZYD_MACB_AIFS_CTL2 0x9b14
#define ZYD_MACB_TXOP 0x9b20
#define ZYD_MACB_MAX_RETRY 0x9b28
/*
* Miscellaneous registers.
*/
#define ZYD_FIRMWARE_START_ADDR 0xee00
#define ZYD_FIRMWARE_BASE_ADDR 0xee1d /* Firmware base address */
/*
* EEPROM registers.
*/
#define ZYD_EEPROM_START_HEAD 0xf800 /* EEPROM start */
#define ZYD_EEPROM_SUBID 0xf817
#define ZYD_EEPROM_POD 0xf819
#define ZYD_EEPROM_MAC_ADDR_P1 0xf81b /* Part 1 of the MAC address */
#define ZYD_EEPROM_MAC_ADDR_P2 0xf81d /* Part 2 of the MAC address */
#define ZYD_EEPROM_PWR_CAL 0xf81f /* Calibration */
#define ZYD_EEPROM_PWR_INT 0xf827 /* Calibration */
#define ZYD_EEPROM_ALLOWEDCHAN 0xf82f /* Allowed CH mask, 1 bit each */
#define ZYD_EEPROM_DEVICE_VER 0xf837 /* Device version */
#define ZYD_EEPROM_PHY_REG 0xf83c /* PHY registers */
#define ZYD_EEPROM_36M_CAL 0xf83f /* Calibration */
#define ZYD_EEPROM_11A_INT 0xf847 /* Interpolation */
#define ZYD_EEPROM_48M_CAL 0xf84f /* Calibration */
#define ZYD_EEPROM_48M_INT 0xf857 /* Interpolation */
#define ZYD_EEPROM_54M_CAL 0xf85f /* Calibration */
#define ZYD_EEPROM_54M_INT 0xf867 /* Interpolation */
/*
* Firmware registers offsets (relative to fwbase).
*/
#define ZYD_FW_FIRMWARE_REV 0x0000 /* Firmware version */
#define ZYD_FW_USB_SPEED 0x0001 /* USB speed (!=0 if highspeed) */
#define ZYD_FW_FIX_TX_RATE 0x0002 /* Fixed TX rate */
#define ZYD_FW_LINK_STATUS 0x0003
#define ZYD_FW_SOFT_RESET 0x0004
#define ZYD_FW_FLASH_CHK 0x0005
/* possible flags for register ZYD_FW_LINK_STATUS */
#define ZYD_LED1 (1 << 8)
#define ZYD_LED2 (1 << 9)
/*
* RF IDs.
*/
#define ZYD_RF_UW2451 0x2 /* not supported yet */
#define ZYD_RF_UCHIP 0x3 /* not supported yet */
#define ZYD_RF_AL2230 0x4
#define ZYD_RF_AL7230B 0x5
#define ZYD_RF_THETA 0x6 /* not supported yet */
#define ZYD_RF_AL2210 0x7
#define ZYD_RF_MAXIM_NEW 0x8
#define ZYD_RF_GCT 0x9
#define ZYD_RF_AL2230S 0xa /* not supported yet */
#define ZYD_RF_RALINK 0xb /* not supported yet */
#define ZYD_RF_INTERSIL 0xc /* not supported yet */
#define ZYD_RF_RFMD 0xd
#define ZYD_RF_MAXIM_NEW2 0xe
#define ZYD_RF_PHILIPS 0xf /* not supported yet */
/*
* PHY registers (8 bits, not documented).
*/
#define ZYD_CR0 0x9000
#define ZYD_CR1 0x9004
#define ZYD_CR2 0x9008
#define ZYD_CR3 0x900c
#define ZYD_CR5 0x9010
#define ZYD_CR6 0x9014
#define ZYD_CR7 0x9018
#define ZYD_CR8 0x901c
#define ZYD_CR4 0x9020
#define ZYD_CR9 0x9024
#define ZYD_CR10 0x9028
#define ZYD_CR11 0x902c
#define ZYD_CR12 0x9030
#define ZYD_CR13 0x9034
#define ZYD_CR14 0x9038
#define ZYD_CR15 0x903c
#define ZYD_CR16 0x9040
#define ZYD_CR17 0x9044
#define ZYD_CR18 0x9048
#define ZYD_CR19 0x904c
#define ZYD_CR20 0x9050
#define ZYD_CR21 0x9054
#define ZYD_CR22 0x9058
#define ZYD_CR23 0x905c
#define ZYD_CR24 0x9060
#define ZYD_CR25 0x9064
#define ZYD_CR26 0x9068
#define ZYD_CR27 0x906c
#define ZYD_CR28 0x9070
#define ZYD_CR29 0x9074
#define ZYD_CR30 0x9078
#define ZYD_CR31 0x907c
#define ZYD_CR32 0x9080
#define ZYD_CR33 0x9084
#define ZYD_CR34 0x9088
#define ZYD_CR35 0x908c
#define ZYD_CR36 0x9090
#define ZYD_CR37 0x9094
#define ZYD_CR38 0x9098
#define ZYD_CR39 0x909c
#define ZYD_CR40 0x90a0
#define ZYD_CR41 0x90a4
#define ZYD_CR42 0x90a8
#define ZYD_CR43 0x90ac
#define ZYD_CR44 0x90b0
#define ZYD_CR45 0x90b4
#define ZYD_CR46 0x90b8
#define ZYD_CR47 0x90bc
#define ZYD_CR48 0x90c0
#define ZYD_CR49 0x90c4
#define ZYD_CR50 0x90c8
#define ZYD_CR51 0x90cc
#define ZYD_CR52 0x90d0
#define ZYD_CR53 0x90d4
#define ZYD_CR54 0x90d8
#define ZYD_CR55 0x90dc
#define ZYD_CR56 0x90e0
#define ZYD_CR57 0x90e4
#define ZYD_CR58 0x90e8
#define ZYD_CR59 0x90ec
#define ZYD_CR60 0x90f0
#define ZYD_CR61 0x90f4
#define ZYD_CR62 0x90f8
#define ZYD_CR63 0x90fc
#define ZYD_CR64 0x9100
#define ZYD_CR65 0x9104
#define ZYD_CR66 0x9108
#define ZYD_CR67 0x910c
#define ZYD_CR68 0x9110
#define ZYD_CR69 0x9114
#define ZYD_CR70 0x9118
#define ZYD_CR71 0x911c
#define ZYD_CR72 0x9120
#define ZYD_CR73 0x9124
#define ZYD_CR74 0x9128
#define ZYD_CR75 0x912c
#define ZYD_CR76 0x9130
#define ZYD_CR77 0x9134
#define ZYD_CR78 0x9138
#define ZYD_CR79 0x913c
#define ZYD_CR80 0x9140
#define ZYD_CR81 0x9144
#define ZYD_CR82 0x9148
#define ZYD_CR83 0x914c
#define ZYD_CR84 0x9150
#define ZYD_CR85 0x9154
#define ZYD_CR86 0x9158
#define ZYD_CR87 0x915c
#define ZYD_CR88 0x9160
#define ZYD_CR89 0x9164
#define ZYD_CR90 0x9168
#define ZYD_CR91 0x916c
#define ZYD_CR92 0x9170
#define ZYD_CR93 0x9174
#define ZYD_CR94 0x9178
#define ZYD_CR95 0x917c
#define ZYD_CR96 0x9180
#define ZYD_CR97 0x9184
#define ZYD_CR98 0x9188
#define ZYD_CR99 0x918c
#define ZYD_CR100 0x9190
#define ZYD_CR101 0x9194
#define ZYD_CR102 0x9198
#define ZYD_CR103 0x919c
#define ZYD_CR104 0x91a0
#define ZYD_CR105 0x91a4
#define ZYD_CR106 0x91a8
#define ZYD_CR107 0x91ac
#define ZYD_CR108 0x91b0
#define ZYD_CR109 0x91b4
#define ZYD_CR110 0x91b8
#define ZYD_CR111 0x91bc
#define ZYD_CR112 0x91c0
#define ZYD_CR113 0x91c4
#define ZYD_CR114 0x91c8
#define ZYD_CR115 0x91cc
#define ZYD_CR116 0x91d0
#define ZYD_CR117 0x91d4
#define ZYD_CR118 0x91d8
#define ZYD_CR119 0x91dc
#define ZYD_CR120 0x91e0
#define ZYD_CR121 0x91e4
#define ZYD_CR122 0x91e8
#define ZYD_CR123 0x91ec
#define ZYD_CR124 0x91f0
#define ZYD_CR125 0x91f4
#define ZYD_CR126 0x91f8
#define ZYD_CR127 0x91fc
#define ZYD_CR128 0x9200
#define ZYD_CR129 0x9204
#define ZYD_CR130 0x9208
#define ZYD_CR131 0x920c
#define ZYD_CR132 0x9210
#define ZYD_CR133 0x9214
#define ZYD_CR134 0x9218
#define ZYD_CR135 0x921c
#define ZYD_CR136 0x9220
#define ZYD_CR137 0x9224
#define ZYD_CR138 0x9228
#define ZYD_CR139 0x922c
#define ZYD_CR140 0x9230
#define ZYD_CR141 0x9234
#define ZYD_CR142 0x9238
#define ZYD_CR143 0x923c
#define ZYD_CR144 0x9240
#define ZYD_CR145 0x9244
#define ZYD_CR146 0x9248
#define ZYD_CR147 0x924c
#define ZYD_CR148 0x9250
#define ZYD_CR149 0x9254
#define ZYD_CR150 0x9258
#define ZYD_CR151 0x925c
#define ZYD_CR152 0x9260
#define ZYD_CR153 0x9264
#define ZYD_CR154 0x9268
#define ZYD_CR155 0x926c
#define ZYD_CR156 0x9270
#define ZYD_CR157 0x9274
#define ZYD_CR158 0x9278
#define ZYD_CR159 0x927c
#define ZYD_CR160 0x9280
#define ZYD_CR161 0x9284
#define ZYD_CR162 0x9288
#define ZYD_CR163 0x928c
#define ZYD_CR164 0x9290
#define ZYD_CR165 0x9294
#define ZYD_CR166 0x9298
#define ZYD_CR167 0x929c
#define ZYD_CR168 0x92a0
#define ZYD_CR169 0x92a4
#define ZYD_CR170 0x92a8
#define ZYD_CR171 0x92ac
#define ZYD_CR172 0x92b0
#define ZYD_CR173 0x92b4
#define ZYD_CR174 0x92b8
#define ZYD_CR175 0x92bc
#define ZYD_CR176 0x92c0
#define ZYD_CR177 0x92c4
#define ZYD_CR178 0x92c8
#define ZYD_CR179 0x92cc
#define ZYD_CR180 0x92d0
#define ZYD_CR181 0x92d4
#define ZYD_CR182 0x92d8
#define ZYD_CR183 0x92dc
#define ZYD_CR184 0x92e0
#define ZYD_CR185 0x92e4
#define ZYD_CR186 0x92e8
#define ZYD_CR187 0x92ec
#define ZYD_CR188 0x92f0
#define ZYD_CR189 0x92f4
#define ZYD_CR190 0x92f8
#define ZYD_CR191 0x92fc
#define ZYD_CR192 0x9300
#define ZYD_CR193 0x9304
#define ZYD_CR194 0x9308
#define ZYD_CR195 0x930c
#define ZYD_CR196 0x9310
#define ZYD_CR197 0x9314
#define ZYD_CR198 0x9318
#define ZYD_CR199 0x931c
#define ZYD_CR200 0x9320
#define ZYD_CR201 0x9324
#define ZYD_CR202 0x9328
#define ZYD_CR203 0x932c
#define ZYD_CR204 0x9330
#define ZYD_CR205 0x9334
#define ZYD_CR206 0x9338
#define ZYD_CR207 0x933c
#define ZYD_CR208 0x9340
#define ZYD_CR209 0x9344
#define ZYD_CR210 0x9348
#define ZYD_CR211 0x934c
#define ZYD_CR212 0x9350
#define ZYD_CR213 0x9354
#define ZYD_CR214 0x9358
#define ZYD_CR215 0x935c
#define ZYD_CR216 0x9360
#define ZYD_CR217 0x9364
#define ZYD_CR218 0x9368
#define ZYD_CR219 0x936c
#define ZYD_CR220 0x9370
#define ZYD_CR221 0x9374
#define ZYD_CR222 0x9378
#define ZYD_CR223 0x937c
#define ZYD_CR224 0x9380
#define ZYD_CR225 0x9384
#define ZYD_CR226 0x9388
#define ZYD_CR227 0x938c
#define ZYD_CR228 0x9390
#define ZYD_CR229 0x9394
#define ZYD_CR230 0x9398
#define ZYD_CR231 0x939c
#define ZYD_CR232 0x93a0
#define ZYD_CR233 0x93a4
#define ZYD_CR234 0x93a8
#define ZYD_CR235 0x93ac
#define ZYD_CR236 0x93b0
#define ZYD_CR240 0x93c0
#define ZYD_CR241 0x93c4
#define ZYD_CR242 0x93c8
#define ZYD_CR243 0x93cc
#define ZYD_CR244 0x93d0
#define ZYD_CR245 0x93d4
#define ZYD_CR251 0x93ec
#define ZYD_CR252 0x93f0
#define ZYD_CR253 0x93f4
#define ZYD_CR254 0x93f8
#define ZYD_CR255 0x93fc
/* copied nearly verbatim from the Linux driver rewrite */
#define ZYD_DEF_PHY \
{ \
{ ZYD_CR0, 0x0a }, { ZYD_CR1, 0x06 }, { ZYD_CR2, 0x26 }, \
{ ZYD_CR3, 0x38 }, { ZYD_CR4, 0x80 }, { ZYD_CR9, 0xa0 }, \
{ ZYD_CR10, 0x81 }, { ZYD_CR11, 0x00 }, { ZYD_CR12, 0x7f }, \
{ ZYD_CR13, 0x8c }, { ZYD_CR14, 0x80 }, { ZYD_CR15, 0x3d }, \
{ ZYD_CR16, 0x20 }, { ZYD_CR17, 0x1e }, { ZYD_CR18, 0x0a }, \
{ ZYD_CR19, 0x48 }, { ZYD_CR20, 0x0c }, { ZYD_CR21, 0x0c }, \
{ ZYD_CR22, 0x23 }, { ZYD_CR23, 0x90 }, { ZYD_CR24, 0x14 }, \
{ ZYD_CR25, 0x40 }, { ZYD_CR26, 0x10 }, { ZYD_CR27, 0x19 }, \
{ ZYD_CR28, 0x7f }, { ZYD_CR29, 0x80 }, { ZYD_CR30, 0x4b }, \
{ ZYD_CR31, 0x60 }, { ZYD_CR32, 0x43 }, { ZYD_CR33, 0x08 }, \
{ ZYD_CR34, 0x06 }, { ZYD_CR35, 0x0a }, { ZYD_CR36, 0x00 }, \
{ ZYD_CR37, 0x00 }, { ZYD_CR38, 0x38 }, { ZYD_CR39, 0x0c }, \
{ ZYD_CR40, 0x84 }, { ZYD_CR41, 0x2a }, { ZYD_CR42, 0x80 }, \
{ ZYD_CR43, 0x10 }, { ZYD_CR44, 0x12 }, { ZYD_CR46, 0xff }, \
{ ZYD_CR47, 0x1e }, { ZYD_CR48, 0x26 }, { ZYD_CR49, 0x5b }, \
{ ZYD_CR64, 0xd0 }, { ZYD_CR65, 0x04 }, { ZYD_CR66, 0x58 }, \
{ ZYD_CR67, 0xc9 }, { ZYD_CR68, 0x88 }, { ZYD_CR69, 0x41 }, \
{ ZYD_CR70, 0x23 }, { ZYD_CR71, 0x10 }, { ZYD_CR72, 0xff }, \
{ ZYD_CR73, 0x32 }, { ZYD_CR74, 0x30 }, { ZYD_CR75, 0x65 }, \
{ ZYD_CR76, 0x41 }, { ZYD_CR77, 0x1b }, { ZYD_CR78, 0x30 }, \
{ ZYD_CR79, 0x68 }, { ZYD_CR80, 0x64 }, { ZYD_CR81, 0x64 }, \
{ ZYD_CR82, 0x00 }, { ZYD_CR83, 0x00 }, { ZYD_CR84, 0x00 }, \
{ ZYD_CR85, 0x02 }, { ZYD_CR86, 0x00 }, { ZYD_CR87, 0x00 }, \
{ ZYD_CR88, 0xff }, { ZYD_CR89, 0xfc }, { ZYD_CR90, 0x00 }, \
{ ZYD_CR91, 0x00 }, { ZYD_CR92, 0x00 }, { ZYD_CR93, 0x08 }, \
{ ZYD_CR94, 0x00 }, { ZYD_CR95, 0x00 }, { ZYD_CR96, 0xff }, \
{ ZYD_CR97, 0xe7 }, { ZYD_CR98, 0x00 }, { ZYD_CR99, 0x00 }, \
{ ZYD_CR100, 0x00 }, { ZYD_CR101, 0xae }, { ZYD_CR102, 0x02 }, \
{ ZYD_CR103, 0x00 }, { ZYD_CR104, 0x03 }, { ZYD_CR105, 0x65 }, \
{ ZYD_CR106, 0x04 }, { ZYD_CR107, 0x00 }, { ZYD_CR108, 0x0a }, \
{ ZYD_CR109, 0xaa }, { ZYD_CR110, 0xaa }, { ZYD_CR111, 0x25 }, \
{ ZYD_CR112, 0x25 }, { ZYD_CR113, 0x00 }, { ZYD_CR119, 0x1e }, \
{ ZYD_CR125, 0x90 }, { ZYD_CR126, 0x00 }, { ZYD_CR127, 0x00 }, \
{ ZYD_CR5, 0x00 }, { ZYD_CR6, 0x00 }, { ZYD_CR7, 0x00 }, \
{ ZYD_CR8, 0x00 }, { ZYD_CR9, 0x20 }, { ZYD_CR12, 0xf0 }, \
{ ZYD_CR20, 0x0e }, { ZYD_CR21, 0x0e }, { ZYD_CR27, 0x10 }, \
{ ZYD_CR44, 0x33 }, { ZYD_CR47, 0x1E }, { ZYD_CR83, 0x24 }, \
{ ZYD_CR84, 0x04 }, { ZYD_CR85, 0x00 }, { ZYD_CR86, 0x0C }, \
{ ZYD_CR87, 0x12 }, { ZYD_CR88, 0x0C }, { ZYD_CR89, 0x00 }, \
{ ZYD_CR90, 0x10 }, { ZYD_CR91, 0x08 }, { ZYD_CR93, 0x00 }, \
{ ZYD_CR94, 0x01 }, { ZYD_CR95, 0x00 }, { ZYD_CR96, 0x50 }, \
{ ZYD_CR97, 0x37 }, { ZYD_CR98, 0x35 }, { ZYD_CR101, 0x13 }, \
{ ZYD_CR102, 0x27 }, { ZYD_CR103, 0x27 }, { ZYD_CR104, 0x18 }, \
{ ZYD_CR105, 0x12 }, { ZYD_CR109, 0x27 }, { ZYD_CR110, 0x27 }, \
{ ZYD_CR111, 0x27 }, { ZYD_CR112, 0x27 }, { ZYD_CR113, 0x27 }, \
{ ZYD_CR114, 0x27 }, { ZYD_CR115, 0x26 }, { ZYD_CR116, 0x24 }, \
{ ZYD_CR117, 0xfc }, { ZYD_CR118, 0xfa }, { ZYD_CR120, 0x4f }, \
{ ZYD_CR125, 0xaa }, { ZYD_CR127, 0x03 }, { ZYD_CR128, 0x14 }, \
{ ZYD_CR129, 0x12 }, { ZYD_CR130, 0x10 }, { ZYD_CR131, 0x0C }, \
{ ZYD_CR136, 0xdf }, { ZYD_CR137, 0x40 }, { ZYD_CR138, 0xa0 }, \
{ ZYD_CR139, 0xb0 }, { ZYD_CR140, 0x99 }, { ZYD_CR141, 0x82 }, \
{ ZYD_CR142, 0x54 }, { ZYD_CR143, 0x1c }, { ZYD_CR144, 0x6c }, \
{ ZYD_CR147, 0x07 }, { ZYD_CR148, 0x4c }, { ZYD_CR149, 0x50 }, \
{ ZYD_CR150, 0x0e }, { ZYD_CR151, 0x18 }, { ZYD_CR160, 0xfe }, \
{ ZYD_CR161, 0xee }, { ZYD_CR162, 0xaa }, { ZYD_CR163, 0xfa }, \
{ ZYD_CR164, 0xfa }, { ZYD_CR165, 0xea }, { ZYD_CR166, 0xbe }, \
{ ZYD_CR167, 0xbe }, { ZYD_CR168, 0x6a }, { ZYD_CR169, 0xba }, \
{ ZYD_CR170, 0xba }, { ZYD_CR171, 0xba }, { ZYD_CR204, 0x7d }, \
{ ZYD_CR203, 0x30 }, { 0, 0} \
}
#define ZYD_DEF_PHYB \
{ \
{ ZYD_CR0, 0x14 }, { ZYD_CR1, 0x06 }, { ZYD_CR2, 0x26 }, \
{ ZYD_CR3, 0x38 }, { ZYD_CR4, 0x80 }, { ZYD_CR9, 0xe0 }, \
{ ZYD_CR10, 0x81 }, { ZYD_CR11, 0x00 }, { ZYD_CR12, 0xf0 }, \
{ ZYD_CR13, 0x8c }, { ZYD_CR14, 0x80 }, { ZYD_CR15, 0x3d }, \
{ ZYD_CR16, 0x20 }, { ZYD_CR17, 0x1e }, { ZYD_CR18, 0x0a }, \
{ ZYD_CR19, 0x48 }, { ZYD_CR20, 0x10 }, { ZYD_CR21, 0x0e }, \
{ ZYD_CR22, 0x23 }, { ZYD_CR23, 0x90 }, { ZYD_CR24, 0x14 }, \
{ ZYD_CR25, 0x40 }, { ZYD_CR26, 0x10 }, { ZYD_CR27, 0x10 }, \
{ ZYD_CR28, 0x7f }, { ZYD_CR29, 0x80 }, { ZYD_CR30, 0x4b }, \
{ ZYD_CR31, 0x60 }, { ZYD_CR32, 0x43 }, { ZYD_CR33, 0x08 }, \
{ ZYD_CR34, 0x06 }, { ZYD_CR35, 0x0a }, { ZYD_CR36, 0x00 }, \
{ ZYD_CR37, 0x00 }, { ZYD_CR38, 0x38 }, { ZYD_CR39, 0x0c }, \
{ ZYD_CR40, 0x84 }, { ZYD_CR41, 0x2a }, { ZYD_CR42, 0x80 }, \
{ ZYD_CR43, 0x10 }, { ZYD_CR44, 0x33 }, { ZYD_CR46, 0xff }, \
{ ZYD_CR47, 0x1E }, { ZYD_CR48, 0x26 }, { ZYD_CR49, 0x5b }, \
{ ZYD_CR64, 0xd0 }, { ZYD_CR65, 0x04 }, { ZYD_CR66, 0x58 }, \
{ ZYD_CR67, 0xc9 }, { ZYD_CR68, 0x88 }, { ZYD_CR69, 0x41 }, \
{ ZYD_CR70, 0x23 }, { ZYD_CR71, 0x10 }, { ZYD_CR72, 0xff }, \
{ ZYD_CR73, 0x32 }, { ZYD_CR74, 0x30 }, { ZYD_CR75, 0x65 }, \
{ ZYD_CR76, 0x41 }, { ZYD_CR77, 0x1b }, { ZYD_CR78, 0x30 }, \
{ ZYD_CR79, 0xf0 }, { ZYD_CR80, 0x64 }, { ZYD_CR81, 0x64 }, \
{ ZYD_CR82, 0x00 }, { ZYD_CR83, 0x24 }, { ZYD_CR84, 0x04 }, \
{ ZYD_CR85, 0x00 }, { ZYD_CR86, 0x0c }, { ZYD_CR87, 0x12 }, \
{ ZYD_CR88, 0x0c }, { ZYD_CR89, 0x00 }, { ZYD_CR90, 0x58 }, \
{ ZYD_CR91, 0x04 }, { ZYD_CR92, 0x00 }, { ZYD_CR93, 0x00 }, \
{ ZYD_CR94, 0x01 }, { ZYD_CR95, 0x20 }, { ZYD_CR96, 0x50 }, \
{ ZYD_CR97, 0x37 }, { ZYD_CR98, 0x35 }, { ZYD_CR99, 0x00 }, \
{ ZYD_CR100, 0x01 }, { ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, \
{ ZYD_CR103, 0x27 }, { ZYD_CR104, 0x18 }, { ZYD_CR105, 0x12 }, \
{ ZYD_CR106, 0x04 }, { ZYD_CR107, 0x00 }, { ZYD_CR108, 0x0a }, \
{ ZYD_CR109, 0x27 }, { ZYD_CR110, 0x27 }, { ZYD_CR111, 0x27 }, \
{ ZYD_CR112, 0x27 }, { ZYD_CR113, 0x27 }, { ZYD_CR114, 0x27 }, \
{ ZYD_CR115, 0x26 }, { ZYD_CR116, 0x24 }, { ZYD_CR117, 0xfc }, \
{ ZYD_CR118, 0xfa }, { ZYD_CR119, 0x1e }, { ZYD_CR125, 0x90 }, \
{ ZYD_CR126, 0x00 }, { ZYD_CR127, 0x00 }, { ZYD_CR128, 0x14 }, \
{ ZYD_CR129, 0x12 }, { ZYD_CR130, 0x10 }, { ZYD_CR131, 0x0c }, \
{ ZYD_CR136, 0xdf }, { ZYD_CR137, 0xa0 }, { ZYD_CR138, 0xa8 }, \
{ ZYD_CR139, 0xb4 }, { ZYD_CR140, 0x98 }, { ZYD_CR141, 0x82 }, \
{ ZYD_CR142, 0x53 }, { ZYD_CR143, 0x1c }, { ZYD_CR144, 0x6c }, \
{ ZYD_CR147, 0x07 }, { ZYD_CR148, 0x40 }, { ZYD_CR149, 0x40 }, \
{ ZYD_CR150, 0x14 }, { ZYD_CR151, 0x18 }, { ZYD_CR159, 0x70 }, \
{ ZYD_CR160, 0xfe }, { ZYD_CR161, 0xee }, { ZYD_CR162, 0xaa }, \
{ ZYD_CR163, 0xfa }, { ZYD_CR164, 0xfa }, { ZYD_CR165, 0xea }, \
{ ZYD_CR166, 0xbe }, { ZYD_CR167, 0xbe }, { ZYD_CR168, 0x6a }, \
{ ZYD_CR169, 0xba }, { ZYD_CR170, 0xba }, { ZYD_CR171, 0xba }, \
{ ZYD_CR204, 0x7d }, { ZYD_CR203, 0x30 }, \
{ 0, 0 } \
}
#define ZYD_RFMD_PHY \
{ \
{ ZYD_CR2, 0x1e }, { ZYD_CR9, 0x20 }, { ZYD_CR10, 0x89 }, \
{ ZYD_CR11, 0x00 }, { ZYD_CR15, 0xd0 }, { ZYD_CR17, 0x68 }, \
{ ZYD_CR19, 0x4a }, { ZYD_CR20, 0x0c }, { ZYD_CR21, 0x0e }, \
{ ZYD_CR23, 0x48 }, { ZYD_CR24, 0x14 }, { ZYD_CR26, 0x90 }, \
{ ZYD_CR27, 0x30 }, { ZYD_CR29, 0x20 }, { ZYD_CR31, 0xb2 }, \
{ ZYD_CR32, 0x43 }, { ZYD_CR33, 0x28 }, { ZYD_CR38, 0x30 }, \
{ ZYD_CR34, 0x0f }, { ZYD_CR35, 0xf0 }, { ZYD_CR41, 0x2a }, \
{ ZYD_CR46, 0x7f }, { ZYD_CR47, 0x1e }, { ZYD_CR51, 0xc5 }, \
{ ZYD_CR52, 0xc5 }, { ZYD_CR53, 0xc5 }, { ZYD_CR79, 0x58 }, \
{ ZYD_CR80, 0x30 }, { ZYD_CR81, 0x30 }, { ZYD_CR82, 0x00 }, \
{ ZYD_CR83, 0x24 }, { ZYD_CR84, 0x04 }, { ZYD_CR85, 0x00 }, \
{ ZYD_CR86, 0x10 }, { ZYD_CR87, 0x2a }, { ZYD_CR88, 0x10 }, \
{ ZYD_CR89, 0x24 }, { ZYD_CR90, 0x18 }, { ZYD_CR91, 0x00 }, \
{ ZYD_CR92, 0x0a }, { ZYD_CR93, 0x00 }, { ZYD_CR94, 0x01 }, \
{ ZYD_CR95, 0x00 }, { ZYD_CR96, 0x40 }, { ZYD_CR97, 0x37 }, \
{ ZYD_CR98, 0x05 }, { ZYD_CR99, 0x28 }, { ZYD_CR100, 0x00 }, \
{ ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, { ZYD_CR103, 0x27 }, \
{ ZYD_CR104, 0x18 }, { ZYD_CR105, 0x12 }, { ZYD_CR106, 0x1a }, \
{ ZYD_CR107, 0x24 }, { ZYD_CR108, 0x0a }, { ZYD_CR109, 0x13 }, \
{ ZYD_CR110, 0x2f }, { ZYD_CR111, 0x27 }, { ZYD_CR112, 0x27 }, \
{ ZYD_CR113, 0x27 }, { ZYD_CR114, 0x27 }, { ZYD_CR115, 0x40 }, \
{ ZYD_CR116, 0x40 }, { ZYD_CR117, 0xf0 }, { ZYD_CR118, 0xf0 }, \
{ ZYD_CR119, 0x16 }, { ZYD_CR122, 0x00 }, { ZYD_CR127, 0x03 }, \
{ ZYD_CR131, 0x08 }, { ZYD_CR138, 0x28 }, { ZYD_CR148, 0x44 }, \
{ ZYD_CR150, 0x10 }, { ZYD_CR169, 0xbb }, { ZYD_CR170, 0xbb } \
}
#define ZYD_RFMD_RF \
{ \
0x000007, 0x07dd43, 0x080959, 0x0e6666, 0x116a57, 0x17dd43, \
0x1819f9, 0x1e6666, 0x214554, 0x25e7fa, 0x27fffa, 0x294128, \
0x2c0000, 0x300000, 0x340000, 0x381e0f, 0x6c180f \
}
#define ZYD_RFMD_CHANTABLE \
{ \
{ 0x181979, 0x1e6666 }, \
{ 0x181989, 0x1e6666 }, \
{ 0x181999, 0x1e6666 }, \
{ 0x1819a9, 0x1e6666 }, \
{ 0x1819b9, 0x1e6666 }, \
{ 0x1819c9, 0x1e6666 }, \
{ 0x1819d9, 0x1e6666 }, \
{ 0x1819e9, 0x1e6666 }, \
{ 0x1819f9, 0x1e6666 }, \
{ 0x181a09, 0x1e6666 }, \
{ 0x181a19, 0x1e6666 }, \
{ 0x181a29, 0x1e6666 }, \
{ 0x181a39, 0x1e6666 }, \
{ 0x181a60, 0x1c0000 } \
}
#define ZYD_AL2230_PHY \
{ \
{ ZYD_CR15, 0x20 }, { ZYD_CR23, 0x40 }, { ZYD_CR24, 0x20 }, \
{ ZYD_CR26, 0x11 }, { ZYD_CR28, 0x3e }, { ZYD_CR29, 0x00 }, \
{ ZYD_CR44, 0x33 }, { ZYD_CR106, 0x2a }, { ZYD_CR107, 0x1a }, \
{ ZYD_CR109, 0x09 }, { ZYD_CR110, 0x27 }, { ZYD_CR111, 0x2b }, \
{ ZYD_CR112, 0x2b }, { ZYD_CR119, 0x0a }, { ZYD_CR10, 0x89 }, \
{ ZYD_CR17, 0x28 }, { ZYD_CR26, 0x93 }, { ZYD_CR34, 0x30 }, \
{ ZYD_CR35, 0x3e }, { ZYD_CR41, 0x24 }, { ZYD_CR44, 0x32 }, \
{ ZYD_CR46, 0x96 }, { ZYD_CR47, 0x1e }, { ZYD_CR79, 0x58 }, \
{ ZYD_CR80, 0x30 }, { ZYD_CR81, 0x30 }, { ZYD_CR87, 0x0a }, \
{ ZYD_CR89, 0x04 }, { ZYD_CR92, 0x0a }, { ZYD_CR99, 0x28 }, \
{ ZYD_CR100, 0x00 }, { ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, \
{ ZYD_CR106, 0x24 }, { ZYD_CR107, 0x2a }, { ZYD_CR109, 0x09 }, \
{ ZYD_CR110, 0x13 }, { ZYD_CR111, 0x1f }, { ZYD_CR112, 0x1f }, \
{ ZYD_CR113, 0x27 }, { ZYD_CR114, 0x27 }, { ZYD_CR115, 0x24 }, \
{ ZYD_CR116, 0x24 }, { ZYD_CR117, 0xf4 }, { ZYD_CR118, 0xfc }, \
{ ZYD_CR119, 0x10 }, { ZYD_CR120, 0x4f }, { ZYD_CR121, 0x77 }, \
{ ZYD_CR122, 0xe0 }, { ZYD_CR137, 0x88 }, { ZYD_CR252, 0xff }, \
{ ZYD_CR253, 0xff }, { ZYD_CR251, 0x2f }, { ZYD_CR251, 0x3f }, \
{ ZYD_CR138, 0x28 }, { ZYD_CR203, 0x06 } \
}
#define ZYD_AL2230_PHY_B \
{ \
{ ZYD_CR10, 0x89 }, { ZYD_CR15, 0x20 }, { ZYD_CR17, 0x2B }, \
{ ZYD_CR23, 0x40 }, { ZYD_CR24, 0x20 }, { ZYD_CR26, 0x93 }, \
{ ZYD_CR28, 0x3e }, { ZYD_CR29, 0x00 }, { ZYD_CR33, 0x28 }, \
{ ZYD_CR34, 0x30 }, { ZYD_CR35, 0x3e }, { ZYD_CR41, 0x24 }, \
{ ZYD_CR44, 0x32 }, { ZYD_CR46, 0x99 }, { ZYD_CR47, 0x1e }, \
{ ZYD_CR48, 0x06 }, { ZYD_CR49, 0xf9 }, { ZYD_CR51, 0x01 }, \
{ ZYD_CR52, 0x80 }, { ZYD_CR53, 0x7e }, { ZYD_CR65, 0x00 }, \
{ ZYD_CR66, 0x00 }, { ZYD_CR67, 0x00 }, { ZYD_CR68, 0x00 }, \
{ ZYD_CR69, 0x28 }, { ZYD_CR79, 0x58 }, { ZYD_CR80, 0x30 }, \
{ ZYD_CR81, 0x30 }, { ZYD_CR87, 0x0a }, { ZYD_CR89, 0x04 }, \
{ ZYD_CR91, 0x00 }, { ZYD_CR92, 0x0a }, { ZYD_CR98, 0x8d }, \
{ ZYD_CR99, 0x00 }, { ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, \
{ ZYD_CR106, 0x24 }, { ZYD_CR107, 0x2a }, { ZYD_CR109, 0x13 }, \
{ ZYD_CR110, 0x1f }, { ZYD_CR111, 0x1f }, { ZYD_CR112, 0x1f }, \
{ ZYD_CR113, 0x27 }, { ZYD_CR114, 0x27 }, { ZYD_CR115, 0x26 }, \
{ ZYD_CR116, 0x24 }, { ZYD_CR117, 0xfa }, { ZYD_CR118, 0xfa }, \
{ ZYD_CR119, 0x10 }, { ZYD_CR120, 0x4f }, { ZYD_CR121, 0x6c }, \
{ ZYD_CR122, 0xfc }, { ZYD_CR123, 0x57 }, { ZYD_CR125, 0xad }, \
{ ZYD_CR126, 0x6c }, { ZYD_CR127, 0x03 }, { ZYD_CR137, 0x50 }, \
{ ZYD_CR138, 0xa8 }, { ZYD_CR144, 0xac }, { ZYD_CR150, 0x0d }, \
{ ZYD_CR252, 0x34 }, { ZYD_CR253, 0x34 } \
}
#define ZYD_AL2230_PHY_PART1 \
{ \
{ ZYD_CR240, 0x57 }, { ZYD_CR9, 0xe0 } \
}
#define ZYD_AL2230_PHY_PART2 \
{ \
{ ZYD_CR251, 0x2f }, { ZYD_CR251, 0x7f }, \
}
#define ZYD_AL2230_PHY_PART3 \
{ \
{ ZYD_CR128, 0x14 }, { ZYD_CR129, 0x12 }, { ZYD_CR130, 0x10 }, \
}
#define ZYD_AL2230S_PHY_INIT \
{ \
{ ZYD_CR47, 0x1e }, { ZYD_CR106, 0x22 }, { ZYD_CR107, 0x2a }, \
{ ZYD_CR109, 0x13 }, { ZYD_CR118, 0xf8 }, { ZYD_CR119, 0x12 }, \
{ ZYD_CR122, 0xe0 }, { ZYD_CR128, 0x10 }, { ZYD_CR129, 0x0e }, \
{ ZYD_CR130, 0x10 } \
}
#define ZYD_AL2230_PHY_FINI_PART1 \
{ \
{ ZYD_CR80, 0x30 }, { ZYD_CR81, 0x30 }, { ZYD_CR79, 0x58 }, \
{ ZYD_CR12, 0xf0 }, { ZYD_CR77, 0x1b }, { ZYD_CR78, 0x58 }, \
{ ZYD_CR203, 0x06 }, { ZYD_CR240, 0x80 }, \
}
#define ZYD_AL2230_RF_PART1 \
{ \
0x03f790, 0x033331, 0x00000d, 0x0b3331, 0x03b812, 0x00fff3 \
}
#define ZYD_AL2230_RF_PART2 \
{ \
0x000da4, 0x0f4dc5, 0x0805b6, 0x011687, 0x000688, 0x0403b9, \
0x00dbba, 0x00099b, 0x0bdffc, 0x00000d, 0x00500f \
}
#define ZYD_AL2230_RF_PART3 \
{ \
0x00d00f, 0x004c0f, 0x00540f, 0x00700f, 0x00500f \
}
#define ZYD_AL2230_RF_B \
{ \
0x03f790, 0x033331, 0x00000d, 0x0b3331, 0x03b812, 0x00fff3, \
0x0005a4, 0x0f4dc5, 0x0805b6, 0x0146c7, 0x000688, 0x0403b9, \
0x00dbba, 0x00099b, 0x0bdffc, 0x00000d, 0x00580f \
}
#define ZYD_AL2230_RF_B_PART1 \
{ \
0x8cccd0, 0x481dc0, 0xcfff00, 0x25a000 \
}
#define ZYD_AL2230_RF_B_PART2 \
{ \
0x25a000, 0xa3b2f0, 0x6da010, 0xe36280, 0x116000, 0x9dc020, \
0x5ddb00, 0xd99000, 0x3ffbd0, 0xb00000, 0xf01a00 \
}
#define ZYD_AL2230_RF_B_PART3 \
{ \
0xf01b00, 0xf01e00, 0xf01a00 \
}
#define ZYD_AL2230_CHANTABLE \
{ \
{ 0x03f790, 0x033331, 0x00000d }, \
{ 0x03f790, 0x0b3331, 0x00000d }, \
{ 0x03e790, 0x033331, 0x00000d }, \
{ 0x03e790, 0x0b3331, 0x00000d }, \
{ 0x03f7a0, 0x033331, 0x00000d }, \
{ 0x03f7a0, 0x0b3331, 0x00000d }, \
{ 0x03e7a0, 0x033331, 0x00000d }, \
{ 0x03e7a0, 0x0b3331, 0x00000d }, \
{ 0x03f7b0, 0x033331, 0x00000d }, \
{ 0x03f7b0, 0x0b3331, 0x00000d }, \
{ 0x03e7b0, 0x033331, 0x00000d }, \
{ 0x03e7b0, 0x0b3331, 0x00000d }, \
{ 0x03f7c0, 0x033331, 0x00000d }, \
{ 0x03e7c0, 0x066661, 0x00000d } \
}
#define ZYD_AL2230_CHANTABLE_B \
{ \
{ 0x09efc0, 0x8cccc0, 0xb00000 }, \
{ 0x09efc0, 0x8cccd0, 0xb00000 }, \
{ 0x09e7c0, 0x8cccc0, 0xb00000 }, \
{ 0x09e7c0, 0x8cccd0, 0xb00000 }, \
{ 0x05efc0, 0x8cccc0, 0xb00000 }, \
{ 0x05efc0, 0x8cccd0, 0xb00000 }, \
{ 0x05e7c0, 0x8cccc0, 0xb00000 }, \
{ 0x05e7c0, 0x8cccd0, 0xb00000 }, \
{ 0x0defc0, 0x8cccc0, 0xb00000 }, \
{ 0x0defc0, 0x8cccd0, 0xb00000 }, \
{ 0x0de7c0, 0x8cccc0, 0xb00000 }, \
{ 0x0de7c0, 0x8cccd0, 0xb00000 }, \
{ 0x03efc0, 0x8cccc0, 0xb00000 }, \
{ 0x03e7c0, 0x866660, 0xb00000 } \
}
#define ZYD_AL7230B_PHY_1 \
{ \
{ ZYD_CR240, 0x57 }, { ZYD_CR15, 0x20 }, { ZYD_CR23, 0x40 }, \
{ ZYD_CR24, 0x20 }, { ZYD_CR26, 0x11 }, { ZYD_CR28, 0x3e }, \
{ ZYD_CR29, 0x00 }, { ZYD_CR44, 0x33 }, { ZYD_CR106, 0x22 }, \
{ ZYD_CR107, 0x1a }, { ZYD_CR109, 0x09 }, { ZYD_CR110, 0x27 }, \
{ ZYD_CR111, 0x2b }, { ZYD_CR112, 0x2b }, { ZYD_CR119, 0x0a }, \
{ ZYD_CR122, 0xfc }, { ZYD_CR10, 0x89 }, { ZYD_CR17, 0x28 }, \
{ ZYD_CR26, 0x93 }, { ZYD_CR34, 0x30 }, { ZYD_CR35, 0x3e }, \
{ ZYD_CR41, 0x24 }, { ZYD_CR44, 0x32 }, { ZYD_CR46, 0x96 }, \
{ ZYD_CR47, 0x1e }, { ZYD_CR79, 0x58 }, { ZYD_CR80, 0x30 }, \
{ ZYD_CR81, 0x30 }, { ZYD_CR87, 0x0a }, { ZYD_CR89, 0x04 }, \
{ ZYD_CR92, 0x0a }, { ZYD_CR99, 0x28 }, { ZYD_CR100, 0x02 }, \
{ ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, { ZYD_CR106, 0x22 }, \
{ ZYD_CR107, 0x3f }, { ZYD_CR109, 0x09 }, { ZYD_CR110, 0x1f }, \
{ ZYD_CR111, 0x1f }, { ZYD_CR112, 0x1f }, { ZYD_CR113, 0x27 }, \
{ ZYD_CR114, 0x27 }, { ZYD_CR115, 0x24 }, { ZYD_CR116, 0x3f }, \
{ ZYD_CR117, 0xfa }, { ZYD_CR118, 0xfc }, { ZYD_CR119, 0x10 }, \
{ ZYD_CR120, 0x4f }, { ZYD_CR121, 0x77 }, { ZYD_CR137, 0x88 }, \
{ ZYD_CR138, 0xa8 }, { ZYD_CR252, 0x34 }, { ZYD_CR253, 0x34 }, \
{ ZYD_CR251, 0x2f } \
}
#define ZYD_AL7230B_PHY_2 \
{ \
{ ZYD_CR251, 0x3f }, { ZYD_CR128, 0x14 }, { ZYD_CR129, 0x12 }, \
{ ZYD_CR130, 0x10 }, { ZYD_CR38, 0x38 }, { ZYD_CR136, 0xdf } \
}
#define ZYD_AL7230B_PHY_3 \
{ \
{ ZYD_CR203, 0x06 }, { ZYD_CR240, 0x80 } \
}
#define ZYD_AL7230B_RF_1 \
{ \
0x09ec04, 0x8cccc8, 0x4ff821, 0xc5fbfc, 0x21ebfe, 0xafd401, \
0x6cf56a, 0xe04073, 0x193d76, 0x9dd844, 0x500007, 0xd8c010, \
0x3c9000, 0xbfffff, 0x700000, 0xf15d58 \
}
#define ZYD_AL7230B_RF_2 \
{ \
0xf15d59, 0xf15d5c, 0xf15d58 \
}
#define ZYD_AL7230B_RF_SETCHANNEL \
{ \
0x4ff821, 0xc5fbfc, 0x21ebfe, 0xafd401, 0x6cf56a, 0xe04073, \
0x193d76, 0x9dd844, 0x500007, 0xd8c010, 0x3c9000, 0xf15d58 \
}
#define ZYD_AL7230B_CHANTABLE \
{ \
{ 0x09ec00, 0x8cccc8 }, \
{ 0x09ec00, 0x8cccd8 }, \
{ 0x09ec00, 0x8cccc0 }, \
{ 0x09ec00, 0x8cccd0 }, \
{ 0x05ec00, 0x8cccc8 }, \
{ 0x05ec00, 0x8cccd8 }, \
{ 0x05ec00, 0x8cccc0 }, \
{ 0x05ec00, 0x8cccd0 }, \
{ 0x0dec00, 0x8cccc8 }, \
{ 0x0dec00, 0x8cccd8 }, \
{ 0x0dec00, 0x8cccc0 }, \
{ 0x0dec00, 0x8cccd0 }, \
{ 0x03ec00, 0x8cccc8 }, \
{ 0x03ec00, 0x866660 } \
}
#define ZYD_AL2210_PHY \
{ \
{ ZYD_CR9, 0xe0 }, { ZYD_CR10, 0x91 }, { ZYD_CR12, 0x90 }, \
{ ZYD_CR15, 0xd0 }, { ZYD_CR16, 0x40 }, { ZYD_CR17, 0x58 }, \
{ ZYD_CR18, 0x04 }, { ZYD_CR23, 0x66 }, { ZYD_CR24, 0x14 }, \
{ ZYD_CR26, 0x90 }, { ZYD_CR31, 0x80 }, { ZYD_CR34, 0x06 }, \
{ ZYD_CR35, 0x3e }, { ZYD_CR38, 0x38 }, { ZYD_CR46, 0x90 }, \
{ ZYD_CR47, 0x1e }, { ZYD_CR64, 0x64 }, { ZYD_CR79, 0xb5 }, \
{ ZYD_CR80, 0x38 }, { ZYD_CR81, 0x30 }, { ZYD_CR113, 0xc0 }, \
{ ZYD_CR127, 0x03 } \
}
#define ZYD_AL2210_RF \
{ \
0x2396c0, 0x00fcb1, 0x358132, 0x0108b3, 0xc77804, 0x456415, \
0xff2226, 0x806667, 0x7860f8, 0xbb01c9, 0x00000a, 0x00000b \
}
#define ZYD_AL2210_CHANTABLE \
{ \
0x0196c0, 0x019710, 0x019760, 0x0197b0, 0x019800, 0x019850, \
0x0198a0, 0x0198f0, 0x019940, 0x019990, 0x0199e0, 0x019a30, \
0x019a80, 0x019b40 \
}
#define ZYD_GCT_PHY \
{ \
{ ZYD_CR10, 0x89 }, { ZYD_CR15, 0x20 }, { ZYD_CR17, 0x28 }, \
{ ZYD_CR23, 0x38 }, { ZYD_CR24, 0x20 }, { ZYD_CR26, 0x93 }, \
{ ZYD_CR27, 0x15 }, { ZYD_CR28, 0x3e }, { ZYD_CR29, 0x00 }, \
{ ZYD_CR33, 0x28 }, { ZYD_CR34, 0x30 }, { ZYD_CR35, 0x43 }, \
{ ZYD_CR41, 0x24 }, { ZYD_CR44, 0x32 }, { ZYD_CR46, 0x92 }, \
{ ZYD_CR47, 0x1e }, { ZYD_CR48, 0x04 }, { ZYD_CR49, 0xfa }, \
{ ZYD_CR79, 0x58 }, { ZYD_CR80, 0x30 }, { ZYD_CR81, 0x30 }, \
{ ZYD_CR87, 0x0a }, { ZYD_CR89, 0x04 }, { ZYD_CR91, 0x00 }, \
{ ZYD_CR92, 0x0a }, { ZYD_CR98, 0x8d }, { ZYD_CR99, 0x28 }, \
{ ZYD_CR100, 0x02 }, { ZYD_CR101, 0x09 }, { ZYD_CR102, 0x27 }, \
{ ZYD_CR106, 0x1c }, { ZYD_CR107, 0x1c }, { ZYD_CR109, 0x13 }, \
{ ZYD_CR110, 0x1f }, { ZYD_CR111, 0x13 }, { ZYD_CR112, 0x1f }, \
{ ZYD_CR113, 0x27 }, { ZYD_CR114, 0x23 }, { ZYD_CR115, 0x24 }, \
{ ZYD_CR116, 0x24 }, { ZYD_CR117, 0xfa }, { ZYD_CR118, 0xf0 }, \
{ ZYD_CR119, 0x1a }, { ZYD_CR120, 0x4f }, { ZYD_CR121, 0x1f }, \
{ ZYD_CR122, 0xf0 }, { ZYD_CR123, 0x57 }, { ZYD_CR125, 0xad }, \
{ ZYD_CR126, 0x6c }, { ZYD_CR127, 0x03 }, { ZYD_CR128, 0x14 }, \
{ ZYD_CR129, 0x12 }, { ZYD_CR130, 0x10 }, { ZYD_CR137, 0x50 }, \
{ ZYD_CR138, 0xa8 }, { ZYD_CR144, 0xac }, { ZYD_CR146, 0x20 }, \
{ ZYD_CR252, 0xff }, { ZYD_CR253, 0xff } \
}
#define ZYD_GCT_RF \
{ \
0x40002b, 0x519e4f, 0x6f81ad, 0x73fffe, 0x25f9c, 0x100047, \
0x200999, 0x307602, 0x346063, \
}
#define ZYD_GCT_VCO \
{ \
{ 0x664d, 0x604d, 0x6675, 0x6475, 0x6655, 0x6455, 0x6665 }, \
{ 0x666d, 0x606d, 0x664d, 0x644d, 0x6675, 0x6475, 0x6655 }, \
{ 0x665d, 0x605d, 0x666d, 0x646d, 0x664d, 0x644d, 0x6675 }, \
{ 0x667d, 0x607d, 0x665d, 0x645d, 0x666d, 0x646d, 0x664d }, \
{ 0x6643, 0x6043, 0x667d, 0x647d, 0x665d, 0x645d, 0x666d }, \
{ 0x6663, 0x6063, 0x6643, 0x6443, 0x667d, 0x647d, 0x665d }, \
{ 0x6653, 0x6053, 0x6663, 0x6463, 0x6643, 0x6443, 0x667d }, \
{ 0x6673, 0x6073, 0x6653, 0x6453, 0x6663, 0x6463, 0x6643 }, \
{ 0x664b, 0x604b, 0x6673, 0x6473, 0x6653, 0x6453, 0x6663 }, \
{ 0x666b, 0x606b, 0x664b, 0x644b, 0x6673, 0x6473, 0x6653 }, \
{ 0x665b, 0x605b, 0x666b, 0x646b, 0x664b, 0x644b, 0x6673 } \
}
#define ZYD_GCT_TXGAIN \
{ \
0x0e313, 0x0fb13, 0x0e093, 0x0f893, 0x0ea93, 0x1f093, 0x1f493, \
0x1f693, 0x1f393, 0x1f35b, 0x1e6db, 0x1ff3f, 0x1ffff, 0x361d7, \
0x37fbf, 0x3ff8b, 0x3ff33, 0x3fb3f, 0x3ffff \
}
#define ZYD_GCT_CHANNEL_ACAL \
{ \
0x106847, 0x106847, 0x106867, 0x106867, 0x106867, 0x106867, \
0x106857, 0x106857, 0x106857, 0x106857, 0x106877, 0x106877, \
0x106877, 0x10684f \
}
#define ZYD_GCT_CHANNEL_STD \
{ \
0x100047, 0x100047, 0x100067, 0x100067, 0x100067, 0x100067, \
0x100057, 0x100057, 0x100057, 0x100057, 0x100077, 0x100077, \
0x100077, 0x10004f \
}
#define ZYD_GCT_CHANNEL_DIV \
{ \
0x200999, 0x20099b, 0x200998, 0x20099a, 0x200999, 0x20099b, \
0x200998, 0x20099a, 0x200999, 0x20099b, 0x200998, 0x20099a, \
0x200999, 0x200ccc \
}
#define ZYD_MAXIM2_PHY \
{ \
{ ZYD_CR23, 0x40 }, { ZYD_CR15, 0x20 }, { ZYD_CR28, 0x3e }, \
{ ZYD_CR29, 0x00 }, { ZYD_CR26, 0x11 }, { ZYD_CR44, 0x33 }, \
{ ZYD_CR106, 0x2a }, { ZYD_CR107, 0x1a }, { ZYD_CR109, 0x2b }, \
{ ZYD_CR110, 0x2b }, { ZYD_CR111, 0x2b }, { ZYD_CR112, 0x2b }, \
{ ZYD_CR10, 0x89 }, { ZYD_CR17, 0x20 }, { ZYD_CR26, 0x93 }, \
{ ZYD_CR34, 0x30 }, { ZYD_CR35, 0x40 }, { ZYD_CR41, 0x24 }, \
{ ZYD_CR44, 0x32 }, { ZYD_CR46, 0x90 }, { ZYD_CR89, 0x18 }, \
{ ZYD_CR92, 0x0a }, { ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, \
{ ZYD_CR106, 0x20 }, { ZYD_CR107, 0x24 }, { ZYD_CR109, 0x09 }, \
{ ZYD_CR110, 0x13 }, { ZYD_CR111, 0x13 }, { ZYD_CR112, 0x13 }, \
{ ZYD_CR113, 0x27 }, { ZYD_CR114, 0x27 }, { ZYD_CR115, 0x24 }, \
{ ZYD_CR116, 0x24 }, { ZYD_CR117, 0xf4 }, { ZYD_CR118, 0xfa }, \
{ ZYD_CR120, 0x4f }, { ZYD_CR121, 0x77 }, { ZYD_CR122, 0xfe }, \
{ ZYD_CR10, 0x89 }, { ZYD_CR17, 0x20 }, { ZYD_CR26, 0x93 }, \
{ ZYD_CR34, 0x30 }, { ZYD_CR35, 0x40 }, { ZYD_CR41, 0x24 }, \
{ ZYD_CR44, 0x32 }, { ZYD_CR46, 0x90 }, { ZYD_CR79, 0x58 }, \
{ ZYD_CR80, 0x30 }, { ZYD_CR81, 0x30 }, { ZYD_CR89, 0x18 }, \
{ ZYD_CR92, 0x0a }, { ZYD_CR101, 0x13 }, { ZYD_CR102, 0x27 }, \
{ ZYD_CR106, 0x20 }, { ZYD_CR107, 0x24 }, { ZYD_CR109, 0x09 }, \
{ ZYD_CR110, 0x13 }, { ZYD_CR111, 0x13 }, { ZYD_CR112, 0x13 }, \
{ ZYD_CR113, 0x27 }, { ZYD_CR114, 0x27 }, { ZYD_CR115, 0x24 }, \
{ ZYD_CR116, 0x24 }, { ZYD_CR117, 0xf4 }, { ZYD_CR118, 0x00 }, \
{ ZYD_CR120, 0x4f }, { ZYD_CR121, 0x06 }, { ZYD_CR122, 0xfe } \
}
#define ZYD_MAXIM2_RF \
{ \
0x33334, 0x10a03, 0x00400, 0x00ca1, 0x10072, 0x18645, 0x04006, \
0x000a7, 0x08258, 0x03fc9, 0x0040a, 0x0000b, 0x0026c \
}
#define ZYD_MAXIM2_CHANTABLE_F \
{ \
0x33334, 0x08884, 0x1ddd4, 0x33334, 0x08884, 0x1ddd4, 0x33334, \
0x08884, 0x1ddd4, 0x33334, 0x08884, 0x1ddd4, 0x33334, 0x26664 \
}
#define ZYD_MAXIM2_CHANTABLE \
{ \
{ 0x33334, 0x10a03 }, \
{ 0x08884, 0x20a13 }, \
{ 0x1ddd4, 0x30a13 }, \
{ 0x33334, 0x10a13 }, \
{ 0x08884, 0x20a23 }, \
{ 0x1ddd4, 0x30a23 }, \
{ 0x33334, 0x10a23 }, \
{ 0x08884, 0x20a33 }, \
{ 0x1ddd4, 0x30a33 }, \
{ 0x33334, 0x10a33 }, \
{ 0x08884, 0x20a43 }, \
{ 0x1ddd4, 0x30a43 }, \
{ 0x33334, 0x10a43 }, \
{ 0x26664, 0x20a53 } \
}
#define ZYD_TX_RATEDIV \
{ \
0x1, 0x2, 0xb, 0xb, 0x1, 0x1, 0x1, 0x1, 0x30, 0x18, 0xc, 0x6, \
0x36, 0x24, 0x12, 0x9 \
}
/*
* Control pipe requests.
*/
#define ZYD_DOWNLOADREQ 0x30
#define ZYD_DOWNLOADSTS 0x31
#define ZYD_READFWDATAREQ 0x32
/* possible values for register ZYD_CR_INTERRUPT */
#define ZYD_HWINT_MASK 0x004f0000
/* possible values for register ZYD_MAC_MISC */
#define ZYD_UNLOCK_PHY_REGS 0x80
/* possible values for register ZYD_MAC_ENCRYPTION_TYPE */
#define ZYD_ENC_SNIFFER 8
/* flags for register ZYD_MAC_RXFILTER */
#define ZYD_FILTER_ASS_REQ (1 << 0)
#define ZYD_FILTER_ASS_RSP (1 << 1)
#define ZYD_FILTER_REASS_REQ (1 << 2)
#define ZYD_FILTER_REASS_RSP (1 << 3)
#define ZYD_FILTER_PRB_REQ (1 << 4)
#define ZYD_FILTER_PRB_RSP (1 << 5)
#define ZYD_FILTER_BCN (1 << 8)
#define ZYD_FILTER_ATIM (1 << 9)
#define ZYD_FILTER_DEASS (1 << 10)
#define ZYD_FILTER_AUTH (1 << 11)
#define ZYD_FILTER_DEAUTH (1 << 12)
#define ZYD_FILTER_PS_POLL (1 << 26)
#define ZYD_FILTER_RTS (1 << 27)
#define ZYD_FILTER_CTS (1 << 28)
#define ZYD_FILTER_ACK (1 << 29)
#define ZYD_FILTER_CFE (1 << 30)
#define ZYD_FILTER_CFE_A (1U << 31)
/* helpers for register ZYD_MAC_RXFILTER */
#define ZYD_FILTER_MONITOR 0xffffffff
#define ZYD_FILTER_BSS \
(ZYD_FILTER_ASS_REQ | ZYD_FILTER_ASS_RSP | \
ZYD_FILTER_REASS_REQ | ZYD_FILTER_REASS_RSP | \
ZYD_FILTER_PRB_REQ | ZYD_FILTER_PRB_RSP | \
(0x3 << 6) | \
ZYD_FILTER_BCN | ZYD_FILTER_ATIM | ZYD_FILTER_DEASS | \
ZYD_FILTER_AUTH | ZYD_FILTER_DEAUTH | \
(0x7 << 13) | \
ZYD_FILTER_PS_POLL | ZYD_FILTER_ACK)
#define ZYD_FILTER_HOSTAP \
(ZYD_FILTER_ASS_REQ | ZYD_FILTER_REASS_REQ | \
ZYD_FILTER_PRB_REQ | ZYD_FILTER_DEASS | ZYD_FILTER_AUTH | \
ZYD_FILTER_DEAUTH | ZYD_FILTER_PS_POLL)
struct zyd_tx_desc {
uint8_t phy;
#define ZYD_TX_PHY_SIGNAL(x) ((x) & 0xf)
#define ZYD_TX_PHY_OFDM (1 << 4)
#define ZYD_TX_PHY_SHPREAMBLE (1 << 5) /* CCK */
#define ZYD_TX_PHY_5GHZ (1 << 5) /* OFDM */
uint16_t len;
uint8_t flags;
#define ZYD_TX_FLAG_BACKOFF (1 << 0)
#define ZYD_TX_FLAG_MULTICAST (1 << 1)
#define ZYD_TX_FLAG_TYPE(x) (((x) & 0x3) << 2)
#define ZYD_TX_TYPE_DATA 0
#define ZYD_TX_TYPE_PS_POLL 1
#define ZYD_TX_TYPE_MGMT 2
#define ZYD_TX_TYPE_CTL 3
#define ZYD_TX_FLAG_WAKEUP (1 << 4)
#define ZYD_TX_FLAG_RTS (1 << 5)
#define ZYD_TX_FLAG_ENCRYPT (1 << 6)
#define ZYD_TX_FLAG_CTS_TO_SELF (1 << 7)
uint16_t pktlen;
uint16_t plcp_length;
uint8_t plcp_service;
#define ZYD_PLCP_LENGEXT 0x80
uint16_t nextlen;
} __packed;
struct zyd_plcphdr {
uint8_t signal;
uint8_t reserved[2];
uint16_t service; /* unaligned! */
} __packed;
struct zyd_rx_stat {
uint8_t signal_cck;
uint8_t rssi;
uint8_t signal_ofdm;
uint8_t cipher;
#define ZYD_RX_CIPHER_WEP64 1
#define ZYD_RX_CIPHER_TKIP 2
#define ZYD_RX_CIPHER_AES 4
#define ZYD_RX_CIPHER_WEP128 5
#define ZYD_RX_CIPHER_WEP256 6
#define ZYD_RX_CIPHER_WEP \
(ZYD_RX_CIPHER_WEP64 | ZYD_RX_CIPHER_WEP128 | ZYD_RX_CIPHER_WEP256)
uint8_t flags;
#define ZYD_RX_OFDM (1 << 0)
#define ZYD_RX_TIMEOUT (1 << 1)
#define ZYD_RX_OVERRUN (1 << 2)
#define ZYD_RX_DECRYPTERR (1 << 3)
#define ZYD_RX_BADCRC32 (1 << 4)
#define ZYD_RX_NOT2ME (1 << 5)
#define ZYD_RX_BADCRC16 (1 << 6)
#define ZYD_RX_ERROR (1 << 7)
} __packed;
/* this structure may be unaligned */
struct zyd_rx_desc {
#define ZYD_MAX_RXFRAMECNT 3
uWord len[ZYD_MAX_RXFRAMECNT];
uWord tag;
#define ZYD_TAG_MULTIFRAME 0x697e
} __packed;
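/*
 * Illustrative sketch (hypothetical helper, not taken from the driver):
 * when a bulk-in transfer carries several frames, the hardware appends a
 * zyd_rx_desc whose tag reads ZYD_TAG_MULTIFRAME.  Because the descriptor
 * may be unaligned, its uWord fields are read byte-wise with UGETW()
 * from the USB stack (<dev/usb/usb_endian.h>).
 */
static __inline int
zyd_rx_is_multiframe(const uint8_t *buf, int len)
{
	const struct zyd_rx_desc *desc;

	if (len < (int)sizeof(struct zyd_rx_desc))
		return (0);
	/* the descriptor sits at the very end of the transfer */
	desc = (const struct zyd_rx_desc *)(buf + len - sizeof(*desc));
	return (UGETW(desc->tag) == ZYD_TAG_MULTIFRAME);
}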
/* I2C-like bus */
struct zyd_rfwrite_cmd {
uint16_t code;
uint16_t width;
uint16_t bit[32];
#define ZYD_RF_IF_LE (1 << 1)
#define ZYD_RF_CLK (1 << 2)
#define ZYD_RF_DATA (1 << 3)
} __packed;
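/*
 * Illustrative sketch, assuming the bit-banged protocol implied by the
 * flag names above: an RF register value is serialized MSB-first into
 * one 16-bit word per bit, each word carrying the control lines for that
 * clock tick, with ZYD_RF_DATA set for a '1' bit.  The 'base' word (the
 * current CR203 control bits in the driver) and the opcode value 2 are
 * assumptions of this sketch; 'width' must not exceed nitems(req->bit).
 */
static void
zyd_rf_serialize(struct zyd_rfwrite_cmd *req, uint32_t val, int width,
    uint16_t base)
{
	int i;

	req->code = htole16(2);		/* assumed RF-write opcode */
	req->width = htole16(width);
	for (i = 0; i < width; i++) {
		req->bit[i] = htole16(base);
		if (val & (1U << (width - 1 - i)))
			req->bit[i] |= htole16(ZYD_RF_DATA);
	}
}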
struct zyd_cmd {
uint16_t code;
#define ZYD_CMD_IOWR 0x0021 /* write HMAC or PHY register */
#define ZYD_CMD_IORD 0x0022 /* read HMAC or PHY register */
#define ZYD_CMD_RFCFG 0x0023 /* write RF register */
#define ZYD_NOTIF_IORD 0x9001 /* response for ZYD_CMD_IORD */
#define ZYD_NOTIF_MACINTR 0x9001 /* interrupt notification */
#define ZYD_NOTIF_RETRYSTATUS 0xa001 /* Tx retry notification */
uint8_t data[64];
} __packed;
/* structure for command ZYD_CMD_IOWR */
struct zyd_pair {
uint16_t reg;
/* helper macros to read/write 32-bit registers */
#define ZYD_REG32_LO(reg) (reg)
#define ZYD_REG32_HI(reg) \
((reg) + ((((reg) & 0xf000) == 0x9000) ? 2 : 1))
uint16_t val;
} __packed;
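/*
 * Illustrative sketch (hypothetical helper): a 32-bit register is split
 * into two 16-bit halves; ZYD_REG32_HI() accounts for the different
 * spacing of the control registers in the 0x9000 range.  The two pairs
 * built here would be written with a single ZYD_CMD_IOWR command.
 */
static void
zyd_pack_reg32(struct zyd_pair pair[2], uint16_t reg, uint32_t val)
{
	pair[0].reg = htole16(ZYD_REG32_HI(reg));
	pair[0].val = htole16(val >> 16);
	pair[1].reg = htole16(ZYD_REG32_LO(reg));
	pair[1].val = htole16(val & 0xffff);
}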
/* structure for notification ZYD_NOTIF_RETRYSTATUS */
struct zyd_notif_retry {
uint16_t rate;
uint8_t macaddr[IEEE80211_ADDR_LEN];
uint16_t count;
} __packed;
#define ZYD_CONFIG_INDEX 0
#define ZYD_IFACE_INDEX 0
#define ZYD_INTR_TIMEOUT 1000
#define ZYD_TX_TIMEOUT 10000
#define ZYD_MAX_TXBUFSZ \
(sizeof(struct zyd_tx_desc) + MCLBYTES)
#define ZYD_MIN_FRAGSZ \
(sizeof(struct zyd_plcphdr) + IEEE80211_MIN_LEN + \
sizeof(struct zyd_rx_stat))
#define ZYD_MIN_RXBUFSZ ZYD_MIN_FRAGSZ
#define ZYX_MAX_RXBUFSZ \
((sizeof (struct zyd_plcphdr) + IEEE80211_MAX_LEN + \
sizeof (struct zyd_rx_stat)) * ZYD_MAX_RXFRAMECNT + \
sizeof (struct zyd_rx_desc))
#define ZYD_TX_DESC_SIZE (sizeof (struct zyd_tx_desc))
#define ZYD_RX_LIST_CNT 1
#define ZYD_TX_LIST_CNT 5
#define ZYD_CMD_FLAG_READ (1 << 0)
#define ZYD_CMD_FLAG_SENT (1 << 1)
/* quickly determine if a given rate is CCK or OFDM */
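/*
 * The rate argument is in the usual 500 kbps units: 2/4/11/22 are the CCK
 * rates (1/2/5.5/11 Mbps) and 12 through 108 the OFDM rates (6-54 Mbps),
 * which is why 22 (11 Mbps CCK) must be excluded from the ">= 12" test.
 */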
#define ZYD_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22)
struct zyd_phy_pair {
uint16_t reg;
uint8_t val;
};
struct zyd_mac_pair {
uint16_t reg;
uint32_t val;
};
struct zyd_tx_data {
STAILQ_ENTRY(zyd_tx_data) next;
struct zyd_softc *sc;
struct zyd_tx_desc desc;
struct mbuf *m;
struct ieee80211_node *ni;
int rate;
};
typedef STAILQ_HEAD(, zyd_tx_data) zyd_txdhead;
struct zyd_rx_data {
struct mbuf *m;
int rssi;
};
struct zyd_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_antsignal;
int8_t wr_antnoise;
} __packed __aligned(8);
#define ZYD_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL))
struct zyd_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint8_t wt_rate;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
} __packed __aligned(8);
#define ZYD_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL))
struct zyd_softc; /* forward declaration */
struct zyd_rf {
/* RF methods */
int (*init)(struct zyd_rf *);
int (*switch_radio)(struct zyd_rf *, int);
int (*set_channel)(struct zyd_rf *, uint8_t);
int (*bandedge6)(struct zyd_rf *,
struct ieee80211_channel *);
/* RF attributes */
struct zyd_softc *rf_sc; /* back-pointer */
int width;
int idx; /* for GIT RF */
int update_pwr;
};
struct zyd_rq {
struct zyd_cmd *cmd;
const uint16_t *idata;
struct zyd_pair *odata;
int ilen;
int olen;
int flags;
STAILQ_ENTRY(zyd_rq) rq;
};
struct zyd_vap {
struct ieee80211vap vap;
int (*newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define ZYD_VAP(vap) ((struct zyd_vap *)(vap))
enum {
ZYD_BULK_WR,
ZYD_BULK_RD,
ZYD_INTR_WR,
ZYD_INTR_RD,
ZYD_N_TRANSFER = 4,
};
struct zyd_softc {
- struct ifnet *sc_ifp;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
device_t sc_dev;
struct usb_device *sc_udev;
struct usb_xfer *sc_xfer[ZYD_N_TRANSFER];
int sc_flags;
#define ZYD_FLAG_FWLOADED (1 << 0)
#define ZYD_FLAG_INITONCE (1 << 1)
#define ZYD_FLAG_INITDONE (1 << 2)
#define ZYD_FLAG_DETACHED (1 << 3)
+#define ZYD_FLAG_RUNNING (1 << 4)
struct zyd_rf sc_rf;
STAILQ_HEAD(, zyd_rq) sc_rtx;
STAILQ_HEAD(, zyd_rq) sc_rqh;
- uint8_t sc_bssid[IEEE80211_ADDR_LEN];
uint16_t sc_fwbase;
uint8_t sc_regdomain;
uint8_t sc_macrev;
uint16_t sc_fwrev;
uint8_t sc_rfrev;
uint8_t sc_parev;
uint8_t sc_al2230s;
uint8_t sc_bandedge6;
uint8_t sc_newphy;
uint8_t sc_cckgain;
uint8_t sc_fix_cr157;
uint8_t sc_ledtype;
uint8_t sc_txled;
uint32_t sc_atim_wnd;
uint32_t sc_pre_tbtt;
uint32_t sc_bcn_int;
uint8_t sc_pwrcal[14];
uint8_t sc_pwrint[14];
uint8_t sc_ofdm36_cal[14];
uint8_t sc_ofdm48_cal[14];
uint8_t sc_ofdm54_cal[14];
struct mtx sc_mtx;
struct zyd_tx_data tx_data[ZYD_TX_LIST_CNT];
zyd_txdhead tx_q;
zyd_txdhead tx_free;
int tx_nfree;
struct zyd_rx_desc sc_rx_desc;
struct zyd_rx_data sc_rx_data[ZYD_MAX_RXFRAMECNT];
int sc_rx_count;
struct zyd_cmd sc_ibuf;
struct zyd_rx_radiotap_header sc_rxtap;
int sc_rxtap_len;
struct zyd_tx_radiotap_header sc_txtap;
int sc_txtap_len;
};
#define ZYD_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
#define ZYD_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
#define ZYD_LOCK_ASSERT(sc, t) mtx_assert(&(sc)->sc_mtx, t)
Index: head/sys/dev/wi/if_wi.c
===================================================================
--- head/sys/dev/wi/if_wi.c (revision 287196)
+++ head/sys/dev/wi/if_wi.c (revision 287197)
@@ -1,2138 +1,2053 @@
/*-
* Copyright (c) 1997, 1998, 1999
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Lucent WaveLAN/IEEE 802.11 PCMCIA driver.
*
* Original FreeBSD driver written by Bill Paul <wpaul@ctr.columbia.edu>
* Electrical Engineering Department
* Columbia University, New York City
*/
/*
* The WaveLAN/IEEE adapter is the second generation of the WaveLAN
* from Lucent. Unlike the older cards, the new ones are programmed
* entirely via a firmware-driven controller called the Hermes.
* Unfortunately, Lucent will not release the Hermes programming manual
* without an NDA (if at all). What they do release is an API library
* called the HCF (Hardware Control Functions) which is supposed to
* do the device-specific operations of a device driver for you. The
* publicly available version of the HCF library (the 'HCF Light') is
* a) extremely gross, b) lacks certain features, particularly support
* for 802.11 frames, and c) is contaminated by the GNU Public License.
*
* This driver does not use the HCF or HCF Light at all. Instead, it
* programs the Hermes controller directly, using information gleaned
* from the HCF Light code and corresponding documentation.
*
* This driver supports the ISA, PCMCIA and PCI versions of the Lucent
* WaveLAN cards (based on the Hermes chipset), as well as the newer
* Prism 2 chipsets with firmware from Intersil and Symbol.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_wlan.h"
#define WI_HERMES_STATS_WAR /* Work around stats counter bug. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/random.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/atomic.h>
#include <sys/rman.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_ioctl.h>
#include <net80211/ieee80211_radiotap.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <net/bpf.h>
#include <dev/wi/if_wavelan_ieee.h>
#include <dev/wi/if_wireg.h>
#include <dev/wi/if_wivar.h>
static struct ieee80211vap *wi_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void wi_vap_delete(struct ieee80211vap *vap);
-static void wi_stop_locked(struct wi_softc *sc, int disable);
-static void wi_start_locked(struct ifnet *);
-static void wi_start(struct ifnet *);
-static int wi_start_tx(struct ifnet *ifp, struct wi_frame *frmhdr,
- struct mbuf *m0);
+static int wi_transmit(struct ieee80211com *, struct mbuf *);
+static void wi_start(struct wi_softc *);
+static int wi_start_tx(struct wi_softc *, struct wi_frame *, struct mbuf *);
static int wi_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
static int wi_newstate_sta(struct ieee80211vap *, enum ieee80211_state, int);
static int wi_newstate_hostap(struct ieee80211vap *, enum ieee80211_state,
int);
static void wi_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
int subtype, const struct ieee80211_rx_stats *rxs,
int rssi, int nf);
static int wi_reset(struct wi_softc *);
static void wi_watchdog(void *);
-static int wi_ioctl(struct ifnet *, u_long, caddr_t);
+static void wi_parent(struct ieee80211com *);
static void wi_media_status(struct ifnet *, struct ifmediareq *);
-static uint64_t wi_get_counter(struct ifnet *, ift_counter);
-
static void wi_rx_intr(struct wi_softc *);
static void wi_tx_intr(struct wi_softc *);
static void wi_tx_ex_intr(struct wi_softc *);
static void wi_info_intr(struct wi_softc *);
static int wi_write_txrate(struct wi_softc *, struct ieee80211vap *);
static int wi_write_wep(struct wi_softc *, struct ieee80211vap *);
static int wi_write_multi(struct wi_softc *);
static void wi_update_mcast(struct ieee80211com *);
static void wi_update_promisc(struct ieee80211com *);
static int wi_alloc_fid(struct wi_softc *, int, int *);
static void wi_read_nicid(struct wi_softc *);
static int wi_write_ssid(struct wi_softc *, int, u_int8_t *, int);
static int wi_cmd(struct wi_softc *, int, int, int, int);
static int wi_seek_bap(struct wi_softc *, int, int);
static int wi_read_bap(struct wi_softc *, int, int, void *, int);
-static int wi_write_bap(struct wi_softc *, int, int, void *, int);
+static int wi_write_bap(struct wi_softc *, int, int, const void *, int);
static int wi_mwrite_bap(struct wi_softc *, int, int, struct mbuf *, int);
static int wi_read_rid(struct wi_softc *, int, void *, int *);
-static int wi_write_rid(struct wi_softc *, int, void *, int);
+static int wi_write_rid(struct wi_softc *, int, const void *, int);
static int wi_write_appie(struct wi_softc *, int, const struct ieee80211_appie *);
static void wi_scan_start(struct ieee80211com *);
static void wi_scan_end(struct ieee80211com *);
static void wi_set_channel(struct ieee80211com *);
static __inline int
wi_write_val(struct wi_softc *sc, int rid, u_int16_t val)
{
val = htole16(val);
return wi_write_rid(sc, rid, &val, sizeof(val));
}
static SYSCTL_NODE(_hw, OID_AUTO, wi, CTLFLAG_RD, 0,
"Wireless driver parameters");
static struct timeval lasttxerror; /* time of last tx error msg */
static int curtxeps; /* current tx error msgs/sec */
static int wi_txerate = 0; /* tx error rate: max msgs/sec */
SYSCTL_INT(_hw_wi, OID_AUTO, txerate, CTLFLAG_RW, &wi_txerate,
0, "max tx error msgs/sec; 0 to disable msgs");
#define WI_DEBUG
#ifdef WI_DEBUG
static int wi_debug = 0;
SYSCTL_INT(_hw_wi, OID_AUTO, debug, CTLFLAG_RW, &wi_debug,
0, "control debugging printfs");
#define DPRINTF(X) if (wi_debug) printf X
#else
#define DPRINTF(X)
#endif
#define WI_INTRS (WI_EV_RX | WI_EV_ALLOC | WI_EV_INFO)
struct wi_card_ident wi_card_ident[] = {
/* CARD_ID CARD_NAME FIRM_TYPE */
{ WI_NIC_LUCENT_ID, WI_NIC_LUCENT_STR, WI_LUCENT },
{ WI_NIC_SONY_ID, WI_NIC_SONY_STR, WI_LUCENT },
{ WI_NIC_LUCENT_EMB_ID, WI_NIC_LUCENT_EMB_STR, WI_LUCENT },
{ WI_NIC_EVB2_ID, WI_NIC_EVB2_STR, WI_INTERSIL },
{ WI_NIC_HWB3763_ID, WI_NIC_HWB3763_STR, WI_INTERSIL },
{ WI_NIC_HWB3163_ID, WI_NIC_HWB3163_STR, WI_INTERSIL },
{ WI_NIC_HWB3163B_ID, WI_NIC_HWB3163B_STR, WI_INTERSIL },
{ WI_NIC_EVB3_ID, WI_NIC_EVB3_STR, WI_INTERSIL },
{ WI_NIC_HWB1153_ID, WI_NIC_HWB1153_STR, WI_INTERSIL },
{ WI_NIC_P2_SST_ID, WI_NIC_P2_SST_STR, WI_INTERSIL },
{ WI_NIC_EVB2_SST_ID, WI_NIC_EVB2_SST_STR, WI_INTERSIL },
{ WI_NIC_3842_EVA_ID, WI_NIC_3842_EVA_STR, WI_INTERSIL },
{ WI_NIC_3842_PCMCIA_AMD_ID, WI_NIC_3842_PCMCIA_STR, WI_INTERSIL },
{ WI_NIC_3842_PCMCIA_SST_ID, WI_NIC_3842_PCMCIA_STR, WI_INTERSIL },
{ WI_NIC_3842_PCMCIA_ATL_ID, WI_NIC_3842_PCMCIA_STR, WI_INTERSIL },
{ WI_NIC_3842_PCMCIA_ATS_ID, WI_NIC_3842_PCMCIA_STR, WI_INTERSIL },
{ WI_NIC_3842_MINI_AMD_ID, WI_NIC_3842_MINI_STR, WI_INTERSIL },
{ WI_NIC_3842_MINI_SST_ID, WI_NIC_3842_MINI_STR, WI_INTERSIL },
{ WI_NIC_3842_MINI_ATL_ID, WI_NIC_3842_MINI_STR, WI_INTERSIL },
{ WI_NIC_3842_MINI_ATS_ID, WI_NIC_3842_MINI_STR, WI_INTERSIL },
{ WI_NIC_3842_PCI_AMD_ID, WI_NIC_3842_PCI_STR, WI_INTERSIL },
{ WI_NIC_3842_PCI_SST_ID, WI_NIC_3842_PCI_STR, WI_INTERSIL },
{ WI_NIC_3842_PCI_ATS_ID, WI_NIC_3842_PCI_STR, WI_INTERSIL },
{ WI_NIC_3842_PCI_ATL_ID, WI_NIC_3842_PCI_STR, WI_INTERSIL },
{ WI_NIC_P3_PCMCIA_AMD_ID, WI_NIC_P3_PCMCIA_STR, WI_INTERSIL },
{ WI_NIC_P3_PCMCIA_SST_ID, WI_NIC_P3_PCMCIA_STR, WI_INTERSIL },
{ WI_NIC_P3_PCMCIA_ATL_ID, WI_NIC_P3_PCMCIA_STR, WI_INTERSIL },
{ WI_NIC_P3_PCMCIA_ATS_ID, WI_NIC_P3_PCMCIA_STR, WI_INTERSIL },
{ WI_NIC_P3_MINI_AMD_ID, WI_NIC_P3_MINI_STR, WI_INTERSIL },
{ WI_NIC_P3_MINI_SST_ID, WI_NIC_P3_MINI_STR, WI_INTERSIL },
{ WI_NIC_P3_MINI_ATL_ID, WI_NIC_P3_MINI_STR, WI_INTERSIL },
{ WI_NIC_P3_MINI_ATS_ID, WI_NIC_P3_MINI_STR, WI_INTERSIL },
{ 0, NULL, 0 },
};
static char *wi_firmware_names[] = { "none", "Hermes", "Intersil", "Symbol" };
devclass_t wi_devclass;
int
wi_attach(device_t dev)
{
struct wi_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic;
- struct ifnet *ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
int i, nrates, buflen;
u_int16_t val;
u_int8_t ratebuf[2 + IEEE80211_RATE_SIZE];
struct ieee80211_rateset *rs;
struct sysctl_ctx_list *sctx;
struct sysctl_oid *soid;
static const u_int8_t empty_macaddr[IEEE80211_ADDR_LEN] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
int error;
- uint8_t macaddr[IEEE80211_ADDR_LEN];
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc\n");
- wi_free(dev);
- return ENOSPC;
- }
- ic = ifp->if_l2com;
-
sc->sc_firmware_type = WI_NOTYPE;
sc->wi_cmd_count = 500;
/* Reset the NIC. */
if (wi_reset(sc) != 0) {
wi_free(dev);
return ENXIO; /* XXX */
}
/* Read NIC identification */
wi_read_nicid(sc);
switch (sc->sc_firmware_type) {
case WI_LUCENT:
if (sc->sc_sta_firmware_ver < 60006)
goto reject;
break;
case WI_INTERSIL:
if (sc->sc_sta_firmware_ver < 800)
goto reject;
break;
default:
reject:
device_printf(dev, "Sorry, this card is not supported "
"(type %d, firmware ver %d)\n",
sc->sc_firmware_type, sc->sc_sta_firmware_ver);
wi_free(dev);
return EOPNOTSUPP;
}
/* Export info about the device via sysctl */
sctx = device_get_sysctl_ctx(dev);
soid = device_get_sysctl_tree(dev);
SYSCTL_ADD_STRING(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
"firmware_type", CTLFLAG_RD,
wi_firmware_names[sc->sc_firmware_type], 0,
"Firmware type string");
SYSCTL_ADD_INT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "sta_version",
CTLFLAG_RD, &sc->sc_sta_firmware_ver, 0,
"Station Firmware version");
if (sc->sc_firmware_type == WI_INTERSIL)
SYSCTL_ADD_INT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
"pri_version", CTLFLAG_RD, &sc->sc_pri_firmware_ver, 0,
"Primary Firmware version");
SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "nic_id",
CTLFLAG_RD, &sc->sc_nic_id, 0, "NIC id");
SYSCTL_ADD_STRING(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "nic_name",
CTLFLAG_RD, sc->sc_nic_name, 0, "NIC name");
mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF | MTX_RECURSE);
callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
/*
* Read the station address.
* And do it twice. I've seen PRISM-based cards that return
* an error when trying to read it the first time, which causes
* the probe to fail.
*/
buflen = IEEE80211_ADDR_LEN;
- error = wi_read_rid(sc, WI_RID_MAC_NODE, macaddr, &buflen);
+ error = wi_read_rid(sc, WI_RID_MAC_NODE, &ic->ic_macaddr, &buflen);
if (error != 0) {
buflen = IEEE80211_ADDR_LEN;
- error = wi_read_rid(sc, WI_RID_MAC_NODE, macaddr, &buflen);
+ error = wi_read_rid(sc, WI_RID_MAC_NODE, &ic->ic_macaddr,
+ &buflen);
}
- if (error || IEEE80211_ADDR_EQ(macaddr, empty_macaddr)) {
+ if (error || IEEE80211_ADDR_EQ(&ic->ic_macaddr, empty_macaddr)) {
if (error != 0)
device_printf(dev, "mac read failed %d\n", error);
else {
device_printf(dev, "mac read failed (all zeros)\n");
error = ENXIO;
}
wi_free(dev);
return (error);
}
- ifp->if_softc = sc;
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = wi_ioctl;
- ifp->if_start = wi_start;
- ifp->if_init = wi_init;
- ifp->if_get_counter = wi_get_counter;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_phytype = IEEE80211_T_DS;
ic->ic_opmode = IEEE80211_M_STA;
ic->ic_caps = IEEE80211_C_STA
| IEEE80211_C_PMGT
| IEEE80211_C_MONITOR
;
/*
* Query the card for available channels and set up the
* channel table. We assume these are all 11b channels.
*/
buflen = sizeof(val);
if (wi_read_rid(sc, WI_RID_CHANNEL_LIST, &val, &buflen) != 0)
val = htole16(0x1fff); /* assume 1-11 */
KASSERT(val != 0, ("wi_attach: no available channels listed!"));
val <<= 1; /* shift for base 1 indices */
for (i = 1; i < 16; i++) {
struct ieee80211_channel *c;
if (!isset((u_int8_t*)&val, i))
continue;
c = &ic->ic_channels[ic->ic_nchans++];
c->ic_freq = ieee80211_ieee2mhz(i, IEEE80211_CHAN_B);
c->ic_flags = IEEE80211_CHAN_B;
c->ic_ieee = i;
/* XXX txpowers? */
}
/*
* Set flags based on firmware version.
*/
switch (sc->sc_firmware_type) {
case WI_LUCENT:
sc->sc_ntxbuf = 1;
ic->ic_caps |= IEEE80211_C_IBSS;
sc->sc_ibss_port = WI_PORTTYPE_BSS;
sc->sc_monitor_port = WI_PORTTYPE_ADHOC;
sc->sc_min_rssi = WI_LUCENT_MIN_RSSI;
sc->sc_max_rssi = WI_LUCENT_MAX_RSSI;
sc->sc_dbm_offset = WI_LUCENT_DBM_OFFSET;
break;
case WI_INTERSIL:
sc->sc_ntxbuf = WI_NTXBUF;
sc->sc_flags |= WI_FLAGS_HAS_FRAGTHR
| WI_FLAGS_HAS_ROAMING;
/*
* Old firmware is slow, so give peace a chance.
*/
if (sc->sc_sta_firmware_ver < 10000)
sc->wi_cmd_count = 5000;
if (sc->sc_sta_firmware_ver > 10101)
sc->sc_flags |= WI_FLAGS_HAS_DBMADJUST;
ic->ic_caps |= IEEE80211_C_IBSS;
/*
* Version 0.8.3 and newer are the only ones known to work
* at present.  Earlier versions can reportedly be made to
* work (at least according to the Linux driver), but we
* require monitor mode, so this is irrelevant.
*/
ic->ic_caps |= IEEE80211_C_HOSTAP;
if (sc->sc_sta_firmware_ver >= 10603)
sc->sc_flags |= WI_FLAGS_HAS_ENHSECURITY;
if (sc->sc_sta_firmware_ver >= 10700) {
/*
* 1.7.0+ have the necessary support for sta mode WPA.
*/
sc->sc_flags |= WI_FLAGS_HAS_WPASUPPORT;
ic->ic_caps |= IEEE80211_C_WPA;
}
sc->sc_ibss_port = WI_PORTTYPE_IBSS;
sc->sc_monitor_port = WI_PORTTYPE_APSILENT;
sc->sc_min_rssi = WI_PRISM_MIN_RSSI;
sc->sc_max_rssi = WI_PRISM_MAX_RSSI;
sc->sc_dbm_offset = WI_PRISM_DBM_OFFSET;
break;
}
/*
* Find out if we support WEP on this card.
*/
buflen = sizeof(val);
if (wi_read_rid(sc, WI_RID_WEP_AVAIL, &val, &buflen) == 0 &&
val != htole16(0))
ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
/* Find supported rates. */
buflen = sizeof(ratebuf);
rs = &ic->ic_sup_rates[IEEE80211_MODE_11B];
if (wi_read_rid(sc, WI_RID_DATA_RATES, ratebuf, &buflen) == 0) {
nrates = le16toh(*(u_int16_t *)ratebuf);
if (nrates > IEEE80211_RATE_MAXSIZE)
nrates = IEEE80211_RATE_MAXSIZE;
rs->rs_nrates = 0;
for (i = 0; i < nrates; i++)
if (ratebuf[2+i])
rs->rs_rates[rs->rs_nrates++] = ratebuf[2+i];
} else {
/* XXX fallback on error? */
}
buflen = sizeof(val);
if ((sc->sc_flags & WI_FLAGS_HAS_DBMADJUST) &&
wi_read_rid(sc, WI_RID_DBM_ADJUST, &val, &buflen) == 0) {
sc->sc_dbm_offset = le16toh(val);
}
sc->sc_portnum = WI_DEFAULT_PORT;
- ieee80211_ifattach(ic, macaddr);
+ ieee80211_ifattach(ic);
ic->ic_raw_xmit = wi_raw_xmit;
ic->ic_scan_start = wi_scan_start;
ic->ic_scan_end = wi_scan_end;
ic->ic_set_channel = wi_set_channel;
-
ic->ic_vap_create = wi_vap_create;
ic->ic_vap_delete = wi_vap_delete;
ic->ic_update_mcast = wi_update_mcast;
ic->ic_update_promisc = wi_update_promisc;
+ ic->ic_transmit = wi_transmit;
+ ic->ic_parent = wi_parent;
ieee80211_radiotap_attach(ic,
&sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
WI_TX_RADIOTAP_PRESENT,
&sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
WI_RX_RADIOTAP_PRESENT);
if (bootverbose)
ieee80211_announce(ic);
error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
NULL, wi_intr, sc, &sc->wi_intrhand);
if (error) {
device_printf(dev, "bus_setup_intr() failed! (%d)\n", error);
ieee80211_ifdetach(ic);
- if_free(sc->sc_ifp);
wi_free(dev);
return error;
}
return (0);
}
int
wi_detach(device_t dev)
{
struct wi_softc *sc = device_get_softc(dev);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
WI_LOCK(sc);
/* check if device was removed */
sc->wi_gone |= !bus_child_present(dev);
- wi_stop_locked(sc, 0);
+ wi_stop(sc, 0);
WI_UNLOCK(sc);
ieee80211_ifdetach(ic);
bus_teardown_intr(dev, sc->irq, sc->wi_intrhand);
- if_free(sc->sc_ifp);
wi_free(dev);
+ mbufq_drain(&sc->sc_snd);
mtx_destroy(&sc->sc_mtx);
return (0);
}
static struct ieee80211vap *
wi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct wi_softc *sc = ic->ic_softc;
struct wi_vap *wvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return NULL;
- wvp = (struct wi_vap *) malloc(sizeof(struct wi_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (wvp == NULL)
- return NULL;
+ wvp = malloc(sizeof(struct wi_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &wvp->wv_vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
vap->iv_max_aid = WI_MAX_AID;
switch (opmode) {
case IEEE80211_M_STA:
sc->sc_porttype = WI_PORTTYPE_BSS;
wvp->wv_newstate = vap->iv_newstate;
vap->iv_newstate = wi_newstate_sta;
/* need to filter mgt frames to avoid confusing state machine */
wvp->wv_recv_mgmt = vap->iv_recv_mgmt;
vap->iv_recv_mgmt = wi_recv_mgmt;
break;
case IEEE80211_M_IBSS:
sc->sc_porttype = sc->sc_ibss_port;
wvp->wv_newstate = vap->iv_newstate;
vap->iv_newstate = wi_newstate_sta;
break;
case IEEE80211_M_AHDEMO:
sc->sc_porttype = WI_PORTTYPE_ADHOC;
break;
case IEEE80211_M_HOSTAP:
sc->sc_porttype = WI_PORTTYPE_HOSTAP;
wvp->wv_newstate = vap->iv_newstate;
vap->iv_newstate = wi_newstate_hostap;
break;
case IEEE80211_M_MONITOR:
sc->sc_porttype = sc->sc_monitor_port;
break;
default:
break;
}
/* complete setup */
- ieee80211_vap_attach(vap, ieee80211_media_change, wi_media_status);
+ ieee80211_vap_attach(vap, ieee80211_media_change, wi_media_status, mac);
ic->ic_opmode = opmode;
return vap;
}
static void
wi_vap_delete(struct ieee80211vap *vap)
{
struct wi_vap *wvp = WI_VAP(vap);
ieee80211_vap_detach(vap);
free(wvp, M_80211_VAP);
}
int
wi_shutdown(device_t dev)
{
struct wi_softc *sc = device_get_softc(dev);
+ WI_LOCK(sc);
wi_stop(sc, 1);
+ WI_UNLOCK(sc);
return (0);
}
void
wi_intr(void *arg)
{
struct wi_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
u_int16_t status;
WI_LOCK(sc);
- if (sc->wi_gone || !sc->sc_enabled || (ifp->if_flags & IFF_UP) == 0) {
+ if (sc->wi_gone || !sc->sc_enabled ||
+ (sc->sc_flags & WI_FLAGS_RUNNING) == 0) {
CSR_WRITE_2(sc, WI_INT_EN, 0);
CSR_WRITE_2(sc, WI_EVENT_ACK, 0xFFFF);
WI_UNLOCK(sc);
return;
}
/* Disable interrupts. */
CSR_WRITE_2(sc, WI_INT_EN, 0);
status = CSR_READ_2(sc, WI_EVENT_STAT);
if (status & WI_EV_RX)
wi_rx_intr(sc);
if (status & WI_EV_ALLOC)
wi_tx_intr(sc);
if (status & WI_EV_TX_EXC)
wi_tx_ex_intr(sc);
if (status & WI_EV_INFO)
wi_info_intr(sc);
- if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
- !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
- wi_start_locked(ifp);
+ if (mbufq_first(&sc->sc_snd) != NULL)
+ wi_start(sc);
/* Re-enable interrupts. */
CSR_WRITE_2(sc, WI_INT_EN, WI_INTRS);
WI_UNLOCK(sc);
return;
}
static void
wi_enable(struct wi_softc *sc)
{
/* Enable interrupts */
CSR_WRITE_2(sc, WI_INT_EN, WI_INTRS);
/* enable port */
wi_cmd(sc, WI_CMD_ENABLE | sc->sc_portnum, 0, 0, 0);
sc->sc_enabled = 1;
}
static int
wi_setup_locked(struct wi_softc *sc, int porttype, int mode,
- uint8_t mac[IEEE80211_ADDR_LEN])
+ const uint8_t mac[IEEE80211_ADDR_LEN])
{
int i;
wi_reset(sc);
wi_write_val(sc, WI_RID_PORTTYPE, porttype);
wi_write_val(sc, WI_RID_CREATE_IBSS, mode);
wi_write_val(sc, WI_RID_MAX_DATALEN, 2304);
/* XXX IEEE80211_BPF_NOACK wants 0 */
wi_write_val(sc, WI_RID_ALT_RETRY_CNT, 2);
if (sc->sc_flags & WI_FLAGS_HAS_ROAMING)
wi_write_val(sc, WI_RID_ROAMING_MODE, 3); /* NB: disabled */
wi_write_rid(sc, WI_RID_MAC_NODE, mac, IEEE80211_ADDR_LEN);
/* Allocate fids for the card */
sc->sc_buflen = IEEE80211_MAX_LEN + sizeof(struct wi_frame);
for (i = 0; i < sc->sc_ntxbuf; i++) {
int error = wi_alloc_fid(sc, sc->sc_buflen,
&sc->sc_txd[i].d_fid);
if (error) {
device_printf(sc->sc_dev,
"tx buffer allocation failed (error %u)\n",
error);
return error;
}
sc->sc_txd[i].d_len = 0;
}
sc->sc_txcur = sc->sc_txnext = 0;
return 0;
}
-static void
-wi_init_locked(struct wi_softc *sc)
+void
+wi_init(struct wi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
int wasenabled;
WI_LOCK_ASSERT(sc);
wasenabled = sc->sc_enabled;
if (wasenabled)
- wi_stop_locked(sc, 1);
+ wi_stop(sc, 1);
- if (wi_setup_locked(sc, sc->sc_porttype, 3, IF_LLADDR(ifp)) != 0) {
- if_printf(ifp, "interface not running\n");
- wi_stop_locked(sc, 1);
+ if (wi_setup_locked(sc, sc->sc_porttype, 3,
+ sc->sc_ic.ic_macaddr) != 0) {
+ device_printf(sc->sc_dev, "interface not running\n");
+ wi_stop(sc, 1);
return;
}
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+ sc->sc_flags |= WI_FLAGS_RUNNING;
callout_reset(&sc->sc_watchdog, hz, wi_watchdog, sc);
wi_enable(sc); /* Enable desired port */
}
void
-wi_init(void *arg)
+wi_stop(struct wi_softc *sc, int disable)
{
- struct wi_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- WI_LOCK(sc);
- wi_init_locked(sc);
- WI_UNLOCK(sc);
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ieee80211_start_all(ic); /* start all vap's */
-}
-
-static void
-wi_stop_locked(struct wi_softc *sc, int disable)
-{
- struct ifnet *ifp = sc->sc_ifp;
-
WI_LOCK_ASSERT(sc);
if (sc->sc_enabled && !sc->wi_gone) {
CSR_WRITE_2(sc, WI_INT_EN, 0);
wi_cmd(sc, WI_CMD_DISABLE | sc->sc_portnum, 0, 0, 0);
if (disable)
sc->sc_enabled = 0;
} else if (sc->wi_gone && disable) /* gone --> not enabled */
sc->sc_enabled = 0;
callout_stop(&sc->sc_watchdog);
sc->sc_tx_timer = 0;
sc->sc_false_syns = 0;
- ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);
+ sc->sc_flags &= ~WI_FLAGS_RUNNING;
}
-void
-wi_stop(struct wi_softc *sc, int disable)
-{
- WI_LOCK(sc);
- wi_stop_locked(sc, disable);
- WI_UNLOCK(sc);
-}
-
static void
wi_set_channel(struct ieee80211com *ic)
{
struct wi_softc *sc = ic->ic_softc;
DPRINTF(("%s: channel %d, %sscanning\n", __func__,
ieee80211_chan2ieee(ic, ic->ic_curchan),
ic->ic_flags & IEEE80211_F_SCAN ? "" : "!"));
WI_LOCK(sc);
wi_write_val(sc, WI_RID_OWN_CHNL,
ieee80211_chan2ieee(ic, ic->ic_curchan));
WI_UNLOCK(sc);
}
static void
wi_scan_start(struct ieee80211com *ic)
{
struct wi_softc *sc = ic->ic_softc;
struct ieee80211_scan_state *ss = ic->ic_scan;
DPRINTF(("%s\n", __func__));
WI_LOCK(sc);
/*
* Switch device to monitor mode.
*/
wi_write_val(sc, WI_RID_PORTTYPE, sc->sc_monitor_port);
if (sc->sc_firmware_type == WI_INTERSIL) {
wi_cmd(sc, WI_CMD_DISABLE | WI_PORT0, 0, 0, 0);
wi_cmd(sc, WI_CMD_ENABLE | WI_PORT0, 0, 0, 0);
}
/* force full dwell time to compensate for firmware overhead */
ss->ss_mindwell = ss->ss_maxdwell = msecs_to_ticks(400);
WI_UNLOCK(sc);
}
static void
wi_scan_end(struct ieee80211com *ic)
{
struct wi_softc *sc = ic->ic_softc;
DPRINTF(("%s: restore port type %d\n", __func__, sc->sc_porttype));
WI_LOCK(sc);
wi_write_val(sc, WI_RID_PORTTYPE, sc->sc_porttype);
if (sc->sc_firmware_type == WI_INTERSIL) {
wi_cmd(sc, WI_CMD_DISABLE | WI_PORT0, 0, 0, 0);
wi_cmd(sc, WI_CMD_ENABLE | WI_PORT0, 0, 0, 0);
}
WI_UNLOCK(sc);
}
static void
wi_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf)
{
struct ieee80211vap *vap = ni->ni_vap;
switch (subtype) {
case IEEE80211_FC0_SUBTYPE_AUTH:
case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
case IEEE80211_FC0_SUBTYPE_REASSOC_RESP:
/* NB: filter frames that trigger state changes */
return;
}
WI_VAP(vap)->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf);
}
static int
wi_newstate_sta(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_node *bss;
struct wi_softc *sc = ic->ic_softc;
DPRINTF(("%s: %s -> %s\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate]));
if (nstate == IEEE80211_S_AUTH) {
WI_LOCK(sc);
wi_setup_locked(sc, WI_PORTTYPE_BSS, 3, vap->iv_myaddr);
if (vap->iv_flags & IEEE80211_F_PMGTON) {
wi_write_val(sc, WI_RID_MAX_SLEEP, ic->ic_lintval);
wi_write_val(sc, WI_RID_PM_ENABLED, 1);
}
wi_write_val(sc, WI_RID_RTS_THRESH, vap->iv_rtsthreshold);
if (sc->sc_flags & WI_FLAGS_HAS_FRAGTHR)
wi_write_val(sc, WI_RID_FRAG_THRESH,
vap->iv_fragthreshold);
wi_write_txrate(sc, vap);
bss = vap->iv_bss;
wi_write_ssid(sc, WI_RID_DESIRED_SSID, bss->ni_essid, bss->ni_esslen);
wi_write_val(sc, WI_RID_OWN_CHNL,
ieee80211_chan2ieee(ic, bss->ni_chan));
/* Configure WEP. */
if (ic->ic_cryptocaps & IEEE80211_CRYPTO_WEP)
wi_write_wep(sc, vap);
else
sc->sc_encryption = 0;
if ((sc->sc_flags & WI_FLAGS_HAS_WPASUPPORT) &&
(vap->iv_flags & IEEE80211_F_WPA)) {
wi_write_val(sc, WI_RID_WPA_HANDLING, 1);
if (vap->iv_appie_wpa != NULL)
wi_write_appie(sc, WI_RID_WPA_DATA,
vap->iv_appie_wpa);
}
wi_enable(sc); /* enable port */
/* Lucent firmware does not support the JOIN RID. */
if (sc->sc_firmware_type == WI_INTERSIL) {
struct wi_joinreq join;
memset(&join, 0, sizeof(join));
IEEE80211_ADDR_COPY(&join.wi_bssid, bss->ni_bssid);
join.wi_chan = htole16(
ieee80211_chan2ieee(ic, bss->ni_chan));
wi_write_rid(sc, WI_RID_JOIN_REQ, &join, sizeof(join));
}
WI_UNLOCK(sc);
/*
* NB: don't go through 802.11 layer, it'll send auth frame;
* instead we drive the state machine from the link status
* notification we get on association.
*/
vap->iv_state = nstate;
return (0);
}
return WI_VAP(vap)->wv_newstate(vap, nstate, arg);
}
static int
wi_newstate_hostap(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_node *bss;
struct wi_softc *sc = ic->ic_softc;
int error;
DPRINTF(("%s: %s -> %s\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate]));
error = WI_VAP(vap)->wv_newstate(vap, nstate, arg);
if (error == 0 && nstate == IEEE80211_S_RUN) {
WI_LOCK(sc);
wi_setup_locked(sc, WI_PORTTYPE_HOSTAP, 0, vap->iv_myaddr);
bss = vap->iv_bss;
wi_write_ssid(sc, WI_RID_OWN_SSID,
bss->ni_essid, bss->ni_esslen);
wi_write_val(sc, WI_RID_OWN_CHNL,
ieee80211_chan2ieee(ic, bss->ni_chan));
wi_write_val(sc, WI_RID_BASIC_RATE, 0x3);
wi_write_val(sc, WI_RID_SUPPORT_RATE, 0xf);
wi_write_txrate(sc, vap);
wi_write_val(sc, WI_RID_OWN_BEACON_INT, bss->ni_intval);
wi_write_val(sc, WI_RID_DTIM_PERIOD, vap->iv_dtim_period);
wi_write_val(sc, WI_RID_RTS_THRESH, vap->iv_rtsthreshold);
if (sc->sc_flags & WI_FLAGS_HAS_FRAGTHR)
wi_write_val(sc, WI_RID_FRAG_THRESH,
vap->iv_fragthreshold);
if ((sc->sc_flags & WI_FLAGS_HAS_ENHSECURITY) &&
(vap->iv_flags & IEEE80211_F_HIDESSID)) {
/*
* bit 0 means hide SSID in beacons,
* bit 1 means don't respond to bcast probe req
*/
wi_write_val(sc, WI_RID_ENH_SECURITY, 0x3);
}
if ((sc->sc_flags & WI_FLAGS_HAS_WPASUPPORT) &&
(vap->iv_flags & IEEE80211_F_WPA) &&
vap->iv_appie_wpa != NULL)
wi_write_appie(sc, WI_RID_WPA_DATA, vap->iv_appie_wpa);
wi_write_val(sc, WI_RID_PROMISC, 0);
/* Configure WEP. */
if (ic->ic_cryptocaps & IEEE80211_CRYPTO_WEP)
wi_write_wep(sc, vap);
else
sc->sc_encryption = 0;
wi_enable(sc); /* enable port */
WI_UNLOCK(sc);
}
return error;
}
+static int
+wi_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct wi_softc *sc = ic->ic_softc;
+ int error;
+
+ WI_LOCK(sc);
+ if ((sc->sc_flags & WI_FLAGS_RUNNING) == 0) {
+ WI_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ WI_UNLOCK(sc);
+ return (error);
+ }
+ wi_start(sc);
+ WI_UNLOCK(sc);
+ return (0);
+}
+
static void
-wi_start_locked(struct ifnet *ifp)
+wi_start(struct wi_softc *sc)
{
- struct wi_softc *sc = ifp->if_softc;
struct ieee80211_node *ni;
struct ieee80211_frame *wh;
struct mbuf *m0;
struct ieee80211_key *k;
struct wi_frame frmhdr;
const struct llc *llc;
int cur;
WI_LOCK_ASSERT(sc);
if (sc->wi_gone)
return;
memset(&frmhdr, 0, sizeof(frmhdr));
cur = sc->sc_txnext;
- for (;;) {
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
- if (m0 == NULL)
- break;
- if (sc->sc_txd[cur].d_len != 0) {
- IFQ_DRV_PREPEND(&ifp->if_snd, m0);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- break;
- }
+ while (sc->sc_txd[cur].d_len == 0 &&
+ (m0 = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *) m0->m_pkthdr.rcvif;
/* reconstruct 802.3 header */
wh = mtod(m0, struct ieee80211_frame *);
switch (wh->i_fc[1]) {
case IEEE80211_FC1_DIR_TODS:
IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_shost,
wh->i_addr2);
IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_dhost,
wh->i_addr3);
break;
case IEEE80211_FC1_DIR_NODS:
IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_shost,
wh->i_addr2);
IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_dhost,
wh->i_addr1);
break;
case IEEE80211_FC1_DIR_FROMDS:
IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_shost,
wh->i_addr3);
IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_dhost,
wh->i_addr1);
break;
}
llc = (const struct llc *)(
mtod(m0, const uint8_t *) + ieee80211_hdrsize(wh));
frmhdr.wi_ehdr.ether_type = llc->llc_snap.ether_type;
frmhdr.wi_tx_ctl = htole16(WI_ENC_TX_802_11|WI_TXCNTL_TX_EX);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
ieee80211_free_node(ni);
m_freem(m0);
continue;
}
frmhdr.wi_tx_ctl |= htole16(WI_TXCNTL_NOCRYPT);
}
if (ieee80211_radiotap_active_vap(ni->ni_vap)) {
sc->sc_tx_th.wt_rate = ni->ni_txrate;
ieee80211_radiotap_tx(ni->ni_vap, m0);
}
m_copydata(m0, 0, sizeof(struct ieee80211_frame),
(caddr_t)&frmhdr.wi_whdr);
m_adj(m0, sizeof(struct ieee80211_frame));
frmhdr.wi_dat_len = htole16(m0->m_pkthdr.len);
ieee80211_free_node(ni);
- if (wi_start_tx(ifp, &frmhdr, m0))
+ if (wi_start_tx(sc, &frmhdr, m0))
continue;
sc->sc_txnext = cur = (cur + 1) % sc->sc_ntxbuf;
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
}
}
-static void
-wi_start(struct ifnet *ifp)
-{
- struct wi_softc *sc = ifp->if_softc;
-
- WI_LOCK(sc);
- wi_start_locked(ifp);
- WI_UNLOCK(sc);
-}
-
static int
-wi_start_tx(struct ifnet *ifp, struct wi_frame *frmhdr, struct mbuf *m0)
+wi_start_tx(struct wi_softc *sc, struct wi_frame *frmhdr, struct mbuf *m0)
{
- struct wi_softc *sc = ifp->if_softc;
int cur = sc->sc_txnext;
int fid, off, error;
fid = sc->sc_txd[cur].d_fid;
off = sizeof(*frmhdr);
error = wi_write_bap(sc, fid, 0, frmhdr, sizeof(*frmhdr)) != 0
|| wi_mwrite_bap(sc, fid, off, m0, m0->m_pkthdr.len) != 0;
m_freem(m0);
if (error) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
return -1;
}
sc->sc_txd[cur].d_len = off;
if (sc->sc_txcur == cur) {
if (wi_cmd(sc, WI_CMD_TX | WI_RECLAIM, fid, 0, 0)) {
- if_printf(ifp, "xmit failed\n");
+ device_printf(sc->sc_dev, "xmit failed\n");
sc->sc_txd[cur].d_len = 0;
return -1;
}
sc->sc_tx_timer = 5;
}
return 0;
}
static int
wi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m0,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct ieee80211vap *vap = ni->ni_vap;
struct wi_softc *sc = ic->ic_softc;
struct ieee80211_key *k;
struct ieee80211_frame *wh;
struct wi_frame frmhdr;
int cur;
int rc = 0;
WI_LOCK(sc);
if (sc->wi_gone) {
rc = ENETDOWN;
goto out;
}
memset(&frmhdr, 0, sizeof(frmhdr));
cur = sc->sc_txnext;
if (sc->sc_txd[cur].d_len != 0) {
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
rc = ENOBUFS;
goto out;
}
m0->m_pkthdr.rcvif = NULL;
m_copydata(m0, 4, ETHER_ADDR_LEN * 2,
(caddr_t)&frmhdr.wi_ehdr);
frmhdr.wi_ehdr.ether_type = 0;
wh = mtod(m0, struct ieee80211_frame *);
frmhdr.wi_tx_ctl = htole16(WI_ENC_TX_802_11|WI_TXCNTL_TX_EX);
if (params && (params->ibp_flags & IEEE80211_BPF_NOACK))
frmhdr.wi_tx_ctl |= htole16(WI_TXCNTL_ALTRTRY);
if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
(!params || (params && (params->ibp_flags & IEEE80211_BPF_CRYPTO)))) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
rc = ENOMEM;
goto out;
}
frmhdr.wi_tx_ctl |= htole16(WI_TXCNTL_NOCRYPT);
}
if (ieee80211_radiotap_active_vap(vap)) {
sc->sc_tx_th.wt_rate = ni->ni_txrate;
ieee80211_radiotap_tx(vap, m0);
}
m_copydata(m0, 0, sizeof(struct ieee80211_frame),
(caddr_t)&frmhdr.wi_whdr);
m_adj(m0, sizeof(struct ieee80211_frame));
frmhdr.wi_dat_len = htole16(m0->m_pkthdr.len);
- if (wi_start_tx(ifp, &frmhdr, m0) < 0) {
+ if (wi_start_tx(sc, &frmhdr, m0) < 0) {
m0 = NULL;
rc = EIO;
goto out;
}
m0 = NULL;
sc->sc_txnext = cur = (cur + 1) % sc->sc_ntxbuf;
out:
WI_UNLOCK(sc);
if (m0 != NULL)
m_freem(m0);
ieee80211_free_node(ni);
return rc;
}
static int
wi_reset(struct wi_softc *sc)
{
#define WI_INIT_TRIES 3
int i, error = 0;
for (i = 0; i < WI_INIT_TRIES; i++) {
error = wi_cmd(sc, WI_CMD_INI, 0, 0, 0);
if (error == 0)
break;
DELAY(WI_DELAY * 1000);
}
sc->sc_reset = 1;
if (i == WI_INIT_TRIES) {
- if_printf(sc->sc_ifp, "reset failed\n");
+ device_printf(sc->sc_dev, "reset failed\n");
return error;
}
CSR_WRITE_2(sc, WI_INT_EN, 0);
CSR_WRITE_2(sc, WI_EVENT_ACK, 0xFFFF);
/* Calibrate timer. */
wi_write_val(sc, WI_RID_TICK_TIME, 8);
return 0;
#undef WI_INIT_TRIES
}
static void
wi_watchdog(void *arg)
{
struct wi_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
WI_LOCK_ASSERT(sc);
if (!sc->sc_enabled)
return;
if (sc->sc_tx_timer && --sc->sc_tx_timer == 0) {
- if_printf(ifp, "device timeout\n");
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- wi_init_locked(ifp->if_softc);
+ device_printf(sc->sc_dev, "device timeout\n");
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
+ wi_init(sc);
return;
}
callout_reset(&sc->sc_watchdog, hz, wi_watchdog, sc);
}
-static int
-wi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
+static void
+wi_parent(struct ieee80211com *ic)
{
- struct wi_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifreq *ifr = (struct ifreq *) data;
- int error = 0, startall = 0;
+ struct wi_softc *sc = ic->ic_softc;
+ int startall = 0;
- switch (cmd) {
- case SIOCSIFFLAGS:
- WI_LOCK(sc);
- /*
- * Can't do promisc and hostap at the same time. If all that's
- * changing is the promisc flag, try to short-circuit a call to
- * wi_init() by just setting PROMISC in the hardware.
- */
- if (ifp->if_flags & IFF_UP) {
- if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
- ifp->if_drv_flags & IFF_DRV_RUNNING) {
- if ((ifp->if_flags ^ sc->sc_if_flags) & IFF_PROMISC) {
- wi_write_val(sc, WI_RID_PROMISC,
- (ifp->if_flags & IFF_PROMISC) != 0);
- } else {
- wi_init_locked(sc);
- startall = 1;
- }
+ WI_LOCK(sc);
+ /*
+ * Can't do promisc and hostap at the same time. If all that's
+ * changing is the promisc flag, try to short-circuit a call to
+ * wi_init() by just setting PROMISC in the hardware.
+ */
+ if (ic->ic_nrunning > 0) {
+ if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
+ sc->sc_flags & WI_FLAGS_RUNNING) {
+ if (ic->ic_promisc > 0 &&
+ (sc->sc_flags & WI_FLAGS_PROMISC) == 0) {
+ wi_write_val(sc, WI_RID_PROMISC, 1);
+ sc->sc_flags |= WI_FLAGS_PROMISC;
+ } else if (ic->ic_promisc == 0 &&
+ (sc->sc_flags & WI_FLAGS_PROMISC) != 0) {
+ wi_write_val(sc, WI_RID_PROMISC, 0);
+ sc->sc_flags &= ~WI_FLAGS_PROMISC;
} else {
- wi_init_locked(sc);
+ wi_init(sc);
startall = 1;
}
} else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- wi_stop_locked(sc, 1);
- sc->wi_gone = 0;
+ wi_init(sc);
+ startall = 1;
}
- sc->sc_if_flags = ifp->if_flags;
- WI_UNLOCK(sc);
- if (startall)
- ieee80211_start_all(ic);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- error = EINVAL;
- break;
+ } else if (sc->sc_flags & WI_FLAGS_RUNNING) {
+ wi_stop(sc, 1);
+ sc->wi_gone = 0;
}
- return error;
+ WI_UNLOCK(sc);
+ if (startall)
+ ieee80211_start_all(ic);
}
static void
wi_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
struct ieee80211vap *vap = ifp->if_softc;
struct ieee80211com *ic = vap->iv_ic;
struct wi_softc *sc = ic->ic_softc;
u_int16_t val;
int rate, len;
len = sizeof(val);
if (sc->sc_enabled &&
wi_read_rid(sc, WI_RID_CUR_TX_RATE, &val, &len) == 0 &&
len == sizeof(val)) {
/* convert to 802.11 rate */
val = le16toh(val);
rate = val * 2;
if (sc->sc_firmware_type == WI_LUCENT) {
if (rate == 10)
rate = 11; /* 5.5Mbps */
} else {
if (rate == 4*2)
rate = 11; /* 5.5Mbps */
else if (rate == 8*2)
rate = 22; /* 11Mbps */
}
vap->iv_bss->ni_txrate = rate;
}
ieee80211_media_status(ifp, imr);
}
static void
wi_sync_bssid(struct wi_softc *sc, u_int8_t new_bssid[IEEE80211_ADDR_LEN])
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ieee80211_node *ni = vap->iv_bss;
if (IEEE80211_ADDR_EQ(new_bssid, ni->ni_bssid))
return;
DPRINTF(("wi_sync_bssid: bssid %s -> ", ether_sprintf(ni->ni_bssid)));
DPRINTF(("%s ?\n", ether_sprintf(new_bssid)));
/* In promiscuous mode, the BSSID field is not a reliable
* indicator of the firmware's BSSID. Damp spurious
* change-of-BSSID indications.
*/
- if ((ifp->if_flags & IFF_PROMISC) != 0 &&
+ if (ic->ic_promisc > 0 &&
!ppsratecheck(&sc->sc_last_syn, &sc->sc_false_syns,
WI_MAX_FALSE_SYNS))
return;
sc->sc_false_syns = MAX(0, sc->sc_false_syns - 1);
#if 0
/*
* XXX hack; we should create a new node with the new bssid
* and replace the existing ic_bss with it but since we don't
* process management frames to collect state we cheat by
* reusing the existing node as we know wi_newstate will be
* called and it will overwrite the node state.
*/
ieee80211_sta_join(ic, ieee80211_ref_node(ni));
#endif
}
static __noinline void
wi_rx_intr(struct wi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct wi_frame frmhdr;
struct mbuf *m;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
int fid, len, off;
u_int8_t dir;
u_int16_t status;
int8_t rssi, nf;
fid = CSR_READ_2(sc, WI_RX_FID);
/* First read in the frame header */
if (wi_read_bap(sc, fid, 0, &frmhdr, sizeof(frmhdr))) {
CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
DPRINTF(("wi_rx_intr: read fid %x failed\n", fid));
return;
}
/*
* Drop undecryptable or packets with receive errors here
*/
status = le16toh(frmhdr.wi_status);
if (status & WI_STAT_ERRSTAT) {
CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
DPRINTF(("wi_rx_intr: fid %x error status %x\n", fid, status));
return;
}
len = le16toh(frmhdr.wi_dat_len);
off = ALIGN(sizeof(struct ieee80211_frame));
/*
* Sometimes PRISM2.x firmware returns bogusly large frames. Except
* in monitor mode, just throw them away.
*/
if (off + len > MCLBYTES) {
if (ic->ic_opmode != IEEE80211_M_MONITOR) {
CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
DPRINTF(("wi_rx_intr: oversized packet\n"));
return;
} else
len = 0;
}
if (off + len > MHLEN)
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
else
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL) {
CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX);
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ counter_u64_add(ic->ic_ierrors, 1);
DPRINTF(("wi_rx_intr: MGET failed\n"));
return;
}
m->m_data += off - sizeof(struct ieee80211_frame);
memcpy(m->m_data, &frmhdr.wi_whdr, sizeof(struct ieee80211_frame));
wi_read_bap(sc, fid, sizeof(frmhdr),
m->m_data + sizeof(struct ieee80211_frame), len);
m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame) + len;
- m->m_pkthdr.rcvif = ifp;
CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX);
rssi = frmhdr.wi_rx_signal;
nf = frmhdr.wi_rx_silence;
if (ieee80211_radiotap_active(ic)) {
struct wi_rx_radiotap_header *tap = &sc->sc_rx_th;
uint32_t rstamp;
rstamp = (le16toh(frmhdr.wi_rx_tstamp0) << 16) |
le16toh(frmhdr.wi_rx_tstamp1);
tap->wr_tsf = htole64((uint64_t)rstamp);
/* XXX replace divide by table */
tap->wr_rate = frmhdr.wi_rx_rate / 5;
tap->wr_flags = 0;
if (frmhdr.wi_status & WI_STAT_PCF)
tap->wr_flags |= IEEE80211_RADIOTAP_F_CFP;
if (m->m_flags & M_WEP)
tap->wr_flags |= IEEE80211_RADIOTAP_F_WEP;
tap->wr_antsignal = rssi;
tap->wr_antnoise = nf;
}
/* synchronize driver's BSSID with firmware's BSSID */
wh = mtod(m, struct ieee80211_frame *);
dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;
if (ic->ic_opmode == IEEE80211_M_IBSS && dir == IEEE80211_FC1_DIR_NODS)
wi_sync_bssid(sc, wh->i_addr3);
WI_UNLOCK(sc);
ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *));
if (ni != NULL) {
(void) ieee80211_input(ni, m, rssi, nf);
ieee80211_free_node(ni);
} else
(void) ieee80211_input_all(ic, m, rssi, nf);
WI_LOCK(sc);
}
static __noinline void
wi_tx_ex_intr(struct wi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
struct wi_frame frmhdr;
int fid;
fid = CSR_READ_2(sc, WI_TX_CMP_FID);
/* Read in the frame header */
if (wi_read_bap(sc, fid, 0, &frmhdr, sizeof(frmhdr)) == 0) {
u_int16_t status = le16toh(frmhdr.wi_status);
/*
* Spontaneous station disconnects appear as xmit
* errors. Don't announce them and/or count them
* as an output error.
*/
if ((status & WI_TXSTAT_DISCONNECT) == 0) {
if (ppsratecheck(&lasttxerror, &curtxeps, wi_txerate)) {
- if_printf(ifp, "tx failed");
+ device_printf(sc->sc_dev, "tx failed");
if (status & WI_TXSTAT_RET_ERR)
printf(", retry limit exceeded");
if (status & WI_TXSTAT_AGED_ERR)
printf(", max transmit lifetime exceeded");
if (status & WI_TXSTAT_DISCONNECT)
printf(", port disconnected");
if (status & WI_TXSTAT_FORM_ERR)
printf(", invalid format (data len %u src %6D)",
le16toh(frmhdr.wi_dat_len),
frmhdr.wi_ehdr.ether_shost, ":");
if (status & ~0xf)
printf(", status=0x%x", status);
printf("\n");
}
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
} else
DPRINTF(("port disconnected\n"));
} else
DPRINTF(("wi_tx_ex_intr: read fid %x failed\n", fid));
CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_TX_EXC);
}
static __noinline void
wi_tx_intr(struct wi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
int fid, cur;
if (sc->wi_gone)
return;
fid = CSR_READ_2(sc, WI_ALLOC_FID);
CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_ALLOC);
cur = sc->sc_txcur;
if (sc->sc_txd[cur].d_fid != fid) {
- if_printf(ifp, "bad alloc %x != %x, cur %d nxt %d\n",
+ device_printf(sc->sc_dev, "bad alloc %x != %x, cur %d nxt %d\n",
fid, sc->sc_txd[cur].d_fid, cur, sc->sc_txnext);
return;
}
sc->sc_tx_timer = 0;
sc->sc_txd[cur].d_len = 0;
sc->sc_txcur = cur = (cur + 1) % sc->sc_ntxbuf;
- if (sc->sc_txd[cur].d_len == 0)
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- else {
+ if (sc->sc_txd[cur].d_len != 0) {
if (wi_cmd(sc, WI_CMD_TX | WI_RECLAIM, sc->sc_txd[cur].d_fid,
0, 0)) {
- if_printf(ifp, "xmit failed\n");
+ device_printf(sc->sc_dev, "xmit failed\n");
sc->sc_txd[cur].d_len = 0;
} else {
sc->sc_tx_timer = 5;
}
}
}
static __noinline void
wi_info_intr(struct wi_softc *sc)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
int i, fid, len, off;
u_int16_t ltbuf[2];
u_int16_t stat;
u_int32_t *ptr;
fid = CSR_READ_2(sc, WI_INFO_FID);
wi_read_bap(sc, fid, 0, ltbuf, sizeof(ltbuf));
switch (le16toh(ltbuf[1])) {
case WI_INFO_LINK_STAT:
wi_read_bap(sc, fid, sizeof(ltbuf), &stat, sizeof(stat));
DPRINTF(("wi_info_intr: LINK_STAT 0x%x\n", le16toh(stat)));
if (vap == NULL)
goto finish;
switch (le16toh(stat)) {
case WI_INFO_LINK_STAT_CONNECTED:
if (vap->iv_state == IEEE80211_S_RUN &&
vap->iv_opmode != IEEE80211_M_IBSS)
break;
/* fall thru... */
case WI_INFO_LINK_STAT_AP_CHG:
IEEE80211_LOCK(ic);
vap->iv_bss->ni_associd = 1 | 0xc000; /* NB: anything will do */
ieee80211_new_state(vap, IEEE80211_S_RUN, 0);
IEEE80211_UNLOCK(ic);
break;
case WI_INFO_LINK_STAT_AP_INR:
break;
case WI_INFO_LINK_STAT_DISCONNECTED:
/* we dropped off the net; e.g. due to deauth/disassoc */
IEEE80211_LOCK(ic);
vap->iv_bss->ni_associd = 0;
vap->iv_stats.is_rx_deauth++;
ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
IEEE80211_UNLOCK(ic);
break;
case WI_INFO_LINK_STAT_AP_OOR:
/* XXX does this need to be per-vap? */
ieee80211_beacon_miss(ic);
break;
case WI_INFO_LINK_STAT_ASSOC_FAILED:
if (vap->iv_opmode == IEEE80211_M_STA)
ieee80211_new_state(vap, IEEE80211_S_SCAN,
IEEE80211_SCAN_FAIL_TIMEOUT);
break;
}
break;
case WI_INFO_COUNTERS:
/* some card versions have a larger stats structure */
len = min(le16toh(ltbuf[0]) - 1, sizeof(sc->sc_stats) / 4);
ptr = (u_int32_t *)&sc->sc_stats;
off = sizeof(ltbuf);
for (i = 0; i < len; i++, off += 2, ptr++) {
wi_read_bap(sc, fid, off, &stat, sizeof(stat));
#ifdef WI_HERMES_STATS_WAR
if (stat & 0xf000)
stat = ~stat;
#endif
*ptr += stat;
}
break;
default:
DPRINTF(("wi_info_intr: got fid %x type %x len %d\n", fid,
le16toh(ltbuf[1]), le16toh(ltbuf[0])));
break;
}
finish:
CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_INFO);
}
-static uint64_t
-wi_get_counter(struct ifnet *ifp, ift_counter cnt)
-{
- struct wi_softc *sc;
-
- sc = if_getsoftc(ifp);
-
- switch (cnt) {
- case IFCOUNTER_COLLISIONS:
- return (sc->sc_stats.wi_tx_single_retries +
- sc->sc_stats.wi_tx_multi_retries +
- sc->sc_stats.wi_tx_retry_limit);
- default:
- return (if_get_counter_default(ifp, cnt));
- }
-}
-
static int
wi_write_multi(struct wi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- int n;
- struct ifmultiaddr *ifma;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap;
struct wi_mcast mlist;
+ int n;
- if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
+ if (ic->ic_allmulti > 0 || ic->ic_promisc > 0) {
allmulti:
memset(&mlist, 0, sizeof(mlist));
return wi_write_rid(sc, WI_RID_MCAST_LIST, &mlist,
sizeof(mlist));
}
n = 0;
- if_maddr_rlock(ifp);
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- if (n >= 16)
- goto allmulti;
- IEEE80211_ADDR_COPY(&mlist.wi_mcast[n],
- (LLADDR((struct sockaddr_dl *)ifma->ifma_addr)));
- n++;
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ struct ifnet *ifp;
+ struct ifmultiaddr *ifma;
+
+ ifp = vap->iv_ifp;
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ if (n >= 16)
+ goto allmulti;
+ IEEE80211_ADDR_COPY(&mlist.wi_mcast[n],
+ (LLADDR((struct sockaddr_dl *)ifma->ifma_addr)));
+ n++;
+ }
+ if_maddr_runlock(ifp);
}
- if_maddr_runlock(ifp);
return wi_write_rid(sc, WI_RID_MCAST_LIST, &mlist,
IEEE80211_ADDR_LEN * n);
}
static void
wi_update_mcast(struct ieee80211com *ic)
{
wi_write_multi(ic->ic_softc);
}
static void
wi_update_promisc(struct ieee80211com *ic)
{
struct wi_softc *sc = ic->ic_softc;
WI_LOCK(sc);
/* XXX handle the WEP special case? */
wi_write_val(sc, WI_RID_PROMISC,
(ic->ic_opmode == IEEE80211_M_MONITOR ||
- (ic->ic_ifp->if_flags & IFF_PROMISC)));
+ (ic->ic_promisc > 0)));
WI_UNLOCK(sc);
}
static void
wi_read_nicid(struct wi_softc *sc)
{
struct wi_card_ident *id;
char *p;
int len;
u_int16_t ver[4];
/* getting chip identity */
memset(ver, 0, sizeof(ver));
len = sizeof(ver);
wi_read_rid(sc, WI_RID_CARD_ID, ver, &len);
sc->sc_firmware_type = WI_NOTYPE;
sc->sc_nic_id = le16toh(ver[0]);
for (id = wi_card_ident; id->card_name != NULL; id++) {
if (sc->sc_nic_id == id->card_id) {
sc->sc_nic_name = id->card_name;
sc->sc_firmware_type = id->firm_type;
break;
}
}
if (sc->sc_firmware_type == WI_NOTYPE) {
if (sc->sc_nic_id & 0x8000) {
sc->sc_firmware_type = WI_INTERSIL;
sc->sc_nic_name = "Unknown Prism chip";
} else {
sc->sc_firmware_type = WI_LUCENT;
sc->sc_nic_name = "Unknown Lucent chip";
}
}
if (bootverbose)
device_printf(sc->sc_dev, "using %s\n", sc->sc_nic_name);
/* get primary firmware version (Only Prism chips) */
if (sc->sc_firmware_type != WI_LUCENT) {
memset(ver, 0, sizeof(ver));
len = sizeof(ver);
wi_read_rid(sc, WI_RID_PRI_IDENTITY, ver, &len);
sc->sc_pri_firmware_ver = le16toh(ver[2]) * 10000 +
le16toh(ver[3]) * 100 + le16toh(ver[1]);
}
/* get station firmware version */
memset(ver, 0, sizeof(ver));
len = sizeof(ver);
wi_read_rid(sc, WI_RID_STA_IDENTITY, ver, &len);
sc->sc_sta_firmware_ver = le16toh(ver[2]) * 10000 +
le16toh(ver[3]) * 100 + le16toh(ver[1]);
if (sc->sc_firmware_type == WI_INTERSIL &&
(sc->sc_sta_firmware_ver == 10102 ||
sc->sc_sta_firmware_ver == 20102)) {
char ident[12];
memset(ident, 0, sizeof(ident));
len = sizeof(ident);
/* the value should be in a format like "V2.00-11" */
if (wi_read_rid(sc, WI_RID_SYMBOL_IDENTITY, ident, &len) == 0 &&
*(p = (char *)ident) >= 'A' &&
p[2] == '.' && p[5] == '-' && p[8] == '\0') {
sc->sc_firmware_type = WI_SYMBOL;
sc->sc_sta_firmware_ver = (p[1] - '0') * 10000 +
(p[3] - '0') * 1000 + (p[4] - '0') * 100 +
(p[6] - '0') * 10 + (p[7] - '0');
}
}
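/*
 * For example, a WI_RID_SYMBOL_IDENTITY value of "V2.00-11" gives
 * p[1]='2', p[3]='0', p[4]='0', p[6]='1' and p[7]='1', so the station
 * firmware version above works out to
 * 2*10000 + 0*1000 + 0*100 + 1*10 + 1 = 20011.
 */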
if (bootverbose) {
device_printf(sc->sc_dev, "%s Firmware: ",
wi_firmware_names[sc->sc_firmware_type]);
if (sc->sc_firmware_type != WI_LUCENT) /* XXX */
printf("Primary (%u.%u.%u), ",
sc->sc_pri_firmware_ver / 10000,
(sc->sc_pri_firmware_ver % 10000) / 100,
sc->sc_pri_firmware_ver % 100);
printf("Station (%u.%u.%u)\n",
sc->sc_sta_firmware_ver / 10000,
(sc->sc_sta_firmware_ver % 10000) / 100,
sc->sc_sta_firmware_ver % 100);
}
}
static int
wi_write_ssid(struct wi_softc *sc, int rid, u_int8_t *buf, int buflen)
{
struct wi_ssid ssid;
if (buflen > IEEE80211_NWID_LEN)
return ENOBUFS;
memset(&ssid, 0, sizeof(ssid));
ssid.wi_len = htole16(buflen);
memcpy(ssid.wi_ssid, buf, buflen);
return wi_write_rid(sc, rid, &ssid, sizeof(ssid));
}
static int
wi_write_txrate(struct wi_softc *sc, struct ieee80211vap *vap)
{
static const uint16_t lucent_rates[12] = {
[ 0] = 3, /* auto */
[ 1] = 1, /* 1Mb/s */
[ 2] = 2, /* 2Mb/s */
[ 5] = 4, /* 5.5Mb/s */
[11] = 5 /* 11Mb/s */
};
static const uint16_t intersil_rates[12] = {
[ 0] = 0xf, /* auto */
[ 1] = 0, /* 1Mb/s */
[ 2] = 1, /* 2Mb/s */
[ 5] = 2, /* 5.5Mb/s */
[11] = 3, /* 11Mb/s */
};
const uint16_t *rates = sc->sc_firmware_type == WI_LUCENT ?
lucent_rates : intersil_rates;
struct ieee80211com *ic = vap->iv_ic;
const struct ieee80211_txparam *tp;
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)];
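/*
 * tp->ucastrate is expressed in 0.5 Mb/s units, so dividing by two
 * yields the rate in Mb/s, which is the index into the tables above
 * (e.g. a fixed 11 Mb/s rate, ucastrate 22, selects rates[11]).
 */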
return wi_write_val(sc, WI_RID_TX_RATE,
(tp->ucastrate == IEEE80211_FIXED_RATE_NONE ?
rates[0] : rates[tp->ucastrate / 2]));
}
static int
wi_write_wep(struct wi_softc *sc, struct ieee80211vap *vap)
{
int error = 0;
int i, keylen;
u_int16_t val;
struct wi_key wkey[IEEE80211_WEP_NKID];
switch (sc->sc_firmware_type) {
case WI_LUCENT:
val = (vap->iv_flags & IEEE80211_F_PRIVACY) ? 1 : 0;
error = wi_write_val(sc, WI_RID_ENCRYPTION, val);
if (error)
break;
if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0)
break;
error = wi_write_val(sc, WI_RID_TX_CRYPT_KEY, vap->iv_def_txkey);
if (error)
break;
memset(wkey, 0, sizeof(wkey));
for (i = 0; i < IEEE80211_WEP_NKID; i++) {
keylen = vap->iv_nw_keys[i].wk_keylen;
wkey[i].wi_keylen = htole16(keylen);
memcpy(wkey[i].wi_keydat, vap->iv_nw_keys[i].wk_key,
keylen);
}
error = wi_write_rid(sc, WI_RID_DEFLT_CRYPT_KEYS,
wkey, sizeof(wkey));
sc->sc_encryption = 0;
break;
case WI_INTERSIL:
val = HOST_ENCRYPT | HOST_DECRYPT;
if (vap->iv_flags & IEEE80211_F_PRIVACY) {
/*
* Applies only to HWB3163 EVAL-CARD firmware versions
* below 0.8 variant 2.
*
* If promiscuous mode is disabled, the Prism2 chip
* does not work with WEP.
* The details are still under investigation.
* (ichiro@netbsd.org)
*/
if (sc->sc_sta_firmware_ver < 802 ) {
/* firm ver < 0.8 variant 2 */
wi_write_val(sc, WI_RID_PROMISC, 1);
}
wi_write_val(sc, WI_RID_CNFAUTHMODE,
vap->iv_bss->ni_authmode);
val |= PRIVACY_INVOKED;
} else {
wi_write_val(sc, WI_RID_CNFAUTHMODE, IEEE80211_AUTH_OPEN);
}
error = wi_write_val(sc, WI_RID_P2_ENCRYPTION, val);
if (error)
break;
sc->sc_encryption = val;
if ((val & PRIVACY_INVOKED) == 0)
break;
error = wi_write_val(sc, WI_RID_P2_TX_CRYPT_KEY, vap->iv_def_txkey);
break;
}
return error;
}
static int
wi_cmd(struct wi_softc *sc, int cmd, int val0, int val1, int val2)
{
int i, s = 0;
if (sc->wi_gone)
return (ENODEV);
/* wait for the busy bit to clear */
for (i = sc->wi_cmd_count; i > 0; i--) { /* 500ms */
if (!(CSR_READ_2(sc, WI_COMMAND) & WI_CMD_BUSY))
break;
DELAY(1*1000); /* 1ms */
}
if (i == 0) {
device_printf(sc->sc_dev, "%s: busy bit won't clear, cmd 0x%x\n",
__func__, cmd);
sc->wi_gone = 1;
return(ETIMEDOUT);
}
CSR_WRITE_2(sc, WI_PARAM0, val0);
CSR_WRITE_2(sc, WI_PARAM1, val1);
CSR_WRITE_2(sc, WI_PARAM2, val2);
CSR_WRITE_2(sc, WI_COMMAND, cmd);
if (cmd == WI_CMD_INI) {
/* XXX: should sleep here. */
DELAY(100*1000); /* 100ms delay for init */
}
for (i = 0; i < WI_TIMEOUT; i++) {
/*
* Wait for 'command complete' bit to be
* set in the event status register.
*/
s = CSR_READ_2(sc, WI_EVENT_STAT);
if (s & WI_EV_CMD) {
/* Ack the event and read result code. */
s = CSR_READ_2(sc, WI_STATUS);
CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_CMD);
if (s & WI_STAT_CMD_RESULT) {
return(EIO);
}
break;
}
DELAY(WI_DELAY);
}
if (i == WI_TIMEOUT) {
device_printf(sc->sc_dev, "%s: timeout on cmd 0x%04x; "
"event status 0x%04x\n", __func__, cmd, s);
if (s == 0xffff)
sc->wi_gone = 1;
return(ETIMEDOUT);
}
return (0);
}
static int
wi_seek_bap(struct wi_softc *sc, int id, int off)
{
int i, status;
CSR_WRITE_2(sc, WI_SEL0, id);
CSR_WRITE_2(sc, WI_OFF0, off);
for (i = 0; ; i++) {
status = CSR_READ_2(sc, WI_OFF0);
if ((status & WI_OFF_BUSY) == 0)
break;
if (i == WI_TIMEOUT) {
device_printf(sc->sc_dev, "%s: timeout, id %x off %x\n",
__func__, id, off);
sc->sc_bap_off = WI_OFF_ERR; /* invalidate */
if (status == 0xffff)
sc->wi_gone = 1;
return ETIMEDOUT;
}
DELAY(1);
}
if (status & WI_OFF_ERR) {
device_printf(sc->sc_dev, "%s: error, id %x off %x\n",
__func__, id, off);
sc->sc_bap_off = WI_OFF_ERR; /* invalidate */
return EIO;
}
sc->sc_bap_id = id;
sc->sc_bap_off = off;
return 0;
}
static int
wi_read_bap(struct wi_softc *sc, int id, int off, void *buf, int buflen)
{
int error, cnt;
if (buflen == 0)
return 0;
if (id != sc->sc_bap_id || off != sc->sc_bap_off) {
if ((error = wi_seek_bap(sc, id, off)) != 0)
return error;
}
cnt = (buflen + 1) / 2;
CSR_READ_MULTI_STREAM_2(sc, WI_DATA0, (u_int16_t *)buf, cnt);
sc->sc_bap_off += cnt * 2;
return 0;
}
static int
-wi_write_bap(struct wi_softc *sc, int id, int off, void *buf, int buflen)
+wi_write_bap(struct wi_softc *sc, int id, int off, const void *buf, int buflen)
{
int error, cnt;
if (buflen == 0)
return 0;
if (id != sc->sc_bap_id || off != sc->sc_bap_off) {
if ((error = wi_seek_bap(sc, id, off)) != 0)
return error;
}
cnt = (buflen + 1) / 2;
- CSR_WRITE_MULTI_STREAM_2(sc, WI_DATA0, (u_int16_t *)buf, cnt);
+ CSR_WRITE_MULTI_STREAM_2(sc, WI_DATA0, (const uint16_t *)buf, cnt);
sc->sc_bap_off += cnt * 2;
return 0;
}
static int
wi_mwrite_bap(struct wi_softc *sc, int id, int off, struct mbuf *m0, int totlen)
{
int error, len;
struct mbuf *m;
for (m = m0; m != NULL && totlen > 0; m = m->m_next) {
if (m->m_len == 0)
continue;
len = min(m->m_len, totlen);
if (((u_long)m->m_data) % 2 != 0 || len % 2 != 0) {
m_copydata(m, 0, totlen, (caddr_t)&sc->sc_txbuf);
return wi_write_bap(sc, id, off, (caddr_t)&sc->sc_txbuf,
totlen);
}
if ((error = wi_write_bap(sc, id, off, m->m_data, len)) != 0)
return error;
off += m->m_len;
totlen -= len;
}
return 0;
}
static int
wi_alloc_fid(struct wi_softc *sc, int len, int *idp)
{
int i;
if (wi_cmd(sc, WI_CMD_ALLOC_MEM, len, 0, 0)) {
device_printf(sc->sc_dev, "%s: failed to allocate %d bytes on NIC\n",
__func__, len);
return ENOMEM;
}
for (i = 0; i < WI_TIMEOUT; i++) {
if (CSR_READ_2(sc, WI_EVENT_STAT) & WI_EV_ALLOC)
break;
DELAY(1);
}
if (i == WI_TIMEOUT) {
device_printf(sc->sc_dev, "%s: timeout in alloc\n", __func__);
return ETIMEDOUT;
}
*idp = CSR_READ_2(sc, WI_ALLOC_FID);
CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_ALLOC);
return 0;
}
static int
wi_read_rid(struct wi_softc *sc, int rid, void *buf, int *buflenp)
{
int error, len;
u_int16_t ltbuf[2];
/* Tell the NIC to enter record read mode. */
error = wi_cmd(sc, WI_CMD_ACCESS | WI_ACCESS_READ, rid, 0, 0);
if (error)
return error;
error = wi_read_bap(sc, rid, 0, ltbuf, sizeof(ltbuf));
if (error)
return error;
if (le16toh(ltbuf[1]) != rid) {
device_printf(sc->sc_dev, "record read mismatch, rid=%x, got=%x\n",
rid, le16toh(ltbuf[1]));
return EIO;
}
len = (le16toh(ltbuf[0]) - 1) * 2; /* already got rid */
if (*buflenp < len) {
device_printf(sc->sc_dev, "record buffer is too small, "
"rid=%x, size=%d, len=%d\n",
rid, *buflenp, len);
return ENOSPC;
}
*buflenp = len;
return wi_read_bap(sc, rid, sizeof(ltbuf), buf, len);
}
static int
-wi_write_rid(struct wi_softc *sc, int rid, void *buf, int buflen)
+wi_write_rid(struct wi_softc *sc, int rid, const void *buf, int buflen)
{
int error;
u_int16_t ltbuf[2];
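/*
 * Each RID record begins with a two-word header: the record length in
 * 16-bit words (counting the RID word itself but not the length word),
 * followed by the RID. Hence (buflen + 1) / 2 data words plus one for
 * the RID below; wi_read_rid() relies on the same layout when it
 * computes (le16toh(ltbuf[0]) - 1) * 2.
 */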
ltbuf[0] = htole16((buflen + 1) / 2 + 1); /* includes rid */
ltbuf[1] = htole16(rid);
error = wi_write_bap(sc, rid, 0, ltbuf, sizeof(ltbuf));
if (error) {
device_printf(sc->sc_dev, "%s: bap0 write failure, rid 0x%x\n",
__func__, rid);
return error;
}
error = wi_write_bap(sc, rid, sizeof(ltbuf), buf, buflen);
if (error) {
device_printf(sc->sc_dev, "%s: bap1 write failure, rid 0x%x\n",
__func__, rid);
return error;
}
return wi_cmd(sc, WI_CMD_ACCESS | WI_ACCESS_WRITE, rid, 0, 0);
}
static int
wi_write_appie(struct wi_softc *sc, int rid, const struct ieee80211_appie *ie)
{
/* NB: 42 bytes is probably ok to have on the stack */
char buf[sizeof(uint16_t) + 40];
if (ie->ie_len > 40)
return EINVAL;
/* NB: firmware requires 16-bit ie length before ie data */
*(uint16_t *) buf = htole16(ie->ie_len);
memcpy(buf + sizeof(uint16_t), ie->ie_data, ie->ie_len);
return wi_write_rid(sc, rid, buf, ie->ie_len + sizeof(uint16_t));
}
int
wi_alloc(device_t dev, int rid)
{
struct wi_softc *sc = device_get_softc(dev);
if (sc->wi_bus_type != WI_BUS_PCI_NATIVE) {
sc->iobase_rid = rid;
sc->iobase = bus_alloc_resource(dev, SYS_RES_IOPORT,
&sc->iobase_rid, 0, ~0, (1 << 6),
rman_make_alignment_flags(1 << 6) | RF_ACTIVE);
if (sc->iobase == NULL) {
device_printf(dev, "No I/O space?!\n");
return ENXIO;
}
sc->wi_io_addr = rman_get_start(sc->iobase);
sc->wi_btag = rman_get_bustag(sc->iobase);
sc->wi_bhandle = rman_get_bushandle(sc->iobase);
} else {
sc->mem_rid = rid;
sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&sc->mem_rid, RF_ACTIVE);
if (sc->mem == NULL) {
device_printf(dev, "No Mem space on prism2.5?\n");
return ENXIO;
}
sc->wi_btag = rman_get_bustag(sc->mem);
sc->wi_bhandle = rman_get_bushandle(sc->mem);
}
sc->irq_rid = 0;
sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
RF_ACTIVE |
((sc->wi_bus_type == WI_BUS_PCCARD) ? 0 : RF_SHAREABLE));
if (sc->irq == NULL) {
wi_free(dev);
device_printf(dev, "No irq?!\n");
return ENXIO;
}
sc->sc_dev = dev;
sc->sc_unit = device_get_unit(dev);
return 0;
}
void
wi_free(device_t dev)
{
struct wi_softc *sc = device_get_softc(dev);
if (sc->iobase != NULL) {
bus_release_resource(dev, SYS_RES_IOPORT, sc->iobase_rid, sc->iobase);
sc->iobase = NULL;
}
if (sc->irq != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
sc->irq = NULL;
}
if (sc->mem != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem);
sc->mem = NULL;
}
}
Index: head/sys/dev/wi/if_wi_macio.c
===================================================================
--- head/sys/dev/wi/if_wi_macio.c (revision 287196)
+++ head/sys/dev/wi/if_wi_macio.c (revision 287197)
@@ -1,144 +1,145 @@
/*-
* Copyright (c) 2013 Justin Hibbits
* All rights reserved.
* Copyright (c) 1997, 1998, 1999
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Lucent WaveLAN/IEEE 802.11 MacIO attachment for FreeBSD.
*
* Based on the PCMCIA driver
* Written by Bill Paul <wpaul@ctr.columbia.edu>
* Electrical Engineering Department
* Columbia University, New York City
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
+#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/wi/if_wavelan_ieee.h>
#include <dev/wi/if_wireg.h>
#include <dev/wi/if_wivar.h>
#include <powerpc/powermac/maciovar.h>
static int wi_macio_probe(device_t);
static int wi_macio_attach(device_t);
static device_method_t wi_macio_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, wi_macio_probe),
DEVMETHOD(device_attach, wi_macio_attach),
DEVMETHOD(device_detach, wi_detach),
DEVMETHOD(device_shutdown, wi_shutdown),
{ 0, 0 }
};
static driver_t wi_macio_driver = {
"wi",
wi_macio_methods,
sizeof(struct wi_softc)
};
DRIVER_MODULE(wi, macio, wi_macio_driver, wi_devclass, 0, 0);
MODULE_DEPEND(wi, wlan, 1, 1, 1);
static int
wi_macio_probe(device_t dev)
{
const char *name, *compat;
/* Make sure we're a network driver */
name = ofw_bus_get_name(dev);
if (name == NULL)
return (ENXIO);
if (strcmp(name, "radio") != 0) {
return ENXIO;
}
compat = ofw_bus_get_compat(dev);
if (strcmp(compat, "wireless") != 0) {
return ENXIO;
}
device_set_desc(dev, "Apple Airport");
return 0;
}
static int
wi_macio_attach(device_t dev)
{
struct wi_softc *sc;
int error;
sc = device_get_softc(dev);
sc->wi_gone = 0;
sc->wi_bus_type = 0;
error = wi_alloc(dev, 0);
if (error == 0) {
macio_enable_wireless(device_get_parent(dev), 1);
/* Make sure interrupts are disabled. */
CSR_WRITE_2(sc, WI_INT_EN, 0);
CSR_WRITE_2(sc, WI_EVENT_ACK, 0xFFFF);
error = wi_attach(dev);
if (error != 0)
wi_free(dev);
}
return error;
}
Index: head/sys/dev/wi/if_wi_pccard.c
===================================================================
--- head/sys/dev/wi/if_wi_pccard.c (revision 287196)
+++ head/sys/dev/wi/if_wi_pccard.c (revision 287197)
@@ -1,201 +1,202 @@
/*-
* Copyright (c) 1997, 1998, 1999
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Lucent WaveLAN/IEEE 802.11 PCMCIA driver for FreeBSD.
*
* Written by Bill Paul <wpaul@ctr.columbia.edu>
* Electrical Engineering Department
* Columbia University, New York City
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/systm.h>
+#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/pccard/pccardvar.h>
#include <dev/pccard/pccard_cis.h>
#include <dev/wi/if_wavelan_ieee.h>
#include <dev/wi/if_wireg.h>
#include <dev/wi/if_wivar.h>
#include "card_if.h"
#include "pccarddevs.h"
static int wi_pccard_probe(device_t);
static int wi_pccard_attach(device_t);
static device_method_t wi_pccard_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, wi_pccard_probe),
DEVMETHOD(device_attach, wi_pccard_attach),
DEVMETHOD(device_detach, wi_detach),
DEVMETHOD(device_shutdown, wi_shutdown),
{ 0, 0 }
};
static driver_t wi_pccard_driver = {
"wi",
wi_pccard_methods,
sizeof(struct wi_softc)
};
DRIVER_MODULE(wi, pccard, wi_pccard_driver, wi_devclass, 0, 0);
MODULE_DEPEND(wi, wlan, 1, 1, 1);
static const struct pccard_product wi_pccard_products[] = {
PCMCIA_CARD(3COM, 3CRWE737A),
PCMCIA_CARD(3COM, 3CRWE777A),
PCMCIA_CARD(ACTIONTEC, PRISM),
PCMCIA_CARD(ADAPTEC2, ANW8030),
PCMCIA_CARD(ADDTRON, AWP100),
PCMCIA_CARD(AIRVAST, WN_100B),
PCMCIA_CARD(AIRVAST, WN_100),
PCMCIA_CARD(ALLIEDTELESIS, WR211PCM),
PCMCIA_CARD(ARTEM, ONAIR),
PCMCIA_CARD(ASUS, WL100),
PCMCIA_CARD(BAY, EMOBILITY_11B),
PCMCIA_CARD(BROMAX, IWN),
PCMCIA_CARD(BROMAX, IWN3),
PCMCIA_CARD(BROMAX, WCF11),
PCMCIA_CARD(BUFFALO, WLI_CF_S11G),
PCMCIA_CARD(BUFFALO, WLI_PCM_S11),
PCMCIA_CARD(COMPAQ, NC5004),
PCMCIA_CARD(CONTEC, FX_DS110_PCC),
PCMCIA_CARD(COREGA, WIRELESS_LAN_PCC_11),
PCMCIA_CARD(COREGA, WIRELESS_LAN_PCCA_11),
PCMCIA_CARD(COREGA, WIRELESS_LAN_PCCB_11),
PCMCIA_CARD(COREGA, WIRELESS_LAN_PCCL_11),
PCMCIA_CARD(DLINK, DWL650H),
PCMCIA_CARD(ELSA, XI300_IEEE),
PCMCIA_CARD(ELSA, XI325_IEEE),
PCMCIA_CARD(ELSA, APDL325_IEEE),
PCMCIA_CARD(ELSA, XI330_IEEE),
PCMCIA_CARD(ELSA, XI800_IEEE),
PCMCIA_CARD(ELSA, WIFI_FLASH),
PCMCIA_CARD(EMTAC, WLAN),
PCMCIA_CARD(ERICSSON, WIRELESSLAN),
PCMCIA_CARD(GEMTEK, WLAN),
PCMCIA_CARD(HWN, AIRWAY80211),
PCMCIA_CARD(INTEL, PRO_WLAN_2011),
PCMCIA_CARD(INTERSIL, ISL37100P),
PCMCIA_CARD(INTERSIL, ISL37110P),
PCMCIA_CARD(INTERSIL, ISL37300P),
PCMCIA_CARD(INTERSIL2, PRISM2),
PCMCIA_CARD(IODATA2, WCF12),
PCMCIA_CARD(IODATA2, WNB11PCM),
PCMCIA_CARD(FUJITSU, WL110),
PCMCIA_CARD(LUCENT, WAVELAN_IEEE),
PCMCIA_CARD(MICROSOFT, MN_520),
PCMCIA_CARD(NOKIA, C020_WLAN),
PCMCIA_CARD(NOKIA, C110_WLAN),
PCMCIA_CARD(PLANEX, GWNS11H),
PCMCIA_CARD(PROXIM, HARMONY),
PCMCIA_CARD(PROXIM, RANGELANDS_8430),
PCMCIA_CARD(SAMSUNG, SWL_2000N),
PCMCIA_CARD(SIEMENS, SS1021),
PCMCIA_CARD(SIEMENS, SS1021A),
PCMCIA_CARD(SIMPLETECH, SPECTRUM24_ALT),
PCMCIA_CARD(SOCKET, LP_WLAN_CF),
PCMCIA_CARD(TDK, LAK_CD011WL),
{ NULL }
};
static int
wi_pccard_probe(device_t dev)
{
const struct pccard_product *pp;
u_int32_t fcn = PCCARD_FUNCTION_UNSPEC;
int error;
/* Make sure we're a network driver */
error = pccard_get_function(dev, &fcn);
if (error != 0)
return error;
if (fcn != PCCARD_FUNCTION_NETWORK)
return ENXIO;
pp = pccard_product_lookup(dev, wi_pccard_products,
sizeof(wi_pccard_products[0]), NULL);
if (pp != NULL) {
if (pp->pp_name != NULL)
device_set_desc(dev, pp->pp_name);
return 0;
}
return ENXIO;
}
static int
wi_pccard_attach(device_t dev)
{
struct wi_softc *sc;
int error;
sc = device_get_softc(dev);
sc->wi_gone = 0;
sc->wi_bus_type = WI_BUS_PCCARD;
error = wi_alloc(dev, 0);
if (error == 0) {
/* Make sure interrupts are disabled. */
CSR_WRITE_2(sc, WI_INT_EN, 0);
CSR_WRITE_2(sc, WI_EVENT_ACK, 0xFFFF);
error = wi_attach(dev);
if (error != 0)
wi_free(dev);
}
return error;
}
Index: head/sys/dev/wi/if_wi_pci.c
===================================================================
--- head/sys/dev/wi/if_wi_pci.c (revision 287196)
+++ head/sys/dev/wi/if_wi_pci.c (revision 287197)
@@ -1,262 +1,263 @@
/*-
* Copyright (c) 1997, 1998, 1999
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Lucent WaveLAN/IEEE 802.11 PCMCIA driver for FreeBSD.
*
* Written by Bill Paul <wpaul@ctr.columbia.edu>
* Electrical Engineering Department
* Columbia University, New York City
*/
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/wi/if_wavelan_ieee.h>
#include <dev/wi/if_wireg.h>
#include <dev/wi/if_wivar.h>
static int wi_pci_probe(device_t);
static int wi_pci_attach(device_t);
static int wi_pci_suspend(device_t);
static int wi_pci_resume(device_t);
static device_method_t wi_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, wi_pci_probe),
DEVMETHOD(device_attach, wi_pci_attach),
DEVMETHOD(device_detach, wi_detach),
DEVMETHOD(device_shutdown, wi_shutdown),
DEVMETHOD(device_suspend, wi_pci_suspend),
DEVMETHOD(device_resume, wi_pci_resume),
{ 0, 0 }
};
static driver_t wi_pci_driver = {
"wi",
wi_pci_methods,
sizeof(struct wi_softc)
};
static struct {
unsigned int vendor,device;
int bus_type;
char *desc;
} pci_ids[] = {
/* Sorted by description */
{0x10b7, 0x7770, WI_BUS_PCI_PLX, "3Com Airconnect"},
{0x16ab, 0x1101, WI_BUS_PCI_PLX, "GLPRISM2 WaveLAN"},
{0x1260, 0x3872, WI_BUS_PCI_NATIVE, "Intersil Prism3"},
{0x1260, 0x3873, WI_BUS_PCI_NATIVE, "Intersil Prism2.5"},
{0x16ab, 0x1102, WI_BUS_PCI_PLX, "Linksys WDT11"},
{0x1385, 0x4100, WI_BUS_PCI_PLX, "Netgear MA301"},
{0x1638, 0x1100, WI_BUS_PCI_PLX, "PRISM2STA WaveLAN"},
{0x111a, 0x1023, WI_BUS_PCI_PLX, "Siemens SpeedStream"},
{0x10b5, 0x9050, WI_BUS_PCI_PLX, "SMC 2602W"},
{0x16ec, 0x3685, WI_BUS_PCI_PLX, "US Robotics 2415"},
{0x4033, 0x7001, WI_BUS_PCI_PLX, "Addtron AWA-100 PCI"},
{0, 0, 0, NULL}
};
DRIVER_MODULE(wi, pci, wi_pci_driver, wi_devclass, 0, 0);
MODULE_DEPEND(wi, pci, 1, 1, 1);
MODULE_DEPEND(wi, wlan, 1, 1, 1);
static int
wi_pci_probe(dev)
device_t dev;
{
struct wi_softc *sc;
int i;
sc = device_get_softc(dev);
for(i=0; pci_ids[i].vendor != 0; i++) {
if ((pci_get_vendor(dev) == pci_ids[i].vendor) &&
(pci_get_device(dev) == pci_ids[i].device)) {
sc->wi_bus_type = pci_ids[i].bus_type;
device_set_desc(dev, pci_ids[i].desc);
return (BUS_PROBE_DEFAULT);
}
}
return(ENXIO);
}
static int
wi_pci_attach(device_t dev)
{
struct wi_softc *sc;
u_int32_t command;
u_int16_t reg;
int error;
int timeout;
sc = device_get_softc(dev);
if (sc->wi_bus_type != WI_BUS_PCI_NATIVE) {
error = wi_alloc(dev, WI_PCI_IORES);
if (error)
return (error);
/* Make sure interrupts are disabled. */
CSR_WRITE_2(sc, WI_INT_EN, 0);
CSR_WRITE_2(sc, WI_EVENT_ACK, 0xFFFF);
/* We have to do a magic PLX poke to enable interrupts */
sc->local_rid = WI_PCI_LOCALRES;
sc->local = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
&sc->local_rid, RF_ACTIVE);
sc->wi_localtag = rman_get_bustag(sc->local);
sc->wi_localhandle = rman_get_bushandle(sc->local);
command = bus_space_read_4(sc->wi_localtag, sc->wi_localhandle,
WI_LOCAL_INTCSR);
command |= WI_LOCAL_INTEN;
bus_space_write_4(sc->wi_localtag, sc->wi_localhandle,
WI_LOCAL_INTCSR, command);
bus_release_resource(dev, SYS_RES_IOPORT, sc->local_rid,
sc->local);
sc->local = NULL;
sc->mem_rid = WI_PCI_MEMRES;
sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&sc->mem_rid, RF_ACTIVE);
if (sc->mem == NULL) {
device_printf(dev, "couldn't allocate memory\n");
wi_free(dev);
return (ENXIO);
}
sc->wi_bmemtag = rman_get_bustag(sc->mem);
sc->wi_bmemhandle = rman_get_bushandle(sc->mem);
/*
* Write COR to enable PC card
* This is a subset of the protocol that the pccard bus code
* would do. In theory, we should parse the CIS to find the
* COR offset. In practice, the COR_OFFSET is always 0x3e0.
*/
CSM_WRITE_1(sc, WI_COR_OFFSET, WI_COR_VALUE);
reg = CSM_READ_1(sc, WI_COR_OFFSET);
if (reg != WI_COR_VALUE) {
device_printf(dev, "CSM_READ_1(WI_COR_OFFSET) "
"wanted %d, got %d\n", WI_COR_VALUE, reg);
wi_free(dev);
return (ENXIO);
}
} else {
error = wi_alloc(dev, WI_PCI_LMEMRES);
if (error)
return (error);
CSR_WRITE_2(sc, WI_PCICOR_OFF, WI_PCICOR_RESET);
DELAY(250000);
CSR_WRITE_2(sc, WI_PCICOR_OFF, 0x0000);
DELAY(500000);
timeout=2000000;
while ((--timeout > 0) &&
(CSR_READ_2(sc, WI_COMMAND) & WI_CMD_BUSY))
DELAY(10);
if (timeout == 0) {
device_printf(dev, "couldn't reset prism pci core.\n");
wi_free(dev);
return(ENXIO);
}
}
CSR_WRITE_2(sc, WI_HFA384X_SWSUPPORT0_OFF, WI_PRISM2STA_MAGIC);
reg = CSR_READ_2(sc, WI_HFA384X_SWSUPPORT0_OFF);
if (reg != WI_PRISM2STA_MAGIC) {
device_printf(dev,
"CSR_READ_2(WI_HFA384X_SWSUPPORT0_OFF) "
"wanted %d, got %d\n", WI_PRISM2STA_MAGIC, reg);
wi_free(dev);
return (ENXIO);
}
error = wi_attach(dev);
if (error != 0)
wi_free(dev);
return (error);
}
static int
wi_pci_suspend(device_t dev)
{
struct wi_softc *sc = device_get_softc(dev);
+ WI_LOCK(sc);
wi_stop(sc, 1);
+ WI_UNLOCK(sc);
return (0);
}
static int
wi_pci_resume(device_t dev)
{
struct wi_softc *sc = device_get_softc(dev);
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
- if (sc->wi_bus_type != WI_BUS_PCI_NATIVE)
+ WI_LOCK(sc);
+ if (sc->wi_bus_type != WI_BUS_PCI_NATIVE) {
+ WI_UNLOCK(sc);
return (0);
-
- if (ifp->if_flags & IFF_UP) {
- ifp->if_init(ifp->if_softc);
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ifp->if_start(ifp);
}
-
+ if (ic->ic_nrunning > 0)
+ wi_init(sc);
+ WI_UNLOCK(sc);
return (0);
}
Index: head/sys/dev/wi/if_wivar.h
===================================================================
--- head/sys/dev/wi/if_wivar.h (revision 287196)
+++ head/sys/dev/wi/if_wivar.h (revision 287197)
@@ -1,186 +1,188 @@
/*-
* Copyright (c) 2002
* M Warner Losh <imp@freebsd.org>. All rights reserved.
* Copyright (c) 1997, 1998, 1999
* Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Bill Paul.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Encryption controls. We can enable or disable encryption as
* well as specify up to 4 encryption keys. We can also specify
* which of the four keys will be used for transmit encryption.
*/
#define WI_RID_ENCRYPTION 0xFC20
#define WI_RID_AUTHTYPE 0xFC21
#define WI_RID_DEFLT_CRYPT_KEYS 0xFCB0
#define WI_RID_TX_CRYPT_KEY 0xFCB1
#define WI_RID_WEP_AVAIL 0xFD4F
#define WI_RID_P2_TX_CRYPT_KEY 0xFC23
#define WI_RID_P2_CRYPT_KEY0 0xFC24
#define WI_RID_P2_CRYPT_KEY1 0xFC25
#define WI_RID_MICROWAVE_OVEN 0xFC25
#define WI_RID_P2_CRYPT_KEY2 0xFC26
#define WI_RID_P2_CRYPT_KEY3 0xFC27
#define WI_RID_P2_ENCRYPTION 0xFC28
#define WI_RID_ROAMING_MODE 0xFC2D
#define WI_RID_CUR_TX_RATE 0xFD44 /* current TX rate */
#define WI_MAX_AID 256 /* max stations for ap operation */
struct wi_vap {
struct ieee80211vap wv_vap;
struct ieee80211_beacon_offsets wv_bo;
void (*wv_recv_mgmt)(struct ieee80211_node *, struct mbuf *,
int, const struct ieee80211_rx_stats *rxs, int, int);
int (*wv_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
};
#define WI_VAP(vap) ((struct wi_vap *)(vap))
struct wi_softc {
- struct ifnet *sc_ifp;
+ struct ieee80211com sc_ic;
+ struct mbufq sc_snd;
device_t sc_dev;
struct mtx sc_mtx;
struct callout sc_watchdog;
int sc_unit;
int wi_gone;
int sc_enabled;
int sc_reset;
int sc_firmware_type;
#define WI_NOTYPE 0
#define WI_LUCENT 1
#define WI_INTERSIL 2
#define WI_SYMBOL 3
int sc_pri_firmware_ver; /* Primary firmware */
int sc_sta_firmware_ver; /* Station firmware */
unsigned int sc_nic_id; /* Type of NIC */
char * sc_nic_name;
int wi_bus_type; /* Bus attachment type */
struct resource * local;
int local_rid;
struct resource * iobase;
int iobase_rid;
struct resource * irq;
int irq_rid;
struct resource * mem;
int mem_rid;
bus_space_handle_t wi_localhandle;
bus_space_tag_t wi_localtag;
bus_space_handle_t wi_bhandle;
bus_space_tag_t wi_btag;
bus_space_handle_t wi_bmemhandle;
bus_space_tag_t wi_bmemtag;
void * wi_intrhand;
struct ieee80211_channel *wi_channel;
int wi_io_addr;
int wi_cmd_count;
int sc_flags;
- int sc_if_flags;
int sc_bap_id;
int sc_bap_off;
int sc_porttype;
u_int16_t sc_portnum;
u_int16_t sc_encryption;
u_int16_t sc_monitor_port;
/* RSSI interpretation */
u_int16_t sc_min_rssi; /* clamp sc_min_rssi < RSSI */
u_int16_t sc_max_rssi; /* clamp RSSI < sc_max_rssi */
u_int16_t sc_dbm_offset; /* dBm ~ RSSI - sc_dbm_offset */
int sc_buflen; /* TX buffer size */
int sc_ntxbuf;
#define WI_NTXBUF 3
struct {
int d_fid;
int d_len;
} sc_txd[WI_NTXBUF]; /* TX buffers */
int sc_txnext; /* index of next TX */
int sc_txcur; /* index of current TX*/
int sc_tx_timer;
struct wi_counters sc_stats;
u_int16_t sc_ibss_port;
struct timeval sc_last_syn;
int sc_false_syns;
u_int16_t sc_txbuf[IEEE80211_MAX_LEN/2];
struct wi_tx_radiotap_header sc_tx_th;
struct wi_rx_radiotap_header sc_rx_th;
};
/* maximum consecutive false change-of-BSSID indications */
#define WI_MAX_FALSE_SYNS 10
#define WI_FLAGS_HAS_ENHSECURITY 0x0001
#define WI_FLAGS_HAS_WPASUPPORT 0x0002
#define WI_FLAGS_HAS_ROAMING 0x0020
#define WI_FLAGS_HAS_FRAGTHR 0x0200
#define WI_FLAGS_HAS_DBMADJUST 0x0400
+#define WI_FLAGS_RUNNING 0x0800
+#define WI_FLAGS_PROMISC 0x1000
struct wi_card_ident {
u_int16_t card_id;
char *card_name;
u_int8_t firm_type;
};
#define WI_PRISM_MIN_RSSI 0x1b
#define WI_PRISM_MAX_RSSI 0x9a
#define WI_PRISM_DBM_OFFSET 100 /* XXX */
#define WI_LUCENT_MIN_RSSI 47
#define WI_LUCENT_MAX_RSSI 138
#define WI_LUCENT_DBM_OFFSET 149
#define WI_RSSI_TO_DBM(sc, rssi) (MIN((sc)->sc_max_rssi, \
MAX((sc)->sc_min_rssi, (rssi))) - (sc)->sc_dbm_offset)
#define WI_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define WI_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define WI_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
int wi_attach(device_t);
int wi_detach(device_t);
int wi_shutdown(device_t);
int wi_alloc(device_t, int);
void wi_free(device_t);
extern devclass_t wi_devclass;
-void wi_init(void *);
void wi_intr(void *);
int wi_mgmt_xmit(struct wi_softc *, caddr_t, int);
void wi_stop(struct wi_softc *, int);
+void wi_init(struct wi_softc *);
Index: head/sys/dev/wpi/if_wpi.c
===================================================================
--- head/sys/dev/wpi/if_wpi.c (revision 287196)
+++ head/sys/dev/wpi/if_wpi.c (revision 287197)
@@ -1,5658 +1,5637 @@
/*-
* Copyright (c) 2006,2007
* Damien Bergamini <damien.bergamini@free.fr>
* Benjamin Close <Benjamin.Close@clearchain.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters.
*
* The 3945ABG network adapter doesn't use traditional hardware as
* many other adapters do. Instead, at run time the eeprom is set into a known
* state and told to load boot firmware. The boot firmware loads an init and a
* main binary firmware image into SRAM on the card via DMA.
* Once the firmware is loaded, the driver and hardware then
* communicate by way of circular DMA rings via the SRAM to the firmware.
*
* There are 6 memory rings: 1 command ring, 1 rx data ring and 4 tx data rings.
* The 4 tx data rings allow for QoS prioritization.
*
* The rx data ring consists of 32 DMA buffers. Two registers are used to
* indicate how far into the ring the driver and the firmware have progressed.
* The driver sets the initial read index (reg1) and the initial write index
* (reg2); the firmware updates the read index (reg1) on rx of a packet and
* fires an interrupt. The driver then processes the buffers starting at reg1,
* indicating to the firmware which buffers have been accessed by updating
* reg2, and at the same time allocates new memory for the processed buffers.
*
* A similar thing happens with the tx rings. The difference is that the
* firmware stops processing buffers once the queue is full and does not
* resume until confirmation of a successful transmission (tx_done) has
* occurred.
*
* The command ring operates in the same manner as the tx queues.
*
* All communication directly with the card (i.e. the eeprom) is classed as
* Stage 1 communication.
*
* All communication via the firmware to the card is classed as Stage 2.
* The firmware consists of 2 parts: a bootstrap firmware and a runtime
* firmware. The bootstrap firmware and runtime firmware are loaded
* from host memory via DMA to the card and then told to execute. From this
* point on the majority of communication between the driver and the card
* goes via the firmware.
*/
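/*
 * A rough sketch of the rx handshake described above; the names here are
 * purely illustrative and are not the driver's own identifiers:
 *
 *	last = initial read index;
 *	while (last != read_index_from_reg1()) {
 *		process_buffer(ring[last]);
 *		refill_buffer(ring[last]);
 *		last = (last + 1) % ring_size;
 *		write_reg2(last);	<- tell the firmware the slot is free
 *	}
 *
 * See wpi_notif_intr() and wpi_update_rx_ring() later in this file for the
 * actual implementation.
 */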
#include "opt_wlan.h"
#include "opt_wpi.h"
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_ratectl.h>
#include <dev/wpi/if_wpireg.h>
#include <dev/wpi/if_wpivar.h>
#include <dev/wpi/if_wpi_debug.h>
struct wpi_ident {
uint16_t vendor;
uint16_t device;
uint16_t subdevice;
const char *name;
};
static const struct wpi_ident wpi_ident_table[] = {
/* The below entries support ABG regardless of the subid */
{ 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" },
{ 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" },
/* The below entries only support BG */
{ 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945BG" },
{ 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945BG" },
{ 0x8086, 0x4227, 0x1014, "Intel(R) PRO/Wireless 3945BG" },
{ 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945BG" },
{ 0, 0, 0, NULL }
};
static int wpi_probe(device_t);
static int wpi_attach(device_t);
static void wpi_radiotap_attach(struct wpi_softc *);
static void wpi_sysctlattach(struct wpi_softc *);
static void wpi_init_beacon(struct wpi_vap *);
static struct ieee80211vap *wpi_vap_create(struct ieee80211com *,
const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
static void wpi_vap_delete(struct ieee80211vap *);
static int wpi_detach(device_t);
static int wpi_shutdown(device_t);
static int wpi_suspend(device_t);
static int wpi_resume(device_t);
static int wpi_nic_lock(struct wpi_softc *);
static int wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int);
static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int);
static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *,
void **, bus_size_t, bus_size_t);
static void wpi_dma_contig_free(struct wpi_dma_info *);
static int wpi_alloc_shared(struct wpi_softc *);
static void wpi_free_shared(struct wpi_softc *);
static int wpi_alloc_fwmem(struct wpi_softc *);
static void wpi_free_fwmem(struct wpi_softc *);
static int wpi_alloc_rx_ring(struct wpi_softc *);
static void wpi_update_rx_ring(struct wpi_softc *);
static void wpi_update_rx_ring_ps(struct wpi_softc *);
static void wpi_reset_rx_ring(struct wpi_softc *);
static void wpi_free_rx_ring(struct wpi_softc *);
static int wpi_alloc_tx_ring(struct wpi_softc *, struct wpi_tx_ring *,
int);
static void wpi_update_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
static void wpi_update_tx_ring_ps(struct wpi_softc *,
struct wpi_tx_ring *);
static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *);
static int wpi_read_eeprom(struct wpi_softc *,
uint8_t macaddr[IEEE80211_ADDR_LEN]);
static uint32_t wpi_eeprom_channel_flags(struct wpi_eeprom_chan *);
static void wpi_read_eeprom_band(struct wpi_softc *, int);
static int wpi_read_eeprom_channels(struct wpi_softc *, int);
static struct wpi_eeprom_chan *wpi_find_eeprom_channel(struct wpi_softc *,
struct ieee80211_channel *);
static int wpi_setregdomain(struct ieee80211com *,
struct ieee80211_regdomain *, int,
struct ieee80211_channel[]);
static int wpi_read_eeprom_group(struct wpi_softc *, int);
static int wpi_add_node_entry_adhoc(struct wpi_softc *);
static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *,
const uint8_t mac[IEEE80211_ADDR_LEN]);
static void wpi_node_free(struct ieee80211_node *);
static void wpi_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
const struct ieee80211_rx_stats *,
int, int);
static void wpi_restore_node(void *, struct ieee80211_node *);
static void wpi_restore_node_table(struct wpi_softc *, struct wpi_vap *);
static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void wpi_calib_timeout(void *);
static void wpi_rx_done(struct wpi_softc *, struct wpi_rx_desc *,
struct wpi_rx_data *);
static void wpi_rx_statistics(struct wpi_softc *, struct wpi_rx_desc *,
struct wpi_rx_data *);
static void wpi_tx_done(struct wpi_softc *, struct wpi_rx_desc *);
static void wpi_cmd_done(struct wpi_softc *, struct wpi_rx_desc *);
static void wpi_notif_intr(struct wpi_softc *);
static void wpi_wakeup_intr(struct wpi_softc *);
#ifdef WPI_DEBUG
static void wpi_debug_registers(struct wpi_softc *);
#endif
static void wpi_fatal_intr(struct wpi_softc *);
static void wpi_intr(void *);
static int wpi_cmd2(struct wpi_softc *, struct wpi_buf *);
static int wpi_tx_data(struct wpi_softc *, struct mbuf *,
struct ieee80211_node *);
static int wpi_tx_data_raw(struct wpi_softc *, struct mbuf *,
struct ieee80211_node *,
const struct ieee80211_bpf_params *);
static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
-static void wpi_start(struct ifnet *);
-static void wpi_start_task(void *, int);
+static int wpi_transmit(struct ieee80211com *, struct mbuf *);
+static void wpi_start(void *, int);
static void wpi_watchdog_rfkill(void *);
static void wpi_scan_timeout(void *);
static void wpi_tx_timeout(void *);
-static int wpi_ioctl(struct ifnet *, u_long, caddr_t);
+static void wpi_parent(struct ieee80211com *);
static int wpi_cmd(struct wpi_softc *, int, const void *, size_t, int);
static int wpi_mrr_setup(struct wpi_softc *);
static int wpi_add_node(struct wpi_softc *, struct ieee80211_node *);
static int wpi_add_broadcast_node(struct wpi_softc *, int);
static int wpi_add_ibss_node(struct wpi_softc *, struct ieee80211_node *);
static void wpi_del_node(struct wpi_softc *, struct ieee80211_node *);
static int wpi_updateedca(struct ieee80211com *);
static void wpi_set_promisc(struct wpi_softc *);
static void wpi_update_promisc(struct ieee80211com *);
static void wpi_update_mcast(struct ieee80211com *);
static void wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t);
static int wpi_set_timing(struct wpi_softc *, struct ieee80211_node *);
static void wpi_power_calibration(struct wpi_softc *);
static int wpi_set_txpower(struct wpi_softc *, int);
static int wpi_get_power_index(struct wpi_softc *,
struct wpi_power_group *, uint8_t, int, int);
static int wpi_set_pslevel(struct wpi_softc *, uint8_t, int, int);
static int wpi_send_btcoex(struct wpi_softc *);
static int wpi_send_rxon(struct wpi_softc *, int, int);
static int wpi_config(struct wpi_softc *);
static uint16_t wpi_get_active_dwell_time(struct wpi_softc *,
struct ieee80211_channel *, uint8_t);
static uint16_t wpi_limit_dwell(struct wpi_softc *, uint16_t);
static uint16_t wpi_get_passive_dwell_time(struct wpi_softc *,
struct ieee80211_channel *);
static uint32_t wpi_get_scan_pause_time(uint32_t, uint16_t);
static int wpi_scan(struct wpi_softc *, struct ieee80211_channel *);
static int wpi_auth(struct wpi_softc *, struct ieee80211vap *);
static int wpi_config_beacon(struct wpi_vap *);
static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *);
static void wpi_update_beacon(struct ieee80211vap *, int);
static void wpi_newassoc(struct ieee80211_node *, int);
static int wpi_run(struct wpi_softc *, struct ieee80211vap *);
static int wpi_load_key(struct ieee80211_node *,
const struct ieee80211_key *);
static void wpi_load_key_cb(void *, struct ieee80211_node *);
static int wpi_set_global_keys(struct ieee80211_node *);
static int wpi_del_key(struct ieee80211_node *,
const struct ieee80211_key *);
static void wpi_del_key_cb(void *, struct ieee80211_node *);
static int wpi_process_key(struct ieee80211vap *,
const struct ieee80211_key *, int);
static int wpi_key_set(struct ieee80211vap *,
const struct ieee80211_key *,
const uint8_t mac[IEEE80211_ADDR_LEN]);
static int wpi_key_delete(struct ieee80211vap *,
const struct ieee80211_key *);
static int wpi_post_alive(struct wpi_softc *);
static int wpi_load_bootcode(struct wpi_softc *, const uint8_t *, int);
static int wpi_load_firmware(struct wpi_softc *);
static int wpi_read_firmware(struct wpi_softc *);
static void wpi_unload_firmware(struct wpi_softc *);
static int wpi_clock_wait(struct wpi_softc *);
static int wpi_apm_init(struct wpi_softc *);
static void wpi_apm_stop_master(struct wpi_softc *);
static void wpi_apm_stop(struct wpi_softc *);
static void wpi_nic_config(struct wpi_softc *);
static int wpi_hw_init(struct wpi_softc *);
static void wpi_hw_stop(struct wpi_softc *);
static void wpi_radio_on(void *, int);
static void wpi_radio_off(void *, int);
-static void wpi_init(void *);
+static int wpi_init(struct wpi_softc *);
static void wpi_stop_locked(struct wpi_softc *);
static void wpi_stop(struct wpi_softc *);
static void wpi_scan_start(struct ieee80211com *);
static void wpi_scan_end(struct ieee80211com *);
static void wpi_set_channel(struct ieee80211com *);
static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void wpi_scan_mindwell(struct ieee80211_scan_state *);
static void wpi_hw_reset(void *, int);
static device_method_t wpi_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, wpi_probe),
DEVMETHOD(device_attach, wpi_attach),
DEVMETHOD(device_detach, wpi_detach),
DEVMETHOD(device_shutdown, wpi_shutdown),
DEVMETHOD(device_suspend, wpi_suspend),
DEVMETHOD(device_resume, wpi_resume),
DEVMETHOD_END
};
static driver_t wpi_driver = {
"wpi",
wpi_methods,
sizeof (struct wpi_softc)
};
static devclass_t wpi_devclass;
DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, NULL, NULL);
MODULE_VERSION(wpi, 1);
MODULE_DEPEND(wpi, pci, 1, 1, 1);
MODULE_DEPEND(wpi, wlan, 1, 1, 1);
MODULE_DEPEND(wpi, firmware, 1, 1, 1);
static int
wpi_probe(device_t dev)
{
const struct wpi_ident *ident;
for (ident = wpi_ident_table; ident->name != NULL; ident++) {
if (pci_get_vendor(dev) == ident->vendor &&
pci_get_device(dev) == ident->device) {
device_set_desc(dev, ident->name);
return (BUS_PROBE_DEFAULT);
}
}
return ENXIO;
}
static int
wpi_attach(device_t dev)
{
struct wpi_softc *sc = (struct wpi_softc *)device_get_softc(dev);
struct ieee80211com *ic;
- struct ifnet *ifp;
int i, error, rid;
#ifdef WPI_DEBUG
int supportsa = 1;
const struct wpi_ident *ident;
#endif
- uint8_t macaddr[IEEE80211_ADDR_LEN];
sc->sc_dev = dev;
#ifdef WPI_DEBUG
error = resource_int_value(device_get_name(sc->sc_dev),
device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug));
if (error != 0)
sc->sc_debug = 0;
#else
sc->sc_debug = 0;
#endif
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
/*
* Get the offset of the PCI Express Capability Structure in PCI
* Configuration Space.
*/
error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
if (error != 0) {
device_printf(dev, "PCIe capability structure not found!\n");
return error;
}
/*
* Some cards only support 802.11b/g, not 802.11a; check to see if
* this is one such card. A 0x0 in the subdevice table indicates that
* the entire subdevice range is to be ignored.
*/
#ifdef WPI_DEBUG
for (ident = wpi_ident_table; ident->name != NULL; ident++) {
if (ident->subdevice &&
pci_get_subdevice(dev) == ident->subdevice) {
supportsa = 0;
break;
}
}
#endif
/* Clear device-specific "PCI retry timeout" register (41h). */
pci_write_config(dev, 0x41, 0, 1);
/* Enable bus-mastering. */
pci_enable_busmaster(dev);
rid = PCIR_BAR(0);
sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (sc->mem == NULL) {
device_printf(dev, "can't map mem space\n");
return ENOMEM;
}
sc->sc_st = rman_get_bustag(sc->mem);
sc->sc_sh = rman_get_bushandle(sc->mem);
i = 1;
rid = 0;
if (pci_alloc_msi(dev, &i) == 0)
rid = 1;
/* Install interrupt handler. */
sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
(rid != 0 ? 0 : RF_SHAREABLE));
if (sc->irq == NULL) {
device_printf(dev, "can't map interrupt\n");
error = ENOMEM;
goto fail;
}
WPI_LOCK_INIT(sc);
WPI_TX_LOCK_INIT(sc);
WPI_RXON_LOCK_INIT(sc);
WPI_NT_LOCK_INIT(sc);
WPI_TXQ_LOCK_INIT(sc);
WPI_TXQ_STATE_LOCK_INIT(sc);
/* Allocate DMA memory for firmware transfers. */
if ((error = wpi_alloc_fwmem(sc)) != 0) {
device_printf(dev,
"could not allocate memory for firmware, error %d\n",
error);
goto fail;
}
/* Allocate shared page. */
if ((error = wpi_alloc_shared(sc)) != 0) {
device_printf(dev, "could not allocate shared page\n");
goto fail;
}
/* Allocate TX rings - 4 for QoS purposes, 1 for commands. */
for (i = 0; i < WPI_NTXQUEUES; i++) {
if ((error = wpi_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
device_printf(dev,
"could not allocate TX ring %d, error %d\n", i,
error);
goto fail;
}
}
/* Allocate RX ring. */
if ((error = wpi_alloc_rx_ring(sc)) != 0) {
device_printf(dev, "could not allocate RX ring, error %d\n",
error);
goto fail;
}
/* Clear pending interrupts. */
WPI_WRITE(sc, WPI_INT, 0xffffffff);
- ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- device_printf(dev, "can not allocate ifnet structure\n");
- goto fail;
- }
-
- ic = ifp->if_l2com;
- ic->ic_ifp = ifp;
+ ic = &sc->sc_ic;
ic->ic_softc = sc;
ic->ic_name = device_get_nameunit(dev);
ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
/* Set device capabilities. */
ic->ic_caps =
IEEE80211_C_STA /* station mode supported */
| IEEE80211_C_IBSS /* IBSS mode supported */
| IEEE80211_C_HOSTAP /* Host access point mode */
| IEEE80211_C_MONITOR /* monitor mode supported */
| IEEE80211_C_AHDEMO /* adhoc demo mode */
| IEEE80211_C_BGSCAN /* capable of bg scanning */
| IEEE80211_C_TXPMGT /* tx power management */
| IEEE80211_C_SHSLOT /* short slot time supported */
| IEEE80211_C_WPA /* 802.11i */
| IEEE80211_C_SHPREAMBLE /* short preamble supported */
| IEEE80211_C_WME /* 802.11e */
| IEEE80211_C_PMGT /* Station-side power mgmt */
;
ic->ic_cryptocaps =
IEEE80211_CRYPTO_AES_CCM;
/*
* Read in the eeprom and also set up the channels for
* net80211. We don't set the rates, as net80211 does this for us.
*/
- if ((error = wpi_read_eeprom(sc, macaddr)) != 0) {
+ if ((error = wpi_read_eeprom(sc, ic->ic_macaddr)) != 0) {
device_printf(dev, "could not read EEPROM, error %d\n",
error);
goto fail;
}
#ifdef WPI_DEBUG
if (bootverbose) {
device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n",
sc->domain);
device_printf(sc->sc_dev, "Hardware Type: %c\n",
sc->type > 1 ? 'B': '?');
device_printf(sc->sc_dev, "Hardware Revision: %c\n",
((sc->rev & 0xf0) == 0xd0) ? 'D': '?');
device_printf(sc->sc_dev, "SKU %s support 802.11a\n",
supportsa ? "does" : "does not");
/* XXX hw_config uses the PCIDEV for the Hardware rev. Must
check what sc->rev really represents - benjsc 20070615 */
}
#endif
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_softc = sc;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_init = wpi_init;
- ifp->if_ioctl = wpi_ioctl;
- ifp->if_start = wpi_start;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ieee80211_ifattach(ic, macaddr);
+ ieee80211_ifattach(ic);
ic->ic_vap_create = wpi_vap_create;
ic->ic_vap_delete = wpi_vap_delete;
+ ic->ic_parent = wpi_parent;
ic->ic_raw_xmit = wpi_raw_xmit;
+ ic->ic_transmit = wpi_transmit;
ic->ic_node_alloc = wpi_node_alloc;
sc->sc_node_free = ic->ic_node_free;
ic->ic_node_free = wpi_node_free;
ic->ic_wme.wme_update = wpi_updateedca;
ic->ic_update_promisc = wpi_update_promisc;
ic->ic_update_mcast = wpi_update_mcast;
ic->ic_newassoc = wpi_newassoc;
ic->ic_scan_start = wpi_scan_start;
ic->ic_scan_end = wpi_scan_end;
ic->ic_set_channel = wpi_set_channel;
ic->ic_scan_curchan = wpi_scan_curchan;
ic->ic_scan_mindwell = wpi_scan_mindwell;
ic->ic_setregdomain = wpi_setregdomain;
sc->sc_update_rx_ring = wpi_update_rx_ring;
sc->sc_update_tx_ring = wpi_update_tx_ring;
wpi_radiotap_attach(sc);
callout_init_mtx(&sc->calib_to, &sc->rxon_mtx, 0);
callout_init_mtx(&sc->scan_timeout, &sc->rxon_mtx, 0);
callout_init_mtx(&sc->tx_timeout, &sc->txq_state_mtx, 0);
callout_init_mtx(&sc->watchdog_rfkill, &sc->sc_mtx, 0);
TASK_INIT(&sc->sc_reinittask, 0, wpi_hw_reset, sc);
TASK_INIT(&sc->sc_radiooff_task, 0, wpi_radio_off, sc);
TASK_INIT(&sc->sc_radioon_task, 0, wpi_radio_on, sc);
- TASK_INIT(&sc->sc_start_task, 0, wpi_start_task, sc);
+ TASK_INIT(&sc->sc_start_task, 0, wpi_start, sc);
sc->sc_tq = taskqueue_create("wpi_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->sc_tq);
error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "wpi_taskq");
if (error != 0) {
device_printf(dev, "can't start threads, error %d\n", error);
goto fail;
}
wpi_sysctlattach(sc);
/*
* Hook our interrupt after all initialization is complete.
*/
error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
NULL, wpi_intr, sc, &sc->sc_ih);
if (error != 0) {
device_printf(dev, "can't establish interrupt, error %d\n",
error);
goto fail;
}
if (bootverbose)
ieee80211_announce(ic);
#ifdef WPI_DEBUG
if (sc->sc_debug & WPI_DEBUG_HW)
ieee80211_announce_channels(ic);
#endif
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
fail: wpi_detach(dev);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
return error;
}
/*
* Attach the interface to 802.11 radiotap.
*/
static void
wpi_radiotap_attach(struct wpi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct wpi_rx_radiotap_header *rxtap = &sc->sc_rxtap;
+ struct wpi_tx_radiotap_header *txtap = &sc->sc_txtap;
+
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
- ieee80211_radiotap_attach(ic,
- &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
- WPI_TX_RADIOTAP_PRESENT,
- &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
- WPI_RX_RADIOTAP_PRESENT);
+ ieee80211_radiotap_attach(&sc->sc_ic,
+ &txtap->wt_ihdr, sizeof(*txtap), WPI_TX_RADIOTAP_PRESENT,
+ &rxtap->wr_ihdr, sizeof(*rxtap), WPI_RX_RADIOTAP_PRESENT);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
}
static void
wpi_sysctlattach(struct wpi_softc *sc)
{
#ifdef WPI_DEBUG
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug,
"control debugging printfs");
#endif
}
static void
wpi_init_beacon(struct wpi_vap *wvp)
{
struct wpi_buf *bcn = &wvp->wv_bcbuf;
struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data;
cmd->id = WPI_ID_BROADCAST;
cmd->ofdm_mask = 0xff;
cmd->cck_mask = 0x0f;
cmd->lifetime = htole32(WPI_LIFETIME_INFINITE);
/*
* XXX WPI_TX_AUTO_SEQ seems to be ignored - work around this issue
* XXX by using WPI_TX_NEED_ACK instead (with some side effects).
*/
cmd->flags = htole32(WPI_TX_NEED_ACK | WPI_TX_INSERT_TSTAMP);
bcn->code = WPI_CMD_SET_BEACON;
bcn->ac = WPI_CMD_QUEUE_NUM;
bcn->size = sizeof(struct wpi_cmd_beacon);
}
static struct ieee80211vap *
wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct wpi_vap *wvp;
struct ieee80211vap *vap;
if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
return NULL;
- wvp = (struct wpi_vap *) malloc(sizeof(struct wpi_vap),
- M_80211_VAP, M_NOWAIT | M_ZERO);
- if (wvp == NULL)
- return NULL;
+ wvp = malloc(sizeof(struct wpi_vap), M_80211_VAP, M_WAITOK | M_ZERO);
vap = &wvp->wv_vap;
- ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac);
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) {
WPI_VAP_LOCK_INIT(wvp);
wpi_init_beacon(wvp);
}
/* Override with driver methods. */
vap->iv_key_set = wpi_key_set;
vap->iv_key_delete = wpi_key_delete;
wvp->wv_recv_mgmt = vap->iv_recv_mgmt;
vap->iv_recv_mgmt = wpi_recv_mgmt;
wvp->wv_newstate = vap->iv_newstate;
vap->iv_newstate = wpi_newstate;
vap->iv_update_beacon = wpi_update_beacon;
vap->iv_max_aid = WPI_ID_IBSS_MAX - WPI_ID_IBSS_MIN + 1;
ieee80211_ratectl_init(vap);
/* Complete setup. */
ieee80211_vap_attach(vap, ieee80211_media_change,
- ieee80211_media_status);
+ ieee80211_media_status, mac);
ic->ic_opmode = opmode;
return vap;
}
static void
wpi_vap_delete(struct ieee80211vap *vap)
{
struct wpi_vap *wvp = WPI_VAP(vap);
struct wpi_buf *bcn = &wvp->wv_bcbuf;
enum ieee80211_opmode opmode = vap->iv_opmode;
ieee80211_ratectl_deinit(vap);
ieee80211_vap_detach(vap);
if (opmode == IEEE80211_M_IBSS || opmode == IEEE80211_M_HOSTAP) {
if (bcn->m != NULL)
m_freem(bcn->m);
WPI_VAP_LOCK_DESTROY(wvp);
}
free(wvp, M_80211_VAP);
}
static int
wpi_detach(device_t dev)
{
struct wpi_softc *sc = device_get_softc(dev);
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic;
+ struct ieee80211com *ic = &sc->sc_ic;
int qid;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
- if (ifp != NULL) {
- ic = ifp->if_l2com;
-
+ if (ic->ic_vap_create == wpi_vap_create) {
ieee80211_draintask(ic, &sc->sc_radioon_task);
ieee80211_draintask(ic, &sc->sc_start_task);
wpi_stop(sc);
- taskqueue_drain_all(sc->sc_tq);
- taskqueue_free(sc->sc_tq);
+ if (sc->sc_tq != NULL) {
+ taskqueue_drain_all(sc->sc_tq);
+ taskqueue_free(sc->sc_tq);
+ }
callout_drain(&sc->watchdog_rfkill);
callout_drain(&sc->tx_timeout);
callout_drain(&sc->scan_timeout);
callout_drain(&sc->calib_to);
ieee80211_ifdetach(ic);
}
/* Uninstall interrupt handler. */
if (sc->irq != NULL) {
bus_teardown_intr(dev, sc->irq, sc->sc_ih);
bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq),
sc->irq);
pci_release_msi(dev);
}
if (sc->txq[0].data_dmat) {
/* Free DMA resources. */
for (qid = 0; qid < WPI_NTXQUEUES; qid++)
wpi_free_tx_ring(sc, &sc->txq[qid]);
wpi_free_rx_ring(sc);
wpi_free_shared(sc);
}
if (sc->fw_dma.tag)
wpi_free_fwmem(sc);
if (sc->mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
rman_get_rid(sc->mem), sc->mem);
- if (ifp != NULL)
- if_free(ifp);
-
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
WPI_TXQ_STATE_LOCK_DESTROY(sc);
WPI_TXQ_LOCK_DESTROY(sc);
WPI_NT_LOCK_DESTROY(sc);
WPI_RXON_LOCK_DESTROY(sc);
WPI_TX_LOCK_DESTROY(sc);
WPI_LOCK_DESTROY(sc);
return 0;
}
static int
wpi_shutdown(device_t dev)
{
struct wpi_softc *sc = device_get_softc(dev);
wpi_stop(sc);
return 0;
}
static int
wpi_suspend(device_t dev)
{
struct wpi_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
ieee80211_suspend_all(ic);
return 0;
}
static int
wpi_resume(device_t dev)
{
struct wpi_softc *sc = device_get_softc(dev);
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
/* Clear device-specific "PCI retry timeout" register (41h). */
pci_write_config(dev, 0x41, 0, 1);
ieee80211_resume_all(ic);
return 0;
}
/*
* Grab exclusive access to NIC memory.
*/
static int
wpi_nic_lock(struct wpi_softc *sc)
{
int ntries;
/* Request exclusive access to NIC. */
WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
/* Spin until we actually get the lock. */
for (ntries = 0; ntries < 1000; ntries++) {
if ((WPI_READ(sc, WPI_GP_CNTRL) &
(WPI_GP_CNTRL_MAC_ACCESS_ENA | WPI_GP_CNTRL_SLEEP)) ==
WPI_GP_CNTRL_MAC_ACCESS_ENA)
return 0;
DELAY(10);
}
device_printf(sc->sc_dev, "could not lock memory\n");
return ETIMEDOUT;
}
/*
* Release lock on NIC memory.
*/
static __inline void
wpi_nic_unlock(struct wpi_softc *sc)
{
WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
}
static __inline uint32_t
wpi_prph_read(struct wpi_softc *sc, uint32_t addr)
{
WPI_WRITE(sc, WPI_PRPH_RADDR, WPI_PRPH_DWORD | addr);
WPI_BARRIER_READ_WRITE(sc);
return WPI_READ(sc, WPI_PRPH_RDATA);
}
static __inline void
wpi_prph_write(struct wpi_softc *sc, uint32_t addr, uint32_t data)
{
WPI_WRITE(sc, WPI_PRPH_WADDR, WPI_PRPH_DWORD | addr);
WPI_BARRIER_WRITE(sc);
WPI_WRITE(sc, WPI_PRPH_WDATA, data);
}
static __inline void
wpi_prph_setbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask)
{
wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) | mask);
}
static __inline void
wpi_prph_clrbits(struct wpi_softc *sc, uint32_t addr, uint32_t mask)
{
wpi_prph_write(sc, addr, wpi_prph_read(sc, addr) & ~mask);
}
static __inline void
wpi_prph_write_region_4(struct wpi_softc *sc, uint32_t addr,
const uint32_t *data, int count)
{
for (; count > 0; count--, data++, addr += 4)
wpi_prph_write(sc, addr, *data);
}
static __inline uint32_t
wpi_mem_read(struct wpi_softc *sc, uint32_t addr)
{
WPI_WRITE(sc, WPI_MEM_RADDR, addr);
WPI_BARRIER_READ_WRITE(sc);
return WPI_READ(sc, WPI_MEM_RDATA);
}
static __inline void
wpi_mem_read_region_4(struct wpi_softc *sc, uint32_t addr, uint32_t *data,
int count)
{
for (; count > 0; count--, addr += 4)
*data++ = wpi_mem_read(sc, addr);
}
static int
wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int count)
{
uint8_t *out = data;
uint32_t val;
int error, ntries;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
if ((error = wpi_nic_lock(sc)) != 0)
return error;
for (; count > 0; count -= 2, addr++) {
WPI_WRITE(sc, WPI_EEPROM, addr << 2);
for (ntries = 0; ntries < 10; ntries++) {
val = WPI_READ(sc, WPI_EEPROM);
if (val & WPI_EEPROM_READ_VALID)
break;
DELAY(5);
}
if (ntries == 10) {
device_printf(sc->sc_dev,
"timeout reading ROM at 0x%x\n", addr);
return ETIMEDOUT;
}
*out++ = val >> 16;
if (count > 1)
*out++ = val >> 24;
}
wpi_nic_unlock(sc);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
}
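A note for readers unfamiliar with this EEPROM interface: each access in wpi_read_prom_data() above yields one 16-bit data word in the upper half of the 32-bit register, which is why the loop consumes two bytes of "count" per one-word step of "addr" and copies only a single byte for an odd-sized tail. Below is a minimal standalone sketch of just the unpacking step (userland C with invented register values, not part of the patch):

/*
 * Sketch only: mimics the byte extraction done by wpi_read_prom_data().
 * The register contents here are made up for illustration.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static void
unpack_words(const uint32_t *regs, uint8_t *out, int count)
{
	uint32_t val;

	for (; count > 0; count -= 2, regs++) {
		val = *regs;
		*out++ = val >> 16;		/* first data byte */
		if (count > 1)
			*out++ = val >> 24;	/* second data byte */
	}
}

int
main(void)
{
	/* Two register values whose data bytes are 0x11 0x22 and 0x33 0x44. */
	const uint32_t regs[] = { 0x22110000, 0x44330000 };
	uint8_t buf[4], tail[3];

	unpack_words(regs, buf, 4);
	assert(memcmp(buf, "\x11\x22\x33\x44", 4) == 0);

	unpack_words(regs, tail, 3);	/* odd count: only three bytes copied */
	assert(memcmp(tail, "\x11\x22\x33", 3) == 0);
	return 0;
}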
static void
wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
if (error != 0)
return;
KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
*(bus_addr_t *)arg = segs[0].ds_addr;
}
/*
* Allocate a contiguous block of DMA memory of the requested size and
* alignment.
*/
static int
wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma,
void **kvap, bus_size_t size, bus_size_t alignment)
{
int error;
dma->tag = NULL;
dma->size = size;
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment,
0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
1, size, BUS_DMA_NOWAIT, NULL, NULL, &dma->tag);
if (error != 0)
goto fail;
error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
if (error != 0)
goto fail;
error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
wpi_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
if (error != 0)
goto fail;
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
if (kvap != NULL)
*kvap = dma->vaddr;
return 0;
fail: wpi_dma_contig_free(dma);
return error;
}
static void
wpi_dma_contig_free(struct wpi_dma_info *dma)
{
if (dma->vaddr != NULL) {
bus_dmamap_sync(dma->tag, dma->map,
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(dma->tag, dma->map);
bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
dma->vaddr = NULL;
}
if (dma->tag != NULL) {
bus_dma_tag_destroy(dma->tag);
dma->tag = NULL;
}
}
/*
* Allocate a shared page between host and NIC.
*/
static int
wpi_alloc_shared(struct wpi_softc *sc)
{
/* Shared buffer must be aligned on a 4KB boundary. */
return wpi_dma_contig_alloc(sc, &sc->shared_dma,
(void **)&sc->shared, sizeof (struct wpi_shared), 4096);
}
static void
wpi_free_shared(struct wpi_softc *sc)
{
wpi_dma_contig_free(&sc->shared_dma);
}
/*
* Allocate DMA-safe memory for firmware transfer.
*/
static int
wpi_alloc_fwmem(struct wpi_softc *sc)
{
/* Must be aligned on a 16-byte boundary. */
return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL,
WPI_FW_TEXT_MAXSZ + WPI_FW_DATA_MAXSZ, 16);
}
static void
wpi_free_fwmem(struct wpi_softc *sc)
{
wpi_dma_contig_free(&sc->fw_dma);
}
static int
wpi_alloc_rx_ring(struct wpi_softc *sc)
{
struct wpi_rx_ring *ring = &sc->rxq;
bus_size_t size;
int i, error;
ring->cur = 0;
ring->update = 0;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
/* Allocate RX descriptors (16KB aligned). */
size = WPI_RX_RING_COUNT * sizeof (uint32_t);
error = wpi_dma_contig_alloc(sc, &ring->desc_dma,
(void **)&ring->desc, size, WPI_RING_DMA_ALIGN);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not allocate RX ring DMA memory, error %d\n",
__func__, error);
goto fail;
}
/* Create RX buffer DMA tag. */
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL,
&ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not create RX buf DMA tag, error %d\n",
__func__, error);
goto fail;
}
/*
* Allocate and map RX buffers.
*/
for (i = 0; i < WPI_RX_RING_COUNT; i++) {
struct wpi_rx_data *data = &ring->data[i];
bus_addr_t paddr;
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not create RX buf DMA map, error %d\n",
__func__, error);
goto fail;
}
data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
if (data->m == NULL) {
device_printf(sc->sc_dev,
"%s: could not allocate RX mbuf\n", __func__);
error = ENOBUFS;
goto fail;
}
error = bus_dmamap_load(ring->data_dmat, data->map,
mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr,
&paddr, BUS_DMA_NOWAIT);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev,
"%s: can't map mbuf (error %d)\n", __func__,
error);
goto fail;
}
/* Set physical address of RX buffer. */
ring->desc[i] = htole32(paddr);
}
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
fail: wpi_free_rx_ring(sc);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
return error;
}
static void
wpi_update_rx_ring(struct wpi_softc *sc)
{
WPI_WRITE(sc, WPI_FH_RX_WPTR, sc->rxq.cur & ~7);
}
static void
wpi_update_rx_ring_ps(struct wpi_softc *sc)
{
struct wpi_rx_ring *ring = &sc->rxq;
if (ring->update != 0) {
/* Wait for INT_WAKEUP event. */
return;
}
WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) {
DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s: wakeup request\n",
__func__);
ring->update = 1;
} else {
wpi_update_rx_ring(sc);
WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
}
}
static void
wpi_reset_rx_ring(struct wpi_softc *sc)
{
struct wpi_rx_ring *ring = &sc->rxq;
int ntries;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
if (wpi_nic_lock(sc) == 0) {
WPI_WRITE(sc, WPI_FH_RX_CONFIG, 0);
for (ntries = 0; ntries < 1000; ntries++) {
if (WPI_READ(sc, WPI_FH_RX_STATUS) &
WPI_FH_RX_STATUS_IDLE)
break;
DELAY(10);
}
wpi_nic_unlock(sc);
}
ring->cur = 0;
ring->update = 0;
}
static void
wpi_free_rx_ring(struct wpi_softc *sc)
{
struct wpi_rx_ring *ring = &sc->rxq;
int i;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
wpi_dma_contig_free(&ring->desc_dma);
for (i = 0; i < WPI_RX_RING_COUNT; i++) {
struct wpi_rx_data *data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
if (ring->data_dmat != NULL) {
bus_dma_tag_destroy(ring->data_dmat);
ring->data_dmat = NULL;
}
}
static int
wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int qid)
{
bus_addr_t paddr;
bus_size_t size;
int i, error;
ring->qid = qid;
ring->queued = 0;
ring->cur = 0;
ring->update = 0;
+ mbufq_init(&ring->snd, ifqmaxlen);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
/* Allocate TX descriptors (16KB aligned). */
size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_desc);
error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc,
size, WPI_RING_DMA_ALIGN);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not allocate TX ring DMA memory, error %d\n",
__func__, error);
goto fail;
}
/* Update shared area with ring physical address. */
sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr);
bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map,
BUS_DMASYNC_PREWRITE);
/*
* We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
* to allocate command space for the other rings.
* XXX Do we really need to allocate descriptors for other rings?
*/
if (qid > WPI_CMD_QUEUE_NUM) {
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
}
size = WPI_TX_RING_COUNT * sizeof (struct wpi_tx_cmd);
error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd,
size, 4);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not allocate TX cmd DMA memory, error %d\n",
__func__, error);
goto fail;
}
error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL,
&ring->data_dmat);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not create TX buf DMA tag, error %d\n",
__func__, error);
goto fail;
}
paddr = ring->cmd_dma.paddr;
for (i = 0; i < WPI_TX_RING_COUNT; i++) {
struct wpi_tx_data *data = &ring->data[i];
data->cmd_paddr = paddr;
paddr += sizeof (struct wpi_tx_cmd);
error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not create TX buf DMA map, error %d\n",
__func__, error);
goto fail;
}
}
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
fail: wpi_free_tx_ring(sc, ring);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
return error;
}
static void
wpi_update_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
{
WPI_WRITE(sc, WPI_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
}
static void
wpi_update_tx_ring_ps(struct wpi_softc *sc, struct wpi_tx_ring *ring)
{
if (ring->update != 0) {
/* Wait for INT_WAKEUP event. */
return;
}
WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_SLEEP) {
DPRINTF(sc, WPI_DEBUG_PWRSAVE, "%s (%d): requesting wakeup\n",
__func__, ring->qid);
ring->update = 1;
} else {
wpi_update_tx_ring(sc, ring);
WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
}
}
static void
wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
{
int i;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
for (i = 0; i < WPI_TX_RING_COUNT; i++) {
struct wpi_tx_data *data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
if (data->ni != NULL) {
ieee80211_free_node(data->ni);
data->ni = NULL;
}
}
/* Clear TX descriptors. */
memset(ring->desc, 0, ring->desc_dma.size);
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
+ mbufq_drain(&ring->snd);
sc->qfullmsk &= ~(1 << ring->qid);
ring->queued = 0;
ring->cur = 0;
ring->update = 0;
}
static void
wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring)
{
int i;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
wpi_dma_contig_free(&ring->desc_dma);
wpi_dma_contig_free(&ring->cmd_dma);
for (i = 0; i < WPI_TX_RING_COUNT; i++) {
struct wpi_tx_data *data = &ring->data[i];
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
}
if (data->map != NULL)
bus_dmamap_destroy(ring->data_dmat, data->map);
}
if (ring->data_dmat != NULL) {
bus_dma_tag_destroy(ring->data_dmat);
ring->data_dmat = NULL;
}
}
/*
* Extract various information from EEPROM.
*/
static int
wpi_read_eeprom(struct wpi_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN])
{
#define WPI_CHK(res) do { \
if ((error = res) != 0) \
goto fail; \
} while (0)
int error, i;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
/* Adapter has to be powered on for EEPROM access to work. */
if ((error = wpi_apm_init(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not power ON adapter, error %d\n", __func__,
error);
return error;
}
if ((WPI_READ(sc, WPI_EEPROM_GP) & 0x6) == 0) {
device_printf(sc->sc_dev, "bad EEPROM signature\n");
error = EIO;
goto fail;
}
/* Clear HW ownership of EEPROM. */
WPI_CLRBITS(sc, WPI_EEPROM_GP, WPI_EEPROM_GP_IF_OWNER);
/* Read the hardware capabilities, revision and SKU type. */
WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_SKU_CAP, &sc->cap,
sizeof(sc->cap)));
WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev,
sizeof(sc->rev)));
WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type,
sizeof(sc->type)));
sc->rev = le16toh(sc->rev);
DPRINTF(sc, WPI_DEBUG_EEPROM, "cap=%x rev=%x type=%x\n", sc->cap,
sc->rev, sc->type);
/* Read the regulatory domain (4 ASCII characters). */
WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain,
sizeof(sc->domain)));
/* Read MAC address. */
WPI_CHK(wpi_read_prom_data(sc, WPI_EEPROM_MAC, macaddr,
IEEE80211_ADDR_LEN));
/* Read the list of authorized channels. */
for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++)
WPI_CHK(wpi_read_eeprom_channels(sc, i));
/* Read the list of TX power groups. */
for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++)
WPI_CHK(wpi_read_eeprom_group(sc, i));
fail: wpi_apm_stop(sc); /* Power OFF adapter. */
DPRINTF(sc, WPI_DEBUG_TRACE, error ? TRACE_STR_END_ERR : TRACE_STR_END,
__func__);
return error;
#undef WPI_CHK
}
/*
* Translate EEPROM flags to net80211.
*/
static uint32_t
wpi_eeprom_channel_flags(struct wpi_eeprom_chan *channel)
{
uint32_t nflags;
nflags = 0;
if ((channel->flags & WPI_EEPROM_CHAN_ACTIVE) == 0)
nflags |= IEEE80211_CHAN_PASSIVE;
if ((channel->flags & WPI_EEPROM_CHAN_IBSS) == 0)
nflags |= IEEE80211_CHAN_NOADHOC;
if (channel->flags & WPI_EEPROM_CHAN_RADAR) {
nflags |= IEEE80211_CHAN_DFS;
/* XXX apparently IBSS may still be marked */
nflags |= IEEE80211_CHAN_NOADHOC;
}
/* XXX HOSTAP uses WPI_MODE_IBSS */
if (nflags & IEEE80211_CHAN_NOADHOC)
nflags |= IEEE80211_CHAN_NOHOSTAP;
return nflags;
}
static void
wpi_read_eeprom_band(struct wpi_softc *sc, int n)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct wpi_eeprom_chan *channels = sc->eeprom_channels[n];
const struct wpi_chan_band *band = &wpi_bands[n];
struct ieee80211_channel *c;
uint8_t chan;
int i, nflags;
for (i = 0; i < band->nchan; i++) {
if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) {
DPRINTF(sc, WPI_DEBUG_EEPROM,
"Channel Not Valid: %d, band %d\n",
band->chan[i],n);
continue;
}
chan = band->chan[i];
nflags = wpi_eeprom_channel_flags(&channels[i]);
c = &ic->ic_channels[ic->ic_nchans++];
c->ic_ieee = chan;
c->ic_maxregpower = channels[i].maxpwr;
c->ic_maxpower = 2*c->ic_maxregpower;
if (n == 0) { /* 2GHz band */
c->ic_freq = ieee80211_ieee2mhz(chan,
IEEE80211_CHAN_G);
/* G implies that B is supported. */
c->ic_flags = IEEE80211_CHAN_B | nflags;
c = &ic->ic_channels[ic->ic_nchans++];
c[0] = c[-1];
c->ic_flags = IEEE80211_CHAN_G | nflags;
} else { /* 5GHz band */
c->ic_freq = ieee80211_ieee2mhz(chan,
IEEE80211_CHAN_A);
c->ic_flags = IEEE80211_CHAN_A | nflags;
}
/* Save maximum allowed TX power for this channel. */
sc->maxpwr[chan] = channels[i].maxpwr;
DPRINTF(sc, WPI_DEBUG_EEPROM,
"adding chan %d (%dMHz) flags=0x%x maxpwr=%d passive=%d,"
" offset %d\n", chan, c->ic_freq,
channels[i].flags, sc->maxpwr[chan],
IEEE80211_IS_CHAN_PASSIVE(c), ic->ic_nchans);
}
}
/**
* Read the EEPROM to find out which channels are valid for the given
* band and update net80211 with what we find.
*/
static int
wpi_read_eeprom_channels(struct wpi_softc *sc, int n)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
const struct wpi_chan_band *band = &wpi_bands[n];
int error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
error = wpi_read_prom_data(sc, band->addr, &sc->eeprom_channels[n],
band->nchan * sizeof (struct wpi_eeprom_chan));
if (error != 0) {
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
return error;
}
wpi_read_eeprom_band(sc, n);
ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
}
static struct wpi_eeprom_chan *
wpi_find_eeprom_channel(struct wpi_softc *sc, struct ieee80211_channel *c)
{
int i, j;
for (j = 0; j < WPI_CHAN_BANDS_COUNT; j++)
for (i = 0; i < wpi_bands[j].nchan; i++)
if (wpi_bands[j].chan[i] == c->ic_ieee)
return &sc->eeprom_channels[j][i];
return NULL;
}
/*
* Enforce flags read from EEPROM.
*/
static int
wpi_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd,
int nchan, struct ieee80211_channel chans[])
{
struct wpi_softc *sc = ic->ic_softc;
int i;
for (i = 0; i < nchan; i++) {
struct ieee80211_channel *c = &chans[i];
struct wpi_eeprom_chan *channel;
channel = wpi_find_eeprom_channel(sc, c);
if (channel == NULL) {
- if_printf(ic->ic_ifp,
- "%s: invalid channel %u freq %u/0x%x\n",
+ ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n",
__func__, c->ic_ieee, c->ic_freq, c->ic_flags);
return EINVAL;
}
c->ic_flags |= wpi_eeprom_channel_flags(channel);
}
return 0;
}
static int
wpi_read_eeprom_group(struct wpi_softc *sc, int n)
{
struct wpi_power_group *group = &sc->groups[n];
struct wpi_eeprom_group rgroup;
int i, error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
if ((error = wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32,
&rgroup, sizeof rgroup)) != 0) {
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
return error;
}
/* Save TX power group information. */
group->chan = rgroup.chan;
group->maxpwr = rgroup.maxpwr;
/* Retrieve temperature at which the samples were taken. */
group->temp = (int16_t)le16toh(rgroup.temp);
DPRINTF(sc, WPI_DEBUG_EEPROM,
"power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan,
group->maxpwr, group->temp);
for (i = 0; i < WPI_SAMPLES_COUNT; i++) {
group->samples[i].index = rgroup.samples[i].index;
group->samples[i].power = rgroup.samples[i].power;
DPRINTF(sc, WPI_DEBUG_EEPROM,
"\tsample %d: index=%d power=%d\n", i,
group->samples[i].index, group->samples[i].power);
}
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
}
static int
wpi_add_node_entry_adhoc(struct wpi_softc *sc)
{
int newid = WPI_ID_IBSS_MIN;
for (; newid <= WPI_ID_IBSS_MAX; newid++) {
if ((sc->nodesmsk & (1 << newid)) == 0) {
sc->nodesmsk |= 1 << newid;
return newid;
}
}
return WPI_ID_UNDEFINED;
}
static __inline int
wpi_add_node_entry_sta(struct wpi_softc *sc)
{
sc->nodesmsk |= 1 << WPI_ID_BSS;
return WPI_ID_BSS;
}
static __inline int
wpi_check_node_entry(struct wpi_softc *sc, uint8_t id)
{
if (id == WPI_ID_UNDEFINED)
return 0;
return (sc->nodesmsk >> id) & 1;
}
static __inline void
wpi_clear_node_table(struct wpi_softc *sc)
{
sc->nodesmsk = 0;
}
static __inline void
wpi_del_node_entry(struct wpi_softc *sc, uint8_t id)
{
sc->nodesmsk &= ~(1 << id);
}
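The adhoc node bookkeeping above is a plain bitmask allocator: every in-firmware station slot maps to one bit of sc->nodesmsk, and wpi_add_node_entry_adhoc() hands out the lowest free slot. A minimal standalone sketch of the same idea follows; the numeric bounds are placeholders for illustration, not the driver's real WPI_ID_IBSS_MIN/WPI_ID_IBSS_MAX/WPI_ID_UNDEFINED values.

/*
 * Sketch only: a bitmask-based slot allocator in the style of the
 * wpi_*_node_entry* helpers.  ID_MIN/ID_MAX/ID_UNDEFINED are assumed
 * values, not the driver's constants.
 */
#include <assert.h>
#include <stdint.h>

#define ID_MIN		2
#define ID_MAX		15
#define ID_UNDEFINED	255

static uint32_t idmask;		/* stands in for sc->nodesmsk */

static int
id_alloc(void)
{
	int id;

	for (id = ID_MIN; id <= ID_MAX; id++) {
		if ((idmask & (1u << id)) == 0) {
			idmask |= 1u << id;	/* mark the slot as used */
			return id;
		}
	}
	return ID_UNDEFINED;			/* table is full */
}

static void
id_free(int id)
{
	idmask &= ~(1u << id);
}

int
main(void)
{
	int a = id_alloc(), b = id_alloc();

	assert(a == ID_MIN && b == ID_MIN + 1);
	id_free(a);
	assert(id_alloc() == a);	/* a freed slot is reused first */
	return 0;
}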
static struct ieee80211_node *
wpi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct wpi_node *wn;
wn = malloc(sizeof (struct wpi_node), M_80211_NODE,
M_NOWAIT | M_ZERO);
if (wn == NULL)
return NULL;
wn->id = WPI_ID_UNDEFINED;
return &wn->ni;
}
static void
wpi_node_free(struct ieee80211_node *ni)
{
- struct ieee80211com *ic = ni->ni_ic;
- struct wpi_softc *sc = ic->ic_softc;
+ struct wpi_softc *sc = ni->ni_ic->ic_softc;
struct wpi_node *wn = WPI_NODE(ni);
if (wn->id != WPI_ID_UNDEFINED) {
WPI_NT_LOCK(sc);
if (wpi_check_node_entry(sc, wn->id)) {
wpi_del_node_entry(sc, wn->id);
wpi_del_node(sc, ni);
}
WPI_NT_UNLOCK(sc);
}
sc->sc_node_free(ni);
}
static __inline int
wpi_check_bss_filter(struct wpi_softc *sc)
{
return (sc->rxon.filter & htole32(WPI_FILTER_BSS)) != 0;
}
static void
wpi_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype,
const struct ieee80211_rx_stats *rxs,
int rssi, int nf)
{
struct ieee80211vap *vap = ni->ni_vap;
struct wpi_softc *sc = vap->iv_ic->ic_softc;
struct wpi_vap *wvp = WPI_VAP(vap);
uint64_t ni_tstamp, rx_tstamp;
wvp->wv_recv_mgmt(ni, m, subtype, rxs, rssi, nf);
if (vap->iv_opmode == IEEE80211_M_IBSS &&
vap->iv_state == IEEE80211_S_RUN &&
(subtype == IEEE80211_FC0_SUBTYPE_BEACON ||
subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) {
ni_tstamp = le64toh(ni->ni_tstamp.tsf);
rx_tstamp = le64toh(sc->rx_tstamp);
if (ni_tstamp >= rx_tstamp) {
DPRINTF(sc, WPI_DEBUG_STATE,
"ibss merge, tsf %ju tstamp %ju\n",
(uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp);
(void) ieee80211_ibss_merge(ni);
}
}
}
static void
wpi_restore_node(void *arg, struct ieee80211_node *ni)
{
struct wpi_softc *sc = arg;
struct wpi_node *wn = WPI_NODE(ni);
int error;
WPI_NT_LOCK(sc);
if (wn->id != WPI_ID_UNDEFINED) {
wn->id = WPI_ID_UNDEFINED;
if ((error = wpi_add_ibss_node(sc, ni)) != 0) {
device_printf(sc->sc_dev,
"%s: could not add IBSS node, error %d\n",
__func__, error);
}
}
WPI_NT_UNLOCK(sc);
}
static void
wpi_restore_node_table(struct wpi_softc *sc, struct wpi_vap *wvp)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
/* Set group keys once. */
WPI_NT_LOCK(sc);
wvp->wv_gtk = 0;
WPI_NT_UNLOCK(sc);
ieee80211_iterate_nodes(&ic->ic_sta, wpi_restore_node, sc);
ieee80211_crypto_reload_keys(ic);
}
/**
* Called by net80211 whenever there is a change to the 802.11 state machine
*/
static int
wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct wpi_vap *wvp = WPI_VAP(vap);
struct ieee80211com *ic = vap->iv_ic;
struct wpi_softc *sc = ic->ic_softc;
int error = 0;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
+ WPI_TXQ_LOCK(sc);
+ if (nstate > IEEE80211_S_INIT && sc->sc_running == 0) {
+ DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
+ WPI_TXQ_UNLOCK(sc);
+
+ return ENXIO;
+ }
+ WPI_TXQ_UNLOCK(sc);
+
DPRINTF(sc, WPI_DEBUG_STATE, "%s: %s -> %s\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[nstate]);
if (vap->iv_state == IEEE80211_S_RUN && nstate < IEEE80211_S_RUN) {
if ((error = wpi_set_pslevel(sc, 0, 0, 1)) != 0) {
device_printf(sc->sc_dev,
"%s: could not set power saving level\n",
__func__);
return error;
}
wpi_set_led(sc, WPI_LED_LINK, 1, 0);
}
switch (nstate) {
case IEEE80211_S_SCAN:
WPI_RXON_LOCK(sc);
if (wpi_check_bss_filter(sc) != 0) {
sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
device_printf(sc->sc_dev,
"%s: could not send RXON\n", __func__);
}
}
WPI_RXON_UNLOCK(sc);
break;
case IEEE80211_S_ASSOC:
if (vap->iv_state != IEEE80211_S_RUN)
break;
/* FALLTHROUGH */
case IEEE80211_S_AUTH:
/*
* NB: do not optimize the AUTH -> AUTH state transition -
* this will break powersave with non-QoS AP!
*/
/*
* The node must be registered in the firmware before auth.
* Also the associd must be cleared on RUN -> ASSOC
* transitions.
*/
if ((error = wpi_auth(sc, vap)) != 0) {
device_printf(sc->sc_dev,
"%s: could not move to AUTH state, error %d\n",
__func__, error);
}
break;
case IEEE80211_S_RUN:
/*
* RUN -> RUN transition:
* STA mode: Just restart the timers.
* IBSS mode: Process IBSS merge.
*/
if (vap->iv_state == IEEE80211_S_RUN) {
if (vap->iv_opmode != IEEE80211_M_IBSS) {
WPI_RXON_LOCK(sc);
wpi_calib_timeout(sc);
WPI_RXON_UNLOCK(sc);
break;
} else {
/*
* Drop the BSS_FILTER bit
* (there is no other way to change the bssid).
*/
WPI_RXON_LOCK(sc);
sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
device_printf(sc->sc_dev,
"%s: could not send RXON\n",
__func__);
}
WPI_RXON_UNLOCK(sc);
/* Restore everything that was lost. */
wpi_restore_node_table(sc, wvp);
/* XXX set conditionally? */
wpi_updateedca(ic);
}
}
/*
* !RUN -> RUN requires setting the association id
* which is done with a firmware cmd. We also defer
* starting the timers until that work is done.
*/
if ((error = wpi_run(sc, vap)) != 0) {
device_printf(sc->sc_dev,
"%s: could not move to RUN state\n", __func__);
}
break;
default:
break;
}
if (error != 0) {
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
return error;
}
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return wvp->wv_newstate(vap, nstate, arg);
}
static void
wpi_calib_timeout(void *arg)
{
struct wpi_softc *sc = arg;
if (wpi_check_bss_filter(sc) == 0)
return;
wpi_power_calibration(sc);
callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc);
}
static __inline uint8_t
rate2plcp(const uint8_t rate)
{
switch (rate) {
case 12: return 0xd;
case 18: return 0xf;
case 24: return 0x5;
case 36: return 0x7;
case 48: return 0x9;
case 72: return 0xb;
case 96: return 0x1;
case 108: return 0x3;
case 2: return 10;
case 4: return 20;
case 11: return 55;
case 22: return 110;
default: return 0;
}
}
static __inline uint8_t
plcp2rate(const uint8_t plcp)
{
switch (plcp) {
case 0xd: return 12;
case 0xf: return 18;
case 0x5: return 24;
case 0x7: return 36;
case 0x9: return 48;
case 0xb: return 72;
case 0x1: return 96;
case 0x3: return 108;
case 10: return 2;
case 20: return 4;
case 55: return 11;
case 110: return 22;
default: return 0;
}
}
/* Quickly determine if a given rate is CCK or OFDM. */
#define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22)
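A side note on the rate handling: the rate values in the two tables above are IEEE rates in 500 kb/s units, so 2/4/11/22 are the CCK (802.11b) rates and 12 through 108 are the OFDM rates, which is exactly what WPI_RATE_IS_OFDM() tests. The following minimal userland sketch (not driver code; local copies of the tables) checks that the two tables are inverses and prints the classification:

/*
 * Sketch only: userland copy of rate2plcp()/plcp2rate() and the
 * CCK-vs-OFDM test, for illustration.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RATE_IS_OFDM(rate)	((rate) >= 12 && (rate) != 22)

static uint8_t
rate_to_plcp(uint8_t rate)
{
	switch (rate) {
	case 12: return 0xd;	case 18: return 0xf;
	case 24: return 0x5;	case 36: return 0x7;
	case 48: return 0x9;	case 72: return 0xb;
	case 96: return 0x1;	case 108: return 0x3;
	case 2: return 10;	case 4: return 20;
	case 11: return 55;	case 22: return 110;
	default: return 0;
	}
}

static uint8_t
plcp_to_rate(uint8_t plcp)
{
	switch (plcp) {
	case 0xd: return 12;	case 0xf: return 18;
	case 0x5: return 24;	case 0x7: return 36;
	case 0x9: return 48;	case 0xb: return 72;
	case 0x1: return 96;	case 0x3: return 108;
	case 10: return 2;	case 20: return 4;
	case 55: return 11;	case 110: return 22;
	default: return 0;
	}
}

int
main(void)
{
	static const uint8_t rates[] = { 2, 4, 11, 22,		/* CCK */
	    12, 18, 24, 36, 48, 72, 96, 108 };			/* OFDM */
	size_t i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		uint8_t r = rates[i];

		assert(plcp_to_rate(rate_to_plcp(r)) == r);
		printf("%3d (%4.1f Mb/s): %s\n", r, r * 0.5,
		    RATE_IS_OFDM(r) ? "OFDM" : "CCK");
	}
	return 0;
}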
static void
wpi_rx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc,
struct wpi_rx_data *data)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct wpi_rx_ring *ring = &sc->rxq;
struct wpi_rx_stat *stat;
struct wpi_rx_head *head;
struct wpi_rx_tail *tail;
struct ieee80211_frame *wh;
struct ieee80211_node *ni;
struct mbuf *m, *m1;
bus_addr_t paddr;
uint32_t flags;
uint16_t len;
int error;
stat = (struct wpi_rx_stat *)(desc + 1);
if (stat->len > WPI_STAT_MAXLEN) {
device_printf(sc->sc_dev, "invalid RX statistic header\n");
goto fail1;
}
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len);
len = le16toh(head->len);
tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + len);
flags = le32toh(tail->flags);
DPRINTF(sc, WPI_DEBUG_RECV, "%s: idx %d len %d stat len %u rssi %d"
" rate %x chan %d tstamp %ju\n", __func__, ring->cur,
le32toh(desc->len), len, (int8_t)stat->rssi,
head->plcp, head->chan, (uintmax_t)le64toh(tail->tstamp));
/* Discard frames with a bad FCS early. */
if ((flags & WPI_RX_NOERROR) != WPI_RX_NOERROR) {
DPRINTF(sc, WPI_DEBUG_RECV, "%s: RX flags error %x\n",
__func__, flags);
goto fail1;
}
/* Discard frames that are too short. */
if (len < sizeof (struct ieee80211_frame_ack)) {
DPRINTF(sc, WPI_DEBUG_RECV, "%s: frame too short: %d\n",
__func__, len);
goto fail1;
}
m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
if (m1 == NULL) {
DPRINTF(sc, WPI_DEBUG_ANY, "%s: no mbuf to restock ring\n",
__func__);
goto fail1;
}
bus_dmamap_unload(ring->data_dmat, data->map);
error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *),
MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev,
"%s: bus_dmamap_load failed, error %d\n", __func__, error);
m_freem(m1);
/* Try to reload the old mbuf. */
error = bus_dmamap_load(ring->data_dmat, data->map,
mtod(data->m, void *), MJUMPAGESIZE, wpi_dma_map_addr,
&paddr, BUS_DMA_NOWAIT);
if (error != 0 && error != EFBIG) {
panic("%s: could not load old RX mbuf", __func__);
}
/* Physical address may have changed. */
ring->desc[ring->cur] = htole32(paddr);
bus_dmamap_sync(ring->data_dmat, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
goto fail1;
}
m = data->m;
data->m = m1;
/* Update RX descriptor. */
ring->desc[ring->cur] = htole32(paddr);
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
/* Finalize mbuf. */
- m->m_pkthdr.rcvif = ifp;
m->m_data = (caddr_t)(head + 1);
m->m_pkthdr.len = m->m_len = len;
/* Grab a reference to the source node. */
wh = mtod(m, struct ieee80211_frame *);
if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) &&
(flags & WPI_RX_CIPHER_MASK) == WPI_RX_CIPHER_CCMP) {
/* Check whether decryption was successful or not. */
if ((flags & WPI_RX_DECRYPT_MASK) != WPI_RX_DECRYPT_OK) {
DPRINTF(sc, WPI_DEBUG_RECV,
"CCMP decryption failed 0x%x\n", flags);
goto fail2;
}
m->m_flags |= M_WEP;
}
if (len >= sizeof(struct ieee80211_frame_min))
ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
else
ni = NULL;
sc->rx_tstamp = tail->tstamp;
if (ieee80211_radiotap_active(ic)) {
struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap;
tap->wr_flags = 0;
if (head->flags & htole16(WPI_STAT_FLAG_SHPREAMBLE))
tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
tap->wr_dbm_antsignal = (int8_t)(stat->rssi + WPI_RSSI_OFFSET);
tap->wr_dbm_antnoise = WPI_RSSI_OFFSET;
tap->wr_tsft = tail->tstamp;
tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf;
tap->wr_rate = plcp2rate(head->plcp);
}
WPI_UNLOCK(sc);
/* Send the frame to the 802.11 layer. */
if (ni != NULL) {
(void)ieee80211_input(ni, m, stat->rssi, WPI_RSSI_OFFSET);
/* Node is no longer needed. */
ieee80211_free_node(ni);
} else
(void)ieee80211_input_all(ic, m, stat->rssi, WPI_RSSI_OFFSET);
WPI_LOCK(sc);
return;
fail2: m_freem(m);
-fail1: if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+fail1: counter_u64_add(ic->ic_ierrors, 1);
}
static void
wpi_rx_statistics(struct wpi_softc *sc, struct wpi_rx_desc *desc,
struct wpi_rx_data *data)
{
/* Ignore */
}
static void
wpi_tx_done(struct wpi_softc *sc, struct wpi_rx_desc *desc)
{
- struct ifnet *ifp = sc->sc_ifp;
struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3];
struct wpi_tx_data *data = &ring->data[desc->idx];
struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1);
struct mbuf *m;
struct ieee80211_node *ni;
struct ieee80211vap *vap;
struct ieee80211com *ic;
uint32_t status = le32toh(stat->status);
int ackfailcnt = stat->ackfailcnt / WPI_NTRIES_DEFAULT;
KASSERT(data->ni != NULL, ("no node"));
KASSERT(data->m != NULL, ("no mbuf"));
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
DPRINTF(sc, WPI_DEBUG_XMIT, "%s: "
"qid %d idx %d retries %d btkillcnt %d rate %x duration %d "
"status %x\n", __func__, desc->qid, desc->idx, stat->ackfailcnt,
stat->btkillcnt, stat->rate, le32toh(stat->duration), status);
/* Unmap and free mbuf. */
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m = data->m, data->m = NULL;
ni = data->ni, data->ni = NULL;
vap = ni->ni_vap;
ic = vap->iv_ic;
/*
* Update rate control statistics for the node.
*/
if (status & WPI_TX_STATUS_FAIL) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
ieee80211_ratectl_tx_complete(vap, ni,
IEEE80211_RATECTL_TX_FAILURE, &ackfailcnt, NULL);
- } else {
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
+ } else
ieee80211_ratectl_tx_complete(vap, ni,
IEEE80211_RATECTL_TX_SUCCESS, &ackfailcnt, NULL);
- }
ieee80211_tx_complete(ni, m, (status & WPI_TX_STATUS_FAIL) != 0);
WPI_TXQ_STATE_LOCK(sc);
ring->queued -= 1;
if (ring->queued > 0) {
callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc);
- if (sc->qfullmsk != 0 &&
- ring->queued < WPI_TX_RING_LOMARK) {
+ if ((sc->qfullmsk & (1 << ring->qid)) != 0 &&
+ ring->queued < WPI_TX_RING_LOMARK) {
sc->qfullmsk &= ~(1 << ring->qid);
- IF_LOCK(&ifp->if_snd);
- if (sc->qfullmsk == 0 &&
- (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- IF_UNLOCK(&ifp->if_snd);
- ieee80211_runtask(ic, &sc->sc_start_task);
- } else
- IF_UNLOCK(&ifp->if_snd);
+ ieee80211_runtask(ic, &sc->sc_start_task);
}
} else
callout_stop(&sc->tx_timeout);
WPI_TXQ_STATE_UNLOCK(sc);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
}
/*
* Process a "command done" firmware notification. This is where we wake up
* processes waiting for a synchronous command completion.
*/
static void
wpi_cmd_done(struct wpi_softc *sc, struct wpi_rx_desc *desc)
{
struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM];
struct wpi_tx_data *data;
DPRINTF(sc, WPI_DEBUG_CMD, "cmd notification qid %x idx %d flags %x "
"type %s len %d\n", desc->qid, desc->idx,
desc->flags, wpi_cmd_str(desc->type),
le32toh(desc->len));
if ((desc->qid & WPI_RX_DESC_QID_MSK) != WPI_CMD_QUEUE_NUM)
return; /* Not a command ack. */
KASSERT(ring->queued == 0, ("ring->queued must be 0"));
data = &ring->data[desc->idx];
/* If the command was mapped in an mbuf, free it. */
if (data->m != NULL) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(ring->data_dmat, data->map);
m_freem(data->m);
data->m = NULL;
}
wakeup(&ring->cmd[desc->idx]);
if (desc->type == WPI_CMD_SET_POWER_MODE) {
WPI_TXQ_LOCK(sc);
if (sc->sc_flags & WPI_PS_PATH) {
sc->sc_update_rx_ring = wpi_update_rx_ring_ps;
sc->sc_update_tx_ring = wpi_update_tx_ring_ps;
} else {
sc->sc_update_rx_ring = wpi_update_rx_ring;
sc->sc_update_tx_ring = wpi_update_tx_ring;
}
WPI_TXQ_UNLOCK(sc);
}
}
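The wakeup() on &ring->cmd[desc->idx] above is the completion half of a sleep/wakeup pair; the synchronous command submission path (outside this hunk) sleeps on that same address until the firmware acknowledges the command. Purely as an analogy, and not as driver code, here is the same "block until the completion handler signals" pattern expressed with POSIX threads instead of the kernel's sleep/wakeup primitives (all names here are invented):

/*
 * Analogy only: condition-variable version of the wait-for-command-done
 * handshake.  The kernel driver uses sleep/wakeup on the command slot
 * address instead of pthreads.
 */
#include <assert.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cmd_done_cv = PTHREAD_COND_INITIALIZER;
static int cmd_done;

static void *
completion_handler(void *arg)
{
	(void)arg;
	/* ... handle the "command done" notification ... */
	pthread_mutex_lock(&lock);
	cmd_done = 1;
	pthread_cond_signal(&cmd_done_cv);	/* analogue of wakeup() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int
main(void)
{
	pthread_t tid;

	pthread_mutex_lock(&lock);
	pthread_create(&tid, NULL, completion_handler, NULL);
	while (!cmd_done)		/* analogue of the sleeping waiter */
		pthread_cond_wait(&cmd_done_cv, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	assert(cmd_done == 1);
	return 0;
}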
static void
wpi_notif_intr(struct wpi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t hw;
bus_dmamap_sync(sc->shared_dma.tag, sc->shared_dma.map,
BUS_DMASYNC_POSTREAD);
hw = le32toh(sc->shared->next) & 0xfff;
hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1;
while (sc->rxq.cur != hw) {
sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT;
struct wpi_rx_data *data = &sc->rxq.data[sc->rxq.cur];
struct wpi_rx_desc *desc;
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
desc = mtod(data->m, struct wpi_rx_desc *);
DPRINTF(sc, WPI_DEBUG_NOTIFY,
"%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n",
__func__, sc->rxq.cur, desc->qid, desc->idx, desc->flags,
desc->type, wpi_cmd_str(desc->type), le32toh(desc->len));
if (!(desc->qid & WPI_UNSOLICITED_RX_NOTIF)) {
/* Reply to a command. */
wpi_cmd_done(sc, desc);
}
switch (desc->type) {
case WPI_RX_DONE:
/* An 802.11 frame has been received. */
wpi_rx_done(sc, desc, data);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ if (sc->sc_running == 0) {
/* wpi_stop() was called. */
return;
}
break;
case WPI_TX_DONE:
/* An 802.11 frame has been transmitted. */
wpi_tx_done(sc, desc);
break;
case WPI_RX_STATISTICS:
case WPI_BEACON_STATISTICS:
wpi_rx_statistics(sc, desc, data);
break;
case WPI_BEACON_MISSED:
{
struct wpi_beacon_missed *miss =
(struct wpi_beacon_missed *)(desc + 1);
uint32_t expected, misses, received, threshold;
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
misses = le32toh(miss->consecutive);
expected = le32toh(miss->expected);
received = le32toh(miss->received);
threshold = MAX(2, vap->iv_bmissthreshold);
DPRINTF(sc, WPI_DEBUG_BMISS,
"%s: beacons missed %u(%u) (received %u/%u)\n",
__func__, misses, le32toh(miss->total), received,
expected);
if (misses >= threshold ||
(received == 0 && expected >= threshold)) {
WPI_RXON_LOCK(sc);
if (callout_pending(&sc->scan_timeout)) {
wpi_cmd(sc, WPI_CMD_SCAN_ABORT, NULL,
0, 1);
}
WPI_RXON_UNLOCK(sc);
if (vap->iv_state == IEEE80211_S_RUN &&
(ic->ic_flags & IEEE80211_F_SCAN) == 0)
ieee80211_beacon_miss(ic);
}
break;
}
#ifdef WPI_DEBUG
case WPI_BEACON_SENT:
{
struct wpi_tx_stat *stat =
(struct wpi_tx_stat *)(desc + 1);
uint64_t *tsf = (uint64_t *)(stat + 1);
uint32_t *mode = (uint32_t *)(tsf + 1);
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
DPRINTF(sc, WPI_DEBUG_BEACON,
"beacon sent: rts %u, ack %u, btkill %u, rate %u, "
"duration %u, status %x, tsf %ju, mode %x\n",
stat->rtsfailcnt, stat->ackfailcnt,
stat->btkillcnt, stat->rate, le32toh(stat->duration),
le32toh(stat->status), *tsf, *mode);
break;
}
#endif
case WPI_UC_READY:
{
struct wpi_ucode_info *uc =
(struct wpi_ucode_info *)(desc + 1);
/* The microcontroller is ready. */
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
DPRINTF(sc, WPI_DEBUG_RESET,
"microcode alive notification version=%d.%d "
"subtype=%x alive=%x\n", uc->major, uc->minor,
uc->subtype, le32toh(uc->valid));
if (le32toh(uc->valid) != 1) {
device_printf(sc->sc_dev,
"microcontroller initialization failed\n");
wpi_stop_locked(sc);
+ return;
}
/* Save the address of the error log in SRAM. */
sc->errptr = le32toh(uc->errptr);
break;
}
case WPI_STATE_CHANGED:
{
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
uint32_t *status = (uint32_t *)(desc + 1);
DPRINTF(sc, WPI_DEBUG_STATE, "state changed to %x\n",
le32toh(*status));
if (le32toh(*status) & 1) {
WPI_NT_LOCK(sc);
wpi_clear_node_table(sc);
WPI_NT_UNLOCK(sc);
taskqueue_enqueue(sc->sc_tq,
&sc->sc_radiooff_task);
return;
}
break;
}
#ifdef WPI_DEBUG
case WPI_START_SCAN:
{
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
struct wpi_start_scan *scan =
(struct wpi_start_scan *)(desc + 1);
DPRINTF(sc, WPI_DEBUG_SCAN,
"%s: scanning channel %d status %x\n",
__func__, scan->chan, le32toh(scan->status));
break;
}
#endif
case WPI_STOP_SCAN:
{
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
struct wpi_stop_scan *scan =
(struct wpi_stop_scan *)(desc + 1);
DPRINTF(sc, WPI_DEBUG_SCAN,
"scan finished nchan=%d status=%d chan=%d\n",
scan->nchan, scan->status, scan->chan);
WPI_RXON_LOCK(sc);
callout_stop(&sc->scan_timeout);
WPI_RXON_UNLOCK(sc);
if (scan->status == WPI_SCAN_ABORTED)
ieee80211_cancel_scan(vap);
else
ieee80211_scan_next(vap);
break;
}
}
if (sc->rxq.cur % 8 == 0) {
/* Tell the firmware what we have processed. */
sc->sc_update_rx_ring(sc);
}
}
}
/*
* Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
* from power-down sleep mode.
*/
static void
wpi_wakeup_intr(struct wpi_softc *sc)
{
int qid;
DPRINTF(sc, WPI_DEBUG_PWRSAVE,
"%s: ucode wakeup from power-down sleep\n", __func__);
/* Wakeup RX and TX rings. */
if (sc->rxq.update) {
sc->rxq.update = 0;
wpi_update_rx_ring(sc);
}
WPI_TXQ_LOCK(sc);
for (qid = 0; qid < WPI_DRV_NTXQUEUES; qid++) {
struct wpi_tx_ring *ring = &sc->txq[qid];
if (ring->update) {
ring->update = 0;
wpi_update_tx_ring(sc, ring);
}
}
WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_MAC_ACCESS_REQ);
WPI_TXQ_UNLOCK(sc);
}
/*
* This function prints firmware registers
*/
#ifdef WPI_DEBUG
static void
wpi_debug_registers(struct wpi_softc *sc)
{
size_t i;
static const uint32_t csr_tbl[] = {
WPI_HW_IF_CONFIG,
WPI_INT,
WPI_INT_MASK,
WPI_FH_INT,
WPI_GPIO_IN,
WPI_RESET,
WPI_GP_CNTRL,
WPI_EEPROM,
WPI_EEPROM_GP,
WPI_GIO,
WPI_UCODE_GP1,
WPI_UCODE_GP2,
WPI_GIO_CHICKEN,
WPI_ANA_PLL,
WPI_DBG_HPET_MEM,
};
static const uint32_t prph_tbl[] = {
WPI_APMG_CLK_CTRL,
WPI_APMG_PS,
WPI_APMG_PCI_STT,
WPI_APMG_RFKILL,
};
DPRINTF(sc, WPI_DEBUG_REGISTER,"%s","\n");
for (i = 0; i < nitems(csr_tbl); i++) {
DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ",
wpi_get_csr_string(csr_tbl[i]), WPI_READ(sc, csr_tbl[i]));
if ((i + 1) % 2 == 0)
DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
}
DPRINTF(sc, WPI_DEBUG_REGISTER, "\n\n");
if (wpi_nic_lock(sc) == 0) {
for (i = 0; i < nitems(prph_tbl); i++) {
DPRINTF(sc, WPI_DEBUG_REGISTER, " %-18s: 0x%08x ",
wpi_get_prph_string(prph_tbl[i]),
wpi_prph_read(sc, prph_tbl[i]));
if ((i + 1) % 2 == 0)
DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
}
DPRINTF(sc, WPI_DEBUG_REGISTER, "\n");
wpi_nic_unlock(sc);
} else {
DPRINTF(sc, WPI_DEBUG_REGISTER,
"Cannot access internal registers.\n");
}
}
#endif
/*
* Dump the error log of the firmware when a firmware panic occurs. Although
* we can't debug the firmware because it is neither open source nor free, it
* can help us to identify certain classes of problems.
*/
static void
wpi_fatal_intr(struct wpi_softc *sc)
{
struct wpi_fw_dump dump;
uint32_t i, offset, count;
/* Check that the error log address is valid. */
if (sc->errptr < WPI_FW_DATA_BASE ||
sc->errptr + sizeof (dump) >
WPI_FW_DATA_BASE + WPI_FW_DATA_MAXSZ) {
printf("%s: bad firmware error log address 0x%08x\n", __func__,
sc->errptr);
return;
}
if (wpi_nic_lock(sc) != 0) {
printf("%s: could not read firmware error log\n", __func__);
return;
}
/* Read number of entries in the log. */
count = wpi_mem_read(sc, sc->errptr);
if (count == 0 || count * sizeof (dump) > WPI_FW_DATA_MAXSZ) {
printf("%s: invalid count field (count = %u)\n", __func__,
count);
wpi_nic_unlock(sc);
return;
}
/* Skip "count" field. */
offset = sc->errptr + sizeof (uint32_t);
printf("firmware error log (count = %u):\n", count);
for (i = 0; i < count; i++) {
wpi_mem_read_region_4(sc, offset, (uint32_t *)&dump,
sizeof (dump) / sizeof (uint32_t));
printf(" error type = \"%s\" (0x%08X)\n",
(dump.desc < nitems(wpi_fw_errmsg)) ?
wpi_fw_errmsg[dump.desc] : "UNKNOWN",
dump.desc);
printf(" error data = 0x%08X\n",
dump.data);
printf(" branch link = 0x%08X%08X\n",
dump.blink[0], dump.blink[1]);
printf(" interrupt link = 0x%08X%08X\n",
dump.ilink[0], dump.ilink[1]);
printf(" time = %u\n", dump.time);
offset += sizeof (dump);
}
wpi_nic_unlock(sc);
/* Dump driver status (TX and RX rings) while we're here. */
printf("driver status:\n");
WPI_TXQ_LOCK(sc);
for (i = 0; i < WPI_DRV_NTXQUEUES; i++) {
struct wpi_tx_ring *ring = &sc->txq[i];
printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
i, ring->qid, ring->cur, ring->queued);
}
WPI_TXQ_UNLOCK(sc);
printf(" rx ring: cur=%d\n", sc->rxq.cur);
}
static void
wpi_intr(void *arg)
{
struct wpi_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
uint32_t r1, r2;
WPI_LOCK(sc);
/* Disable interrupts. */
WPI_WRITE(sc, WPI_INT_MASK, 0);
r1 = WPI_READ(sc, WPI_INT);
if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
goto end; /* Hardware gone! */
r2 = WPI_READ(sc, WPI_FH_INT);
DPRINTF(sc, WPI_DEBUG_INTR, "%s: reg1=0x%08x reg2=0x%08x\n", __func__,
r1, r2);
if (r1 == 0 && r2 == 0)
goto done; /* Interrupt not for us. */
/* Acknowledge interrupts. */
WPI_WRITE(sc, WPI_INT, r1);
WPI_WRITE(sc, WPI_FH_INT, r2);
if (r1 & (WPI_INT_SW_ERR | WPI_INT_HW_ERR)) {
device_printf(sc->sc_dev, "fatal firmware error\n");
#ifdef WPI_DEBUG
wpi_debug_registers(sc);
#endif
wpi_fatal_intr(sc);
DPRINTF(sc, WPI_DEBUG_HW,
"(%s)\n", (r1 & WPI_INT_SW_ERR) ? "(Software Error)" :
"(Hardware Error)");
taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask);
goto end;
}
if ((r1 & (WPI_INT_FH_RX | WPI_INT_SW_RX)) ||
(r2 & WPI_FH_INT_RX))
wpi_notif_intr(sc);
if (r1 & WPI_INT_ALIVE)
wakeup(sc); /* Firmware is alive. */
if (r1 & WPI_INT_WAKEUP)
wpi_wakeup_intr(sc);
done:
/* Re-enable interrupts. */
- if (ifp->if_flags & IFF_UP)
+ if (sc->sc_running)
WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF);
end: WPI_UNLOCK(sc);
}
static int
wpi_cmd2(struct wpi_softc *sc, struct wpi_buf *buf)
{
- struct ifnet *ifp = sc->sc_ifp;
struct ieee80211_frame *wh;
struct wpi_tx_cmd *cmd;
struct wpi_tx_data *data;
struct wpi_tx_desc *desc;
struct wpi_tx_ring *ring;
struct mbuf *m1;
bus_dma_segment_t *seg, segs[WPI_MAX_SCATTER];
int error, i, hdrlen, nsegs, totlen, pad;
WPI_TXQ_LOCK(sc);
KASSERT(buf->size <= sizeof(buf->data), ("buffer overflow"));
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
- if (sc->txq_active == 0) {
+ if (sc->sc_running == 0) {
/* wpi_stop() was called */
error = ENETDOWN;
goto fail;
}
wh = mtod(buf->m, struct ieee80211_frame *);
hdrlen = ieee80211_anyhdrsize(wh);
totlen = buf->m->m_pkthdr.len;
if (hdrlen & 3) {
/* First segment length must be a multiple of 4. */
pad = 4 - (hdrlen & 3);
} else
pad = 0;
ring = &sc->txq[buf->ac];
desc = &ring->desc[ring->cur];
data = &ring->data[ring->cur];
/* Prepare TX firmware command. */
cmd = &ring->cmd[ring->cur];
cmd->code = buf->code;
cmd->flags = 0;
cmd->qid = ring->qid;
cmd->idx = ring->cur;
memcpy(cmd->data, buf->data, buf->size);
/* Save and trim IEEE802.11 header. */
memcpy((uint8_t *)(cmd->data + buf->size), wh, hdrlen);
m_adj(buf->m, hdrlen);
error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, buf->m,
segs, &nsegs, BUS_DMA_NOWAIT);
if (error != 0 && error != EFBIG) {
device_printf(sc->sc_dev,
"%s: can't map mbuf (error %d)\n", __func__, error);
goto fail;
}
if (error != 0) {
/* Too many DMA segments, linearize mbuf. */
m1 = m_collapse(buf->m, M_NOWAIT, WPI_MAX_SCATTER - 1);
if (m1 == NULL) {
device_printf(sc->sc_dev,
"%s: could not defrag mbuf\n", __func__);
error = ENOBUFS;
goto fail;
}
buf->m = m1;
error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map,
buf->m, segs, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: can't map mbuf (error %d)\n", __func__,
error);
goto fail;
}
}
KASSERT(nsegs < WPI_MAX_SCATTER,
("too many DMA segments, nsegs (%d) should be less than %d",
nsegs, WPI_MAX_SCATTER));
data->m = buf->m;
data->ni = buf->ni;
DPRINTF(sc, WPI_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n",
__func__, ring->qid, ring->cur, totlen, nsegs);
/* Fill TX descriptor. */
desc->nsegs = WPI_PAD32(totlen + pad) << 4 | (1 + nsegs);
/* First DMA segment is used by the TX command. */
desc->segs[0].addr = htole32(data->cmd_paddr);
desc->segs[0].len = htole32(4 + buf->size + hdrlen + pad);
/* Other DMA segments are for data payload. */
seg = &segs[0];
for (i = 1; i <= nsegs; i++) {
desc->segs[i].addr = htole32(seg->ds_addr);
desc->segs[i].len = htole32(seg->ds_len);
seg++;
}
bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
/* Kick TX ring. */
ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT;
sc->sc_update_tx_ring(sc, ring);
if (ring->qid < WPI_CMD_QUEUE_NUM) {
/* Mark TX ring as full if we reach a certain threshold. */
WPI_TXQ_STATE_LOCK(sc);
- if (++ring->queued > WPI_TX_RING_HIMARK) {
+ if (++ring->queued > WPI_TX_RING_HIMARK)
sc->qfullmsk |= 1 << ring->qid;
-
- IF_LOCK(&ifp->if_snd);
- ifp->if_drv_flags |= IFF_DRV_OACTIVE;
- IF_UNLOCK(&ifp->if_snd);
- }
-
callout_reset(&sc->tx_timeout, 5*hz, wpi_tx_timeout, sc);
WPI_TXQ_STATE_UNLOCK(sc);
}
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
WPI_TXQ_UNLOCK(sc);
return 0;
fail: m_freem(buf->m);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
WPI_TXQ_UNLOCK(sc);
return error;
}
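A tiny standalone check of the padding arithmetic in wpi_cmd2() above: per the in-code comment, the first segment length must be a multiple of 4, so the copied 802.11 header is padded out to the next 4-byte boundary. Sketch only, not driver code:

/* Sketch only: the pad computation from wpi_cmd2(), checked in isolation. */
#include <assert.h>

static int
header_pad(int hdrlen)
{
	return (hdrlen & 3) ? 4 - (hdrlen & 3) : 0;
}

int
main(void)
{
	int hdrlen;

	assert(header_pad(26) == 2);	/* e.g. a 26-byte QoS data header */
	assert(header_pad(24) == 0);	/* a 24-byte header is already aligned */
	for (hdrlen = 10; hdrlen <= 40; hdrlen++)
		assert((hdrlen + header_pad(hdrlen)) % 4 == 0);
	return 0;
}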
/*
* Construct the data packet for a transmit buffer.
*/
static int
wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
{
const struct ieee80211_txparam *tp;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct wpi_node *wn = WPI_NODE(ni);
struct ieee80211_channel *chan;
struct ieee80211_frame *wh;
struct ieee80211_key *k = NULL;
struct wpi_buf tx_data;
struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data;
uint32_t flags;
uint16_t qos;
uint8_t tid, type;
int ac, error, swcrypt, rate, ismcast, totlen;
wh = mtod(m, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
/* Select EDCA Access Category and TX ring for this frame. */
if (IEEE80211_QOS_HAS_SEQ(wh)) {
qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
tid = qos & IEEE80211_QOS_TID;
} else {
qos = 0;
tid = 0;
}
ac = M_WME_GETAC(m);
chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ?
ni->ni_chan : ic->ic_curchan;
tp = &vap->iv_txparms[ieee80211_chan2mode(chan)];
/* Choose a TX rate index. */
if (type == IEEE80211_FC0_TYPE_MGT)
rate = tp->mgmtrate;
else if (ismcast)
rate = tp->mcastrate;
else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
rate = tp->ucastrate;
else if (m->m_flags & M_EAPOL)
rate = tp->mgmtrate;
else {
/* XXX pass pktlen */
(void) ieee80211_ratectl_rate(ni, NULL, 0);
rate = ni->ni_txrate;
}
/* Encrypt the frame if need be. */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
/* Retrieve key for TX. */
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
error = ENOBUFS;
goto fail;
}
swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT;
/* 802.11 header may have moved. */
wh = mtod(m, struct ieee80211_frame *);
}
totlen = m->m_pkthdr.len;
if (ieee80211_radiotap_active_vap(vap)) {
struct wpi_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
if (k != NULL)
tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
ieee80211_radiotap_tx(vap, m);
}
flags = 0;
if (!ismcast) {
/* Unicast frame, check if an ACK is expected. */
if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
IEEE80211_QOS_ACKPOLICY_NOACK)
flags |= WPI_TX_NEED_ACK;
}
if (!IEEE80211_QOS_HAS_SEQ(wh))
flags |= WPI_TX_AUTO_SEQ;
if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
flags |= WPI_TX_MORE_FRAG; /* Cannot happen yet. */
/* Check if frame must be protected using RTS/CTS or CTS-to-self. */
if (!ismcast) {
/* NB: Group frames are sent using CCK in 802.11b/g. */
if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) {
flags |= WPI_TX_NEED_RTS;
} else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
WPI_RATE_IS_OFDM(rate)) {
if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
flags |= WPI_TX_NEED_CTS;
else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
flags |= WPI_TX_NEED_RTS;
}
if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS))
flags |= WPI_TX_FULL_TXOP;
}
memset(tx, 0, sizeof (struct wpi_cmd_data));
if (type == IEEE80211_FC0_TYPE_MGT) {
uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
/* Tell HW to set timestamp in probe responses. */
if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
flags |= WPI_TX_INSERT_TSTAMP;
if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
tx->timeout = htole16(3);
else
tx->timeout = htole16(2);
}
if (ismcast || type != IEEE80211_FC0_TYPE_DATA)
tx->id = WPI_ID_BROADCAST;
else {
if (wn->id == WPI_ID_UNDEFINED) {
device_printf(sc->sc_dev,
"%s: undefined node id\n", __func__);
error = EINVAL;
goto fail;
}
tx->id = wn->id;
}
if (k != NULL && !swcrypt) {
switch (k->wk_cipher->ic_cipher) {
case IEEE80211_CIPHER_AES_CCM:
tx->security = WPI_CIPHER_CCMP;
break;
default:
break;
}
memcpy(tx->key, k->wk_key, k->wk_keylen);
}
tx->len = htole16(totlen);
tx->flags = htole32(flags);
tx->plcp = rate2plcp(rate);
tx->tid = tid;
tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
tx->ofdm_mask = 0xff;
tx->cck_mask = 0x0f;
tx->rts_ntries = 7;
tx->data_ntries = tp->maxretry;
tx_data.ni = ni;
tx_data.m = m;
tx_data.size = sizeof(struct wpi_cmd_data);
tx_data.code = WPI_CMD_TX_DATA;
tx_data.ac = ac;
return wpi_cmd2(sc, &tx_data);
fail: m_freem(m);
return error;
}
static int
wpi_tx_data_raw(struct wpi_softc *sc, struct mbuf *m,
struct ieee80211_node *ni, const struct ieee80211_bpf_params *params)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211_key *k = NULL;
struct ieee80211_frame *wh;
struct wpi_buf tx_data;
struct wpi_cmd_data *tx = (struct wpi_cmd_data *)&tx_data.data;
uint32_t flags;
uint8_t type;
int ac, rate, swcrypt, totlen;
wh = mtod(m, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
ac = params->ibp_pri & 3;
/* Choose a TX rate index. */
rate = params->ibp_rate0;
flags = 0;
if (!IEEE80211_QOS_HAS_SEQ(wh))
flags |= WPI_TX_AUTO_SEQ;
if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
flags |= WPI_TX_NEED_ACK;
if (params->ibp_flags & IEEE80211_BPF_RTS)
flags |= WPI_TX_NEED_RTS;
if (params->ibp_flags & IEEE80211_BPF_CTS)
flags |= WPI_TX_NEED_CTS;
if (flags & (WPI_TX_NEED_RTS | WPI_TX_NEED_CTS))
flags |= WPI_TX_FULL_TXOP;
/* Encrypt the frame if need be. */
if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
/* Retrieve key for TX. */
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
m_freem(m);
return ENOBUFS;
}
swcrypt = k->wk_flags & IEEE80211_KEY_SWCRYPT;
/* 802.11 header may have moved. */
wh = mtod(m, struct ieee80211_frame *);
}
totlen = m->m_pkthdr.len;
if (ieee80211_radiotap_active_vap(vap)) {
struct wpi_tx_radiotap_header *tap = &sc->sc_txtap;
tap->wt_flags = 0;
tap->wt_rate = rate;
if (params->ibp_flags & IEEE80211_BPF_CRYPTO)
tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
ieee80211_radiotap_tx(vap, m);
}
memset(tx, 0, sizeof (struct wpi_cmd_data));
if (type == IEEE80211_FC0_TYPE_MGT) {
uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
/* Tell HW to set timestamp in probe responses. */
if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
flags |= WPI_TX_INSERT_TSTAMP;
if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
tx->timeout = htole16(3);
else
tx->timeout = htole16(2);
}
if (k != NULL && !swcrypt) {
switch (k->wk_cipher->ic_cipher) {
case IEEE80211_CIPHER_AES_CCM:
tx->security = WPI_CIPHER_CCMP;
break;
default:
break;
}
memcpy(tx->key, k->wk_key, k->wk_keylen);
}
tx->len = htole16(totlen);
tx->flags = htole32(flags);
tx->plcp = rate2plcp(rate);
tx->id = WPI_ID_BROADCAST;
tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
tx->rts_ntries = params->ibp_try1;
tx->data_ntries = params->ibp_try0;
tx_data.ni = ni;
tx_data.m = m;
tx_data.size = sizeof(struct wpi_cmd_data);
tx_data.code = WPI_CMD_TX_DATA;
tx_data.ac = ac;
return wpi_cmd2(sc, &tx_data);
}
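+/*
+ * Helpers for the mbufq-based transmit path below (a descriptive note, not
+ * part of the change itself): wpi_tx_ring_is_full() compares the per-AC ring
+ * occupancy against WPI_TX_RING_HIMARK under the TX queue state lock, and
+ * wpi_handle_tx_failure() does the common error accounting; the mbuf itself
+ * has already been reclaimed by the failed transmit routine.
+ */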
+static __inline int
+wpi_tx_ring_is_full(struct wpi_softc *sc, int ac)
+{
+ struct wpi_tx_ring *ring = &sc->txq[ac];
+ int retval;
+
+ WPI_TXQ_STATE_LOCK(sc);
+ retval = (ring->queued > WPI_TX_RING_HIMARK);
+ WPI_TXQ_STATE_UNLOCK(sc);
+
+ return retval;
+}
+
+static __inline void
+wpi_handle_tx_failure(struct ieee80211_node *ni)
+{
+ /* NB: m is reclaimed on tx failure */
+ if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
+ ieee80211_free_node(ni);
+}
+
static int
wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = ni->ni_ic;
- struct ifnet *ifp = ic->ic_ifp;
struct wpi_softc *sc = ic->ic_softc;
- int error = 0;
+ int ac, error = 0;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
- ieee80211_free_node(ni);
+ ac = M_WME_GETAC(m);
+
+ WPI_TX_LOCK(sc);
+
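+ /* Reject the frame if the device is down or this AC's ring has
+ * reached its high-water mark. */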
+ if (sc->sc_running == 0 || wpi_tx_ring_is_full(sc, ac)) {
m_freem(m);
- return ENETDOWN;
+ error = sc->sc_running ? ENOBUFS : ENETDOWN;
+ goto unlock;
}
- WPI_TX_LOCK(sc);
if (params == NULL) {
/*
* Legacy path; interpret frame contents to decide
* precisely how to send the frame.
*/
error = wpi_tx_data(sc, m, ni);
} else {
/*
* Caller supplied explicit parameters to use in
* sending the frame.
*/
error = wpi_tx_data_raw(sc, m, ni, params);
}
- WPI_TX_UNLOCK(sc);
- if (error != 0) {
- /* NB: m is reclaimed on tx failure */
- ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+unlock: WPI_TX_UNLOCK(sc);
+ if (error != 0) {
+ wpi_handle_tx_failure(ni);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
return error;
}
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
}
-/**
- * Process data waiting to be sent on the IFNET output queue
- */
-static void
-wpi_start(struct ifnet *ifp)
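+/*
+ * ic_transmit hook: queue the frame on the per-AC software queue when the
+ * hardware ring is near full or the queue is already backlogged, otherwise
+ * hand it directly to wpi_tx_data().
+ */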
+static int
+wpi_transmit(struct ieee80211com *ic, struct mbuf *m)
{
- struct wpi_softc *sc = ifp->if_softc;
+ struct wpi_softc *sc = ic->ic_softc;
struct ieee80211_node *ni;
- struct mbuf *m;
+ struct mbufq *sndq;
+ int ac, error;
WPI_TX_LOCK(sc);
DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__);
- for (;;) {
- IF_LOCK(&ifp->if_snd);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
- (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
- IF_UNLOCK(&ifp->if_snd);
- break;
- }
- IF_UNLOCK(&ifp->if_snd);
+ /* Check if interface is up & running. */
+ if (sc->sc_running == 0) {
+ error = ENXIO;
+ goto unlock;
+ }
- IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL)
- break;
- ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
- if (wpi_tx_data(sc, m, ni) != 0) {
- ieee80211_free_node(ni);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- }
+ /* Check for available space. */
+ ac = M_WME_GETAC(m);
+ sndq = &sc->txq[ac].snd;
+ if (wpi_tx_ring_is_full(sc, ac) || mbufq_len(sndq) != 0) {
+ /* wpi_tx_done() will dequeue it. */
+ error = mbufq_enqueue(sndq, m);
+ goto unlock;
}
+ error = 0;
+ ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
+ if (wpi_tx_data(sc, m, ni) != 0) {
+ wpi_handle_tx_failure(ni);
+ }
+
DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__);
- WPI_TX_UNLOCK(sc);
+
+unlock: WPI_TX_UNLOCK(sc);
+
+ return (error);
}
+/**
+ * Process data waiting to be sent on the output queue
+ */
static void
-wpi_start_task(void *arg0, int pending)
+wpi_start(void *arg0, int pending)
{
struct wpi_softc *sc = arg0;
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211_node *ni;
+ struct mbuf *m;
+ uint8_t i;
- wpi_start(ifp);
+ WPI_TX_LOCK(sc);
+ if (sc->sc_running == 0)
+ goto unlock;
+
+ DPRINTF(sc, WPI_DEBUG_XMIT, "%s: called\n", __func__);
+
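+ /* Drain each AC's software queue, stopping once the corresponding
+ * hardware ring reaches its high-water mark. */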
+ for (i = 0; i < WPI_CMD_QUEUE_NUM; i++) {
+ struct mbufq *sndq = &sc->txq[i].snd;
+
+ for (;;) {
+ if (wpi_tx_ring_is_full(sc, i))
+ break;
+
+ if ((m = mbufq_dequeue(sndq)) == NULL)
+ break;
+
+ ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
+ if (wpi_tx_data(sc, m, ni) != 0) {
+ wpi_handle_tx_failure(ni);
+ }
+ }
+ }
+
+ DPRINTF(sc, WPI_DEBUG_XMIT, "%s: done\n", __func__);
+unlock: WPI_TX_UNLOCK(sc);
}
static void
wpi_watchdog_rfkill(void *arg)
{
struct wpi_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
DPRINTF(sc, WPI_DEBUG_WATCHDOG, "RFkill Watchdog: tick\n");
/* No need to lock firmware memory. */
if ((wpi_prph_read(sc, WPI_APMG_RFKILL) & 0x1) == 0) {
/* Radio kill switch is still off. */
callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill,
sc);
} else
ieee80211_runtask(ic, &sc->sc_radioon_task);
}
static void
wpi_scan_timeout(void *arg)
{
struct wpi_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
- if_printf(ifp, "scan timeout\n");
+ ic_printf(ic, "scan timeout\n");
taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask);
}
static void
wpi_tx_timeout(void *arg)
{
struct wpi_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
+ struct ieee80211com *ic = &sc->sc_ic;
- if_printf(ifp, "device timeout\n");
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ ic_printf(ic, "device timeout\n");
taskqueue_enqueue(sc->sc_tq, &sc->sc_reinittask);
}
-static int
-wpi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
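+/*
+ * ic_parent callback: bring the hardware up or down to match the number of
+ * running vaps and report the resulting radio state to net80211.
+ */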
+static void
+wpi_parent(struct ieee80211com *ic)
{
- struct wpi_softc *sc = ifp->if_softc;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct wpi_softc *sc = ic->ic_softc;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
- struct ifreq *ifr = (struct ifreq *) data;
- int error = 0;
- switch (cmd) {
- case SIOCGIFADDR:
- error = ether_ioctl(ifp, cmd, data);
- break;
- case SIOCSIFFLAGS:
- if (ifp->if_flags & IFF_UP) {
- wpi_init(sc);
-
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 &&
- vap != NULL)
- ieee80211_stop(vap);
- } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
- wpi_stop(sc);
- break;
- case SIOCGIFMEDIA:
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- default:
- error = EINVAL;
- break;
- }
- return error;
+ if (ic->ic_nrunning > 0) {
+ if (wpi_init(sc) == 0) {
+ ieee80211_notify_radio(ic, 1);
+ ieee80211_start_all(ic);
+ } else {
+ ieee80211_notify_radio(ic, 0);
+ ieee80211_stop(vap);
+ }
+ } else
+ wpi_stop(sc);
}
/*
* Send a command to the firmware.
*/
static int
wpi_cmd(struct wpi_softc *sc, int code, const void *buf, size_t size,
int async)
{
struct wpi_tx_ring *ring = &sc->txq[WPI_CMD_QUEUE_NUM];
struct wpi_tx_desc *desc;
struct wpi_tx_data *data;
struct wpi_tx_cmd *cmd;
struct mbuf *m;
bus_addr_t paddr;
int totlen, error;
WPI_TXQ_LOCK(sc);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
- if (sc->txq_active == 0) {
+ if (sc->sc_running == 0) {
/* wpi_stop() was called */
- error = 0;
+ if (code == WPI_CMD_SCAN)
+ error = ENETDOWN;
+ else
+ error = 0;
+
goto fail;
}
if (async == 0)
WPI_LOCK_ASSERT(sc);
DPRINTF(sc, WPI_DEBUG_CMD, "%s: cmd %s size %zu async %d\n",
__func__, wpi_cmd_str(code), size, async);
desc = &ring->desc[ring->cur];
data = &ring->data[ring->cur];
totlen = 4 + size;
if (size > sizeof cmd->data) {
/* Command is too large to fit in a descriptor. */
if (totlen > MCLBYTES) {
error = EINVAL;
goto fail;
}
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
if (m == NULL) {
error = ENOMEM;
goto fail;
}
cmd = mtod(m, struct wpi_tx_cmd *);
error = bus_dmamap_load(ring->data_dmat, data->map, cmd,
totlen, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT);
if (error != 0) {
m_freem(m);
goto fail;
}
data->m = m;
} else {
cmd = &ring->cmd[ring->cur];
paddr = data->cmd_paddr;
}
cmd->code = code;
cmd->flags = 0;
cmd->qid = ring->qid;
cmd->idx = ring->cur;
memcpy(cmd->data, buf, size);
desc->nsegs = 1 + (WPI_PAD32(size) << 4);
desc->segs[0].addr = htole32(paddr);
desc->segs[0].len = htole32(totlen);
if (size > sizeof cmd->data) {
bus_dmamap_sync(ring->data_dmat, data->map,
BUS_DMASYNC_PREWRITE);
} else {
bus_dmamap_sync(ring->data_dmat, ring->cmd_dma.map,
BUS_DMASYNC_PREWRITE);
}
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
/* Kick command ring. */
ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT;
sc->sc_update_tx_ring(sc, ring);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
WPI_TXQ_UNLOCK(sc);
- if (async)
- return 0;
+ return async ? 0 : mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz);
- return mtx_sleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz);
-
fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
WPI_TXQ_UNLOCK(sc);
return error;
}
/*
* Configure HW multi-rate retries.
*/
static int
wpi_mrr_setup(struct wpi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct wpi_mrr_setup mrr;
int i, error;
/* CCK rates (not used with 802.11a). */
for (i = WPI_RIDX_CCK1; i <= WPI_RIDX_CCK11; i++) {
mrr.rates[i].flags = 0;
mrr.rates[i].plcp = wpi_ridx_to_plcp[i];
/* Fall back to the next lower CCK rate (if any). */
mrr.rates[i].next =
(i == WPI_RIDX_CCK1) ? WPI_RIDX_CCK1 : i - 1;
/* Try twice at this rate before falling back to "next". */
mrr.rates[i].ntries = WPI_NTRIES_DEFAULT;
}
/* OFDM rates (not used with 802.11b). */
for (i = WPI_RIDX_OFDM6; i <= WPI_RIDX_OFDM54; i++) {
mrr.rates[i].flags = 0;
mrr.rates[i].plcp = wpi_ridx_to_plcp[i];
/* Fall back to the next lower rate (if any). */
/* We allow fallback from OFDM/6 to CCK/2 in 11b/g mode. */
mrr.rates[i].next = (i == WPI_RIDX_OFDM6) ?
((ic->ic_curmode == IEEE80211_MODE_11A) ?
WPI_RIDX_OFDM6 : WPI_RIDX_CCK2) :
i - 1;
/* Try twice at this rate before falling back to "next". */
mrr.rates[i].ntries = WPI_NTRIES_DEFAULT;
}
/* Setup MRR for control frames. */
mrr.which = htole32(WPI_MRR_CTL);
error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not setup MRR for control frames\n");
return error;
}
/* Setup MRR for data frames. */
mrr.which = htole32(WPI_MRR_DATA);
error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0);
if (error != 0) {
device_printf(sc->sc_dev,
"could not setup MRR for data frames\n");
return error;
}
return 0;
}
static int
wpi_add_node(struct wpi_softc *sc, struct ieee80211_node *ni)
{
struct ieee80211com *ic = ni->ni_ic;
struct wpi_vap *wvp = WPI_VAP(ni->ni_vap);
struct wpi_node *wn = WPI_NODE(ni);
struct wpi_node_info node;
int error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
if (wn->id == WPI_ID_UNDEFINED)
return EINVAL;
memset(&node, 0, sizeof node);
IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
node.id = wn->id;
node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
node.action = htole32(WPI_ACTION_SET_RATE);
node.antenna = WPI_ANTENNA_BOTH;
DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding node %d (%s)\n", __func__,
wn->id, ether_sprintf(ni->ni_macaddr));
error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: wpi_cmd() call failed with error code %d\n", __func__,
error);
return error;
}
if (wvp->wv_gtk != 0) {
error = wpi_set_global_keys(ni);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: error while setting global keys\n", __func__);
return ENXIO;
}
}
return 0;
}
/*
* Broadcast node is used to send group-addressed and management frames.
*/
static int
wpi_add_broadcast_node(struct wpi_softc *sc, int async)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct wpi_node_info node;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
memset(&node, 0, sizeof node);
- IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr);
+ IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr);
node.id = WPI_ID_BROADCAST;
node.plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
node.action = htole32(WPI_ACTION_SET_RATE);
node.antenna = WPI_ANTENNA_BOTH;
DPRINTF(sc, WPI_DEBUG_NODE, "%s: adding broadcast node\n", __func__);
return wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, async);
}
static int
wpi_add_sta_node(struct wpi_softc *sc, struct ieee80211_node *ni)
{
struct wpi_node *wn = WPI_NODE(ni);
int error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
wn->id = wpi_add_node_entry_sta(sc);
if ((error = wpi_add_node(sc, ni)) != 0) {
wpi_del_node_entry(sc, wn->id);
wn->id = WPI_ID_UNDEFINED;
return error;
}
return 0;
}
static int
wpi_add_ibss_node(struct wpi_softc *sc, struct ieee80211_node *ni)
{
struct wpi_node *wn = WPI_NODE(ni);
int error;
KASSERT(wn->id == WPI_ID_UNDEFINED,
("the node %d was added before", wn->id));
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
if ((wn->id = wpi_add_node_entry_adhoc(sc)) == WPI_ID_UNDEFINED) {
device_printf(sc->sc_dev, "%s: h/w table is full\n", __func__);
return ENOMEM;
}
if ((error = wpi_add_node(sc, ni)) != 0) {
wpi_del_node_entry(sc, wn->id);
wn->id = WPI_ID_UNDEFINED;
return error;
}
return 0;
}
static void
wpi_del_node(struct wpi_softc *sc, struct ieee80211_node *ni)
{
struct wpi_node *wn = WPI_NODE(ni);
struct wpi_cmd_del_node node;
int error;
KASSERT(wn->id != WPI_ID_UNDEFINED, ("undefined node id passed"));
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
memset(&node, 0, sizeof node);
IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
node.count = 1;
DPRINTF(sc, WPI_DEBUG_NODE, "%s: deleting node %d (%s)\n", __func__,
wn->id, ether_sprintf(ni->ni_macaddr));
error = wpi_cmd(sc, WPI_CMD_DEL_NODE, &node, sizeof node, 1);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not delete node %u, error %d\n", __func__,
wn->id, error);
}
}
static int
wpi_updateedca(struct ieee80211com *ic)
{
#define WPI_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
struct wpi_softc *sc = ic->ic_softc;
struct wpi_edca_params cmd;
int aci, error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
memset(&cmd, 0, sizeof cmd);
cmd.flags = htole32(WPI_EDCA_UPDATE);
for (aci = 0; aci < WME_NUM_AC; aci++) {
const struct wmeParams *ac =
&ic->ic_wme.wme_chanParams.cap_wmeParams[aci];
cmd.ac[aci].aifsn = ac->wmep_aifsn;
cmd.ac[aci].cwmin = htole16(WPI_EXP2(ac->wmep_logcwmin));
cmd.ac[aci].cwmax = htole16(WPI_EXP2(ac->wmep_logcwmax));
cmd.ac[aci].txoplimit =
htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit));
DPRINTF(sc, WPI_DEBUG_EDCA,
"setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d "
"txoplimit=%d\n", aci, cmd.ac[aci].aifsn,
cmd.ac[aci].cwmin, cmd.ac[aci].cwmax,
cmd.ac[aci].txoplimit);
}
error = wpi_cmd(sc, WPI_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return error;
#undef WPI_EXP2
}
static void
wpi_set_promisc(struct wpi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
uint32_t promisc_filter;
promisc_filter = WPI_FILTER_CTL;
if (vap != NULL && vap->iv_opmode != IEEE80211_M_HOSTAP)
promisc_filter |= WPI_FILTER_PROMISC;
- if (ifp->if_flags & IFF_PROMISC)
+ if (ic->ic_promisc > 0)
sc->rxon.filter |= htole32(promisc_filter);
else
sc->rxon.filter &= ~htole32(promisc_filter);
}
static void
wpi_update_promisc(struct ieee80211com *ic)
{
struct wpi_softc *sc = ic->ic_softc;
WPI_RXON_LOCK(sc);
wpi_set_promisc(sc);
if (wpi_send_rxon(sc, 1, 1) != 0) {
device_printf(sc->sc_dev, "%s: could not send RXON\n",
__func__);
}
WPI_RXON_UNLOCK(sc);
}
static void
wpi_update_mcast(struct ieee80211com *ic)
{
/* Ignore */
}
static void
wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on)
{
struct wpi_cmd_led led;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
led.which = which;
led.unit = htole32(100000); /* on/off in unit of 100ms */
led.off = off;
led.on = on;
(void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1);
}
static int
wpi_set_timing(struct wpi_softc *sc, struct ieee80211_node *ni)
{
struct wpi_cmd_timing cmd;
uint64_t val, mod;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
memset(&cmd, 0, sizeof cmd);
memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
cmd.bintval = htole16(ni->ni_intval);
cmd.lintval = htole16(10);
/* Compute remaining time until next beacon. */
val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU;
mod = le64toh(cmd.tstamp) % val;
cmd.binitval = htole32((uint32_t)(val - mod));
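/* Example: with a 100 TU interval (102400 us) and a timestamp of 250000 us,
* mod = 45200, so the next beacon is due in 57200 us. */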
DPRINTF(sc, WPI_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n",
ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod));
return wpi_cmd(sc, WPI_CMD_TIMING, &cmd, sizeof cmd, 1);
}
/*
* This function is called periodically (every 60 seconds) to adjust output
* power to temperature changes.
*/
static void
wpi_power_calibration(struct wpi_softc *sc)
{
int temp;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
/* Update sensor data. */
temp = (int)WPI_READ(sc, WPI_UCODE_GP2);
DPRINTF(sc, WPI_DEBUG_TEMP, "Temp in calibration is: %d\n", temp);
/* Sanity-check read value. */
if (temp < -260 || temp > 25) {
/* This can't be correct, ignore. */
DPRINTF(sc, WPI_DEBUG_TEMP,
"out-of-range temperature reported: %d\n", temp);
return;
}
DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d->%d\n", sc->temp, temp);
/* Adjust Tx power if need be. */
if (abs(temp - sc->temp) <= 6)
return;
sc->temp = temp;
if (wpi_set_txpower(sc, 1) != 0) {
/* just warn, too bad for the automatic calibration... */
device_printf(sc->sc_dev,"could not adjust Tx power\n");
}
}
/*
* Set TX power for current channel.
*/
static int
wpi_set_txpower(struct wpi_softc *sc, int async)
{
struct wpi_power_group *group;
struct wpi_cmd_txpower cmd;
uint8_t chan;
int idx, is_chan_5ghz, i;
/* Retrieve current channel from last RXON. */
chan = sc->rxon.chan;
is_chan_5ghz = (sc->rxon.flags & htole32(WPI_RXON_24GHZ)) == 0;
/* Find the TX power group to which this channel belongs. */
if (is_chan_5ghz) {
for (group = &sc->groups[1]; group < &sc->groups[4]; group++)
if (chan <= group->chan)
break;
} else
group = &sc->groups[0];
memset(&cmd, 0, sizeof cmd);
cmd.band = is_chan_5ghz ? WPI_BAND_5GHZ : WPI_BAND_2GHZ;
cmd.chan = htole16(chan);
/* Set TX power for all OFDM and CCK rates. */
for (i = 0; i <= WPI_RIDX_MAX ; i++) {
/* Retrieve TX power for this channel/rate. */
idx = wpi_get_power_index(sc, group, chan, is_chan_5ghz, i);
cmd.rates[i].plcp = wpi_ridx_to_plcp[i];
if (is_chan_5ghz) {
cmd.rates[i].rf_gain = wpi_rf_gain_5ghz[idx];
cmd.rates[i].dsp_gain = wpi_dsp_gain_5ghz[idx];
} else {
cmd.rates[i].rf_gain = wpi_rf_gain_2ghz[idx];
cmd.rates[i].dsp_gain = wpi_dsp_gain_2ghz[idx];
}
DPRINTF(sc, WPI_DEBUG_TEMP,
"chan %d/ridx %d: power index %d\n", chan, i, idx);
}
return wpi_cmd(sc, WPI_CMD_TXPOWER, &cmd, sizeof cmd, async);
}
/*
* Determine Tx power index for a given channel/rate combination.
* This takes into account the regulatory information from EEPROM and the
* current temperature.
*/
static int
wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group,
uint8_t chan, int is_chan_5ghz, int ridx)
{
/* Fixed-point arithmetic division using a n-bit fractional part. */
#define fdivround(a, b, n) \
((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
/* Linear interpolation. */
#define interpolate(x, x1, y1, x2, y2, n) \
((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
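/* Example: interpolate(5, 0, 10, 10, 30, 19) = 10 + fdivround(5 * 20, 10, 19) = 20;
* the macros simply do linear interpolation with the division rounded in fixed point. */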
struct wpi_power_sample *sample;
int pwr, idx;
/* Default TX power is group maximum TX power minus 3dB. */
pwr = group->maxpwr / 2;
/* Decrease TX power for highest OFDM rates to reduce distortion. */
switch (ridx) {
case WPI_RIDX_OFDM36:
pwr -= is_chan_5ghz ? 5 : 0;
break;
case WPI_RIDX_OFDM48:
pwr -= is_chan_5ghz ? 10 : 7;
break;
case WPI_RIDX_OFDM54:
pwr -= is_chan_5ghz ? 12 : 9;
break;
}
/* Never exceed the channel maximum allowed TX power. */
pwr = min(pwr, sc->maxpwr[chan]);
/* Retrieve TX power index into gain tables from samples. */
for (sample = group->samples; sample < &group->samples[3]; sample++)
if (pwr > sample[1].power)
break;
/* Fixed-point linear interpolation using a 19-bit fractional part. */
idx = interpolate(pwr, sample[0].power, sample[0].index,
sample[1].power, sample[1].index, 19);
/*-
* Adjust power index based on current temperature:
* - if cooler than factory-calibrated: decrease output power
* - if warmer than factory-calibrated: increase output power
*/
idx -= (sc->temp - group->temp) * 11 / 100;
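/* e.g. running 20 units above the calibration temperature lowers the index
* by 2 (20 * 11 / 100), increasing output power. */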
/* Decrease TX power for CCK rates (-5dB). */
if (ridx >= WPI_RIDX_CCK1)
idx += 10;
/* Make sure idx stays in a valid range. */
if (idx < 0)
return 0;
if (idx > WPI_MAX_PWR_INDEX)
return WPI_MAX_PWR_INDEX;
return idx;
#undef interpolate
#undef fdivround
}
/*
* Set STA mode power saving level (between 0 and 5).
* Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
*/
static int
wpi_set_pslevel(struct wpi_softc *sc, uint8_t dtim, int level, int async)
{
struct wpi_pmgt_cmd cmd;
const struct wpi_pmgt *pmgt;
uint32_t max, skip_dtim;
uint32_t reg;
int i;
DPRINTF(sc, WPI_DEBUG_PWRSAVE,
"%s: dtim=%d, level=%d, async=%d\n",
__func__, dtim, level, async);
/* Select which PS parameters to use. */
if (dtim <= 10)
pmgt = &wpi_pmgt[0][level];
else
pmgt = &wpi_pmgt[1][level];
memset(&cmd, 0, sizeof cmd);
WPI_TXQ_LOCK(sc);
if (level != 0) { /* not CAM */
cmd.flags |= htole16(WPI_PS_ALLOW_SLEEP);
sc->sc_flags |= WPI_PS_PATH;
} else
sc->sc_flags &= ~WPI_PS_PATH;
WPI_TXQ_UNLOCK(sc);
/* Retrieve PCIe Active State Power Management (ASPM). */
reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
if (!(reg & 0x1)) /* L0s Entry disabled. */
cmd.flags |= htole16(WPI_PS_PCI_PMGT);
cmd.rxtimeout = htole32(pmgt->rxtimeout * IEEE80211_DUR_TU);
cmd.txtimeout = htole32(pmgt->txtimeout * IEEE80211_DUR_TU);
if (dtim == 0) {
dtim = 1;
skip_dtim = 0;
} else
skip_dtim = pmgt->skip_dtim;
if (skip_dtim != 0) {
cmd.flags |= htole16(WPI_PS_SLEEP_OVER_DTIM);
max = pmgt->intval[4];
if (max == (uint32_t)-1)
max = dtim * (skip_dtim + 1);
else if (max > dtim)
max = (max / dtim) * dtim;
} else
max = dtim;
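/* Clamp each power-save interval to the DTIM-derived maximum computed above. */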
for (i = 0; i < 5; i++)
cmd.intval[i] = htole32(MIN(max, pmgt->intval[i]));
return wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
}
static int
wpi_send_btcoex(struct wpi_softc *sc)
{
struct wpi_bluetooth cmd;
memset(&cmd, 0, sizeof cmd);
cmd.flags = WPI_BT_COEX_MODE_4WIRE;
cmd.lead_time = WPI_BT_LEAD_TIME_DEF;
cmd.max_kill = WPI_BT_MAX_KILL_DEF;
DPRINTF(sc, WPI_DEBUG_RESET, "%s: configuring bluetooth coexistence\n",
__func__);
return wpi_cmd(sc, WPI_CMD_BT_COEX, &cmd, sizeof(cmd), 0);
}
static int
wpi_send_rxon(struct wpi_softc *sc, int assoc, int async)
{
int error;
if (async)
WPI_RXON_LOCK_ASSERT(sc);
if (assoc && wpi_check_bss_filter(sc) != 0) {
struct wpi_assoc rxon_assoc;
rxon_assoc.flags = sc->rxon.flags;
rxon_assoc.filter = sc->rxon.filter;
rxon_assoc.ofdm_mask = sc->rxon.ofdm_mask;
rxon_assoc.cck_mask = sc->rxon.cck_mask;
rxon_assoc.reserved = 0;
error = wpi_cmd(sc, WPI_CMD_RXON_ASSOC, &rxon_assoc,
sizeof (struct wpi_assoc), async);
if (error != 0) {
device_printf(sc->sc_dev,
"RXON_ASSOC command failed, error %d\n", error);
return error;
}
} else {
if (async) {
WPI_NT_LOCK(sc);
error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon,
sizeof (struct wpi_rxon), async);
if (error == 0)
wpi_clear_node_table(sc);
WPI_NT_UNLOCK(sc);
} else {
error = wpi_cmd(sc, WPI_CMD_RXON, &sc->rxon,
sizeof (struct wpi_rxon), async);
if (error == 0)
wpi_clear_node_table(sc);
}
if (error != 0) {
device_printf(sc->sc_dev,
"RXON command failed, error %d\n", error);
return error;
}
/* Add broadcast node. */
error = wpi_add_broadcast_node(sc, async);
if (error != 0) {
device_printf(sc->sc_dev,
"could not add broadcast node, error %d\n", error);
return error;
}
}
/* Configuration has changed, set Tx power accordingly. */
if ((error = wpi_set_txpower(sc, async)) != 0) {
device_printf(sc->sc_dev,
"%s: could not set TX power, error %d\n", __func__, error);
return error;
}
return 0;
}
/**
* Configure the card to listen to a particular channel; this transitions the
* card into being able to receive frames from remote devices.
*/
static int
wpi_config(struct wpi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
struct ieee80211_channel *c = ic->ic_curchan;
int error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
/* Set power saving level to CAM during initialization. */
if ((error = wpi_set_pslevel(sc, 0, 0, 0)) != 0) {
device_printf(sc->sc_dev,
"%s: could not set power saving level\n", __func__);
return error;
}
/* Configure bluetooth coexistence. */
if ((error = wpi_send_btcoex(sc)) != 0) {
device_printf(sc->sc_dev,
"could not configure bluetooth coexistence\n");
return error;
}
/* Configure adapter. */
memset(&sc->rxon, 0, sizeof (struct wpi_rxon));
IEEE80211_ADDR_COPY(sc->rxon.myaddr, vap->iv_myaddr);
/* Set default channel. */
sc->rxon.chan = ieee80211_chan2ieee(ic, c);
sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
if (IEEE80211_IS_CHAN_2GHZ(c))
sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
sc->rxon.filter = WPI_FILTER_MULTICAST;
switch (ic->ic_opmode) {
case IEEE80211_M_STA:
sc->rxon.mode = WPI_MODE_STA;
break;
case IEEE80211_M_IBSS:
sc->rxon.mode = WPI_MODE_IBSS;
sc->rxon.filter |= WPI_FILTER_BEACON;
break;
case IEEE80211_M_HOSTAP:
/* XXX workaround for beaconing */
sc->rxon.mode = WPI_MODE_IBSS;
sc->rxon.filter |= WPI_FILTER_ASSOC | WPI_FILTER_PROMISC;
break;
case IEEE80211_M_AHDEMO:
sc->rxon.mode = WPI_MODE_HOSTAP;
break;
case IEEE80211_M_MONITOR:
sc->rxon.mode = WPI_MODE_MONITOR;
break;
default:
device_printf(sc->sc_dev, "unknown opmode %d\n",
ic->ic_opmode);
return EINVAL;
}
sc->rxon.filter = htole32(sc->rxon.filter);
wpi_set_promisc(sc);
sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
/* XXX Current configuration may be unusable. */
if (IEEE80211_IS_CHAN_NOADHOC(c) && sc->rxon.mode == WPI_MODE_IBSS) {
device_printf(sc->sc_dev,
"%s: invalid channel (%d) selected for IBSS mode\n",
__func__, ieee80211_chan2ieee(ic, c));
return EINVAL;
}
if ((error = wpi_send_rxon(sc, 0, 0)) != 0) {
device_printf(sc->sc_dev, "%s: could not send RXON\n",
__func__);
return error;
}
/* Set up rate scaling. */
if ((error = wpi_mrr_setup(sc)) != 0) {
device_printf(sc->sc_dev, "could not setup MRR, error %d\n",
error);
return error;
}
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
}
static uint16_t
wpi_get_active_dwell_time(struct wpi_softc *sc,
struct ieee80211_channel *c, uint8_t n_probes)
{
/* No channel? Default to 2GHz settings. */
if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) {
return (WPI_ACTIVE_DWELL_TIME_2GHZ +
WPI_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1));
}
/* 5GHz dwell time. */
return (WPI_ACTIVE_DWELL_TIME_5GHZ +
WPI_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1));
}
/*
* Limit the total dwell time.
*
* Returns the dwell time in milliseconds.
*/
static uint16_t
wpi_limit_dwell(struct wpi_softc *sc, uint16_t dwell_time)
{
- struct ieee80211com *ic = sc->sc_ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
int bintval = 0;
/* bintval is in TU (1.024mS) */
if (vap != NULL)
bintval = vap->iv_bss->ni_intval;
/*
* If it's non-zero, we should calculate the minimum of
* it and the DWELL_BASE.
*
* XXX Yes, the math should take into account that bintval
* is 1.024mS, not 1mS..
*/
if (bintval > 0) {
DPRINTF(sc, WPI_DEBUG_SCAN, "%s: bintval=%d\n", __func__,
bintval);
return (MIN(dwell_time, bintval - WPI_CHANNEL_TUNE_TIME * 2));
}
/* No association context? Default. */
return dwell_time;
}
static uint16_t
wpi_get_passive_dwell_time(struct wpi_softc *sc, struct ieee80211_channel *c)
{
uint16_t passive;
if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c))
passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_2GHZ;
else
passive = WPI_PASSIVE_DWELL_BASE + WPI_PASSIVE_DWELL_TIME_5GHZ;
/* Clamp to the beacon interval if we're associated. */
return (wpi_limit_dwell(sc, passive));
}
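/*
* Encode a scan pause of "time" TU as a number of whole beacon intervals plus
* a microsecond remainder (clamped to WPI_PAUSE_MAX_TIME).
*/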
static uint32_t
wpi_get_scan_pause_time(uint32_t time, uint16_t bintval)
{
uint32_t mod = (time % bintval) * IEEE80211_DUR_TU;
uint32_t nbeacons = time / bintval;
if (mod > WPI_PAUSE_MAX_TIME)
mod = WPI_PAUSE_MAX_TIME;
return WPI_PAUSE_SCAN(nbeacons, mod);
}
/*
* Send a scan request to the firmware.
*/
static int
wpi_scan(struct wpi_softc *sc, struct ieee80211_channel *c)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_scan_state *ss = ic->ic_scan;
struct ieee80211vap *vap = ss->ss_vap;
struct wpi_scan_hdr *hdr;
struct wpi_cmd_data *tx;
struct wpi_scan_essid *essids;
struct wpi_scan_chan *chan;
struct ieee80211_frame *wh;
struct ieee80211_rateset *rs;
uint16_t dwell_active, dwell_passive;
uint8_t *buf, *frm;
int bgscan, bintval, buflen, error, i, nssid;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
/*
* We are absolutely not allowed to send a scan command when another
* scan command is pending.
*/
if (callout_pending(&sc->scan_timeout)) {
device_printf(sc->sc_dev, "%s: called whilst scanning!\n",
__func__);
error = EAGAIN;
goto fail;
}
bgscan = wpi_check_bss_filter(sc);
bintval = vap->iv_bss->ni_intval;
if (bgscan != 0 &&
bintval < WPI_QUIET_TIME_DEFAULT + WPI_CHANNEL_TUNE_TIME * 2) {
error = EOPNOTSUPP;
goto fail;
}
buf = malloc(WPI_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
if (buf == NULL) {
device_printf(sc->sc_dev,
"%s: could not allocate buffer for scan command\n",
__func__);
error = ENOMEM;
goto fail;
}
hdr = (struct wpi_scan_hdr *)buf;
/*
* Move to the next channel if no packets are received within 10 msecs
* after sending the probe request.
*/
hdr->quiet_time = htole16(WPI_QUIET_TIME_DEFAULT);
hdr->quiet_threshold = htole16(1);
if (bgscan != 0) {
/*
* Max needs to be greater than active and passive and quiet!
* It's also in microseconds!
*/
hdr->max_svc = htole32(250 * IEEE80211_DUR_TU);
hdr->pause_svc = htole32(wpi_get_scan_pause_time(100,
bintval));
}
hdr->filter = htole32(WPI_FILTER_MULTICAST | WPI_FILTER_BEACON);
tx = (struct wpi_cmd_data *)(hdr + 1);
tx->flags = htole32(WPI_TX_AUTO_SEQ);
tx->id = WPI_ID_BROADCAST;
tx->lifetime = htole32(WPI_LIFETIME_INFINITE);
if (IEEE80211_IS_CHAN_5GHZ(c)) {
/* Send probe requests at 6Mbps. */
tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_OFDM6];
rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
} else {
hdr->flags = htole32(WPI_RXON_24GHZ | WPI_RXON_AUTO);
/* Send probe requests at 1Mbps. */
tx->plcp = wpi_ridx_to_plcp[WPI_RIDX_CCK1];
rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
}
essids = (struct wpi_scan_essid *)(tx + 1);
nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS);
for (i = 0; i < nssid; i++) {
essids[i].id = IEEE80211_ELEMID_SSID;
essids[i].len = MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
memcpy(essids[i].data, ss->ss_ssid[i].ssid, essids[i].len);
#ifdef WPI_DEBUG
if (sc->sc_debug & WPI_DEBUG_SCAN) {
printf("Scanning Essid: ");
ieee80211_print_essid(essids[i].data, essids[i].len);
printf("\n");
}
#endif
}
/*
* Build a probe request frame. Most of the following code is a
* copy & paste of what is done in net80211.
*/
wh = (struct ieee80211_frame *)(essids + WPI_SCAN_MAX_ESSIDS);
wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
IEEE80211_FC0_SUBTYPE_PROBE_REQ;
wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
- IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr);
IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
- IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr);
- *(uint16_t *)&wh->i_dur[0] = 0; /* filled by h/w */
- *(uint16_t *)&wh->i_seq[0] = 0; /* filled by h/w */
+ IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr);
frm = (uint8_t *)(wh + 1);
frm = ieee80211_add_ssid(frm, NULL, 0);
frm = ieee80211_add_rates(frm, rs);
if (rs->rs_nrates > IEEE80211_RATE_SIZE)
frm = ieee80211_add_xrates(frm, rs);
/* Set length of probe request. */
tx->len = htole16(frm - (uint8_t *)wh);
/*
* Construct information about the channel that we
* want to scan. The firmware expects this to be directly
* after the scan probe request.
*/
chan = (struct wpi_scan_chan *)frm;
chan->chan = htole16(ieee80211_chan2ieee(ic, c));
chan->flags = 0;
if (nssid) {
hdr->crc_threshold = WPI_SCAN_CRC_TH_DEFAULT;
chan->flags |= WPI_CHAN_NPBREQS(nssid);
} else
hdr->crc_threshold = WPI_SCAN_CRC_TH_NEVER;
if (!IEEE80211_IS_CHAN_PASSIVE(c))
chan->flags |= WPI_CHAN_ACTIVE;
/*
* Calculate the active/passive dwell times.
*/
-
dwell_active = wpi_get_active_dwell_time(sc, c, nssid);
dwell_passive = wpi_get_passive_dwell_time(sc, c);
/* Make sure they're valid. */
if (dwell_active > dwell_passive)
dwell_active = dwell_passive;
chan->active = htole16(dwell_active);
chan->passive = htole16(dwell_passive);
chan->dsp_gain = 0x6e; /* Default level */
if (IEEE80211_IS_CHAN_5GHZ(c))
chan->rf_gain = 0x3b;
else
chan->rf_gain = 0x28;
DPRINTF(sc, WPI_DEBUG_SCAN, "Scanning %u Passive: %d\n",
chan->chan, IEEE80211_IS_CHAN_PASSIVE(c));
hdr->nchan++;
if (hdr->nchan == 1 && sc->rxon.chan == chan->chan) {
/* XXX Force probe request transmission. */
memcpy(chan + 1, chan, sizeof (struct wpi_scan_chan));
chan++;
/* Reduce unnecessary delay. */
chan->flags = 0;
chan->passive = chan->active = hdr->quiet_time;
hdr->nchan++;
}
chan++;
buflen = (uint8_t *)chan - buf;
hdr->len = htole16(buflen);
DPRINTF(sc, WPI_DEBUG_CMD, "sending scan command nchan=%d\n",
hdr->nchan);
error = wpi_cmd(sc, WPI_CMD_SCAN, buf, buflen, 1);
free(buf, M_DEVBUF);
if (error != 0)
goto fail;
callout_reset(&sc->scan_timeout, 5*hz, wpi_scan_timeout, sc);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
fail: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
return error;
}
static int
wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_node *ni = vap->iv_bss;
struct ieee80211_channel *c = ni->ni_chan;
int error;
WPI_RXON_LOCK(sc);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
/* Update adapter configuration. */
sc->rxon.associd = 0;
sc->rxon.filter &= ~htole32(WPI_FILTER_BSS);
IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
sc->rxon.chan = ieee80211_chan2ieee(ic, c);
sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
if (IEEE80211_IS_CHAN_2GHZ(c))
sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
if (ic->ic_flags & IEEE80211_F_SHSLOT)
sc->rxon.flags |= htole32(WPI_RXON_SHSLOT);
if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE);
if (IEEE80211_IS_CHAN_A(c)) {
sc->rxon.cck_mask = 0;
sc->rxon.ofdm_mask = 0x15;
} else if (IEEE80211_IS_CHAN_B(c)) {
sc->rxon.cck_mask = 0x03;
sc->rxon.ofdm_mask = 0;
} else {
/* Assume 802.11b/g. */
sc->rxon.cck_mask = 0x0f;
sc->rxon.ofdm_mask = 0x15;
}
DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n",
sc->rxon.chan, sc->rxon.flags, sc->rxon.cck_mask,
sc->rxon.ofdm_mask);
if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
device_printf(sc->sc_dev, "%s: could not send RXON\n",
__func__);
}
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
WPI_RXON_UNLOCK(sc);
return error;
}
static int
wpi_config_beacon(struct wpi_vap *wvp)
{
struct ieee80211com *ic = wvp->wv_vap.iv_ic;
struct ieee80211_beacon_offsets *bo = &wvp->wv_boff;
struct wpi_buf *bcn = &wvp->wv_bcbuf;
struct wpi_softc *sc = ic->ic_softc;
struct wpi_cmd_beacon *cmd = (struct wpi_cmd_beacon *)&bcn->data;
struct ieee80211_tim_ie *tie;
struct mbuf *m;
uint8_t *ptr;
int error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
WPI_VAP_LOCK_ASSERT(wvp);
cmd->len = htole16(bcn->m->m_pkthdr.len);
cmd->plcp = (ic->ic_curmode == IEEE80211_MODE_11A) ?
wpi_ridx_to_plcp[WPI_RIDX_OFDM6] : wpi_ridx_to_plcp[WPI_RIDX_CCK1];
/* XXX seems to be unused */
if (*(bo->bo_tim) == IEEE80211_ELEMID_TIM) {
tie = (struct ieee80211_tim_ie *) bo->bo_tim;
ptr = mtod(bcn->m, uint8_t *);
cmd->tim = htole16(bo->bo_tim - ptr);
cmd->timsz = tie->tim_len;
}
/* Necessary for recursion in ieee80211_beacon_update(). */
m = bcn->m;
bcn->m = m_dup(m, M_NOWAIT);
if (bcn->m == NULL) {
device_printf(sc->sc_dev,
"%s: could not copy beacon frame\n", __func__);
error = ENOMEM;
goto end;
}
if ((error = wpi_cmd2(sc, bcn)) != 0) {
device_printf(sc->sc_dev,
"%s: could not update beacon frame, error %d", __func__,
error);
}
/* Restore mbuf. */
end: bcn->m = m;
return error;
}
static int
wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni)
{
struct wpi_vap *wvp = WPI_VAP(ni->ni_vap);
struct wpi_buf *bcn = &wvp->wv_bcbuf;
struct ieee80211_beacon_offsets *bo = &wvp->wv_boff;
struct mbuf *m;
int error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
if (ni->ni_chan == IEEE80211_CHAN_ANYC)
return EINVAL;
m = ieee80211_beacon_alloc(ni, bo);
if (m == NULL) {
device_printf(sc->sc_dev,
"%s: could not allocate beacon frame\n", __func__);
return ENOMEM;
}
WPI_VAP_LOCK(wvp);
if (bcn->m != NULL)
m_freem(bcn->m);
bcn->m = m;
error = wpi_config_beacon(wvp);
WPI_VAP_UNLOCK(wvp);
return error;
}
static void
wpi_update_beacon(struct ieee80211vap *vap, int item)
{
struct wpi_softc *sc = vap->iv_ic->ic_softc;
struct wpi_vap *wvp = WPI_VAP(vap);
struct wpi_buf *bcn = &wvp->wv_bcbuf;
struct ieee80211_beacon_offsets *bo = &wvp->wv_boff;
struct ieee80211_node *ni = vap->iv_bss;
int mcast = 0;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
WPI_VAP_LOCK(wvp);
if (bcn->m == NULL) {
bcn->m = ieee80211_beacon_alloc(ni, bo);
if (bcn->m == NULL) {
device_printf(sc->sc_dev,
"%s: could not allocate beacon frame\n", __func__);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR,
__func__);
WPI_VAP_UNLOCK(wvp);
return;
}
}
WPI_VAP_UNLOCK(wvp);
if (item == IEEE80211_BEACON_TIM)
mcast = 1; /* TODO */
setbit(bo->bo_flags, item);
ieee80211_beacon_update(ni, bo, bcn->m, mcast);
WPI_VAP_LOCK(wvp);
wpi_config_beacon(wvp);
WPI_VAP_UNLOCK(wvp);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
}
static void
wpi_newassoc(struct ieee80211_node *ni, int isnew)
{
struct ieee80211vap *vap = ni->ni_vap;
struct wpi_softc *sc = ni->ni_ic->ic_softc;
struct wpi_node *wn = WPI_NODE(ni);
int error;
WPI_NT_LOCK(sc);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
if (vap->iv_opmode != IEEE80211_M_STA && wn->id == WPI_ID_UNDEFINED) {
if ((error = wpi_add_ibss_node(sc, ni)) != 0) {
device_printf(sc->sc_dev,
"%s: could not add IBSS node, error %d\n",
__func__, error);
}
}
WPI_NT_UNLOCK(sc);
}
static int
wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_node *ni = vap->iv_bss;
struct ieee80211_channel *c = ni->ni_chan;
int error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
if (vap->iv_opmode == IEEE80211_M_MONITOR) {
/* Link LED blinks while monitoring. */
wpi_set_led(sc, WPI_LED_LINK, 5, 5);
return 0;
}
/* XXX kernel panic workaround */
if (c == IEEE80211_CHAN_ANYC) {
device_printf(sc->sc_dev, "%s: incomplete configuration\n",
__func__);
return EINVAL;
}
if ((error = wpi_set_timing(sc, ni)) != 0) {
device_printf(sc->sc_dev,
"%s: could not set timing, error %d\n", __func__, error);
return error;
}
/* Update adapter configuration. */
WPI_RXON_LOCK(sc);
IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
sc->rxon.associd = htole16(IEEE80211_NODE_AID(ni));
sc->rxon.chan = ieee80211_chan2ieee(ic, c);
sc->rxon.flags = htole32(WPI_RXON_TSF | WPI_RXON_CTS_TO_SELF);
if (IEEE80211_IS_CHAN_2GHZ(c))
sc->rxon.flags |= htole32(WPI_RXON_AUTO | WPI_RXON_24GHZ);
if (ic->ic_flags & IEEE80211_F_SHSLOT)
sc->rxon.flags |= htole32(WPI_RXON_SHSLOT);
if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
sc->rxon.flags |= htole32(WPI_RXON_SHPREAMBLE);
if (IEEE80211_IS_CHAN_A(c)) {
sc->rxon.cck_mask = 0;
sc->rxon.ofdm_mask = 0x15;
} else if (IEEE80211_IS_CHAN_B(c)) {
sc->rxon.cck_mask = 0x03;
sc->rxon.ofdm_mask = 0;
} else {
/* Assume 802.11b/g. */
sc->rxon.cck_mask = 0x0f;
sc->rxon.ofdm_mask = 0x15;
}
sc->rxon.filter |= htole32(WPI_FILTER_BSS);
DPRINTF(sc, WPI_DEBUG_STATE, "rxon chan %d flags %x\n",
sc->rxon.chan, sc->rxon.flags);
if ((error = wpi_send_rxon(sc, 0, 1)) != 0) {
device_printf(sc->sc_dev, "%s: could not send RXON\n",
__func__);
return error;
}
/* Start periodic calibration timer. */
callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc);
WPI_RXON_UNLOCK(sc);
if (vap->iv_opmode == IEEE80211_M_IBSS ||
vap->iv_opmode == IEEE80211_M_HOSTAP) {
if ((error = wpi_setup_beacon(sc, ni)) != 0) {
device_printf(sc->sc_dev,
"%s: could not setup beacon, error %d\n", __func__,
error);
return error;
}
}
if (vap->iv_opmode == IEEE80211_M_STA) {
/* Add BSS node. */
WPI_NT_LOCK(sc);
error = wpi_add_sta_node(sc, ni);
WPI_NT_UNLOCK(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not add BSS node, error %d\n", __func__,
error);
return error;
}
}
/* Link LED always on while associated. */
wpi_set_led(sc, WPI_LED_LINK, 0, 1);
/* Enable power-saving mode if requested by user. */
if ((vap->iv_flags & IEEE80211_F_PMGTON) &&
vap->iv_opmode != IEEE80211_M_IBSS)
(void)wpi_set_pslevel(sc, 0, 3, 1);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
return 0;
}
static int
wpi_load_key(struct ieee80211_node *ni, const struct ieee80211_key *k)
{
const struct ieee80211_cipher *cip = k->wk_cipher;
struct ieee80211vap *vap = ni->ni_vap;
struct wpi_softc *sc = ni->ni_ic->ic_softc;
struct wpi_node *wn = WPI_NODE(ni);
struct wpi_node_info node;
uint16_t kflags;
int error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
if (wpi_check_node_entry(sc, wn->id) == 0) {
device_printf(sc->sc_dev, "%s: node does not exist\n",
__func__);
return 0;
}
switch (cip->ic_cipher) {
case IEEE80211_CIPHER_AES_CCM:
kflags = WPI_KFLAG_CCMP;
break;
default:
device_printf(sc->sc_dev, "%s: unknown cipher %d\n", __func__,
cip->ic_cipher);
return 0;
}
kflags |= WPI_KFLAG_KID(k->wk_keyix);
if (k->wk_flags & IEEE80211_KEY_GROUP)
kflags |= WPI_KFLAG_MULTICAST;
memset(&node, 0, sizeof node);
node.id = wn->id;
node.control = WPI_NODE_UPDATE;
node.flags = WPI_FLAG_KEY_SET;
node.kflags = htole16(kflags);
memcpy(node.key, k->wk_key, k->wk_keylen);
again:
DPRINTF(sc, WPI_DEBUG_KEY,
"%s: setting %s key id %d for node %d (%s)\n", __func__,
(kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast", k->wk_keyix,
node.id, ether_sprintf(ni->ni_macaddr));
error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
if (error != 0) {
device_printf(sc->sc_dev, "can't update node info, error %d\n",
error);
return !error;
}
if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k &&
k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
kflags |= WPI_KFLAG_MULTICAST;
node.kflags = htole16(kflags);
goto again;
}
return 1;
}
static void
wpi_load_key_cb(void *arg, struct ieee80211_node *ni)
{
const struct ieee80211_key *k = arg;
struct ieee80211vap *vap = ni->ni_vap;
struct wpi_softc *sc = ni->ni_ic->ic_softc;
struct wpi_node *wn = WPI_NODE(ni);
int error;
if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED)
return;
WPI_NT_LOCK(sc);
error = wpi_load_key(ni, k);
WPI_NT_UNLOCK(sc);
if (error == 0) {
device_printf(sc->sc_dev, "%s: error while setting key\n",
__func__);
}
}
static int
wpi_set_global_keys(struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211_key *wk = &vap->iv_nw_keys[0];
int error = 1;
for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID] && error; wk++)
if (wk->wk_keyix != IEEE80211_KEYIX_NONE)
error = wpi_load_key(ni, wk);
return !error;
}
static int
wpi_del_key(struct ieee80211_node *ni, const struct ieee80211_key *k)
{
struct ieee80211vap *vap = ni->ni_vap;
struct wpi_softc *sc = ni->ni_ic->ic_softc;
struct wpi_node *wn = WPI_NODE(ni);
struct wpi_node_info node;
uint16_t kflags;
int error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
if (wpi_check_node_entry(sc, wn->id) == 0) {
DPRINTF(sc, WPI_DEBUG_KEY, "%s: node was removed\n", __func__);
return 1; /* Nothing to do. */
}
kflags = WPI_KFLAG_KID(k->wk_keyix);
if (k->wk_flags & IEEE80211_KEY_GROUP)
kflags |= WPI_KFLAG_MULTICAST;
memset(&node, 0, sizeof node);
node.id = wn->id;
node.control = WPI_NODE_UPDATE;
node.flags = WPI_FLAG_KEY_SET;
node.kflags = htole16(kflags);
again:
DPRINTF(sc, WPI_DEBUG_KEY, "%s: deleting %s key %d for node %d (%s)\n",
__func__, (kflags & WPI_KFLAG_MULTICAST) ? "group" : "ucast",
k->wk_keyix, node.id, ether_sprintf(ni->ni_macaddr));
error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1);
if (error != 0) {
device_printf(sc->sc_dev, "can't update node info, error %d\n",
error);
return !error;
}
if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k &&
k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
kflags |= WPI_KFLAG_MULTICAST;
node.kflags = htole16(kflags);
goto again;
}
return 1;
}
static void
wpi_del_key_cb(void *arg, struct ieee80211_node *ni)
{
const struct ieee80211_key *k = arg;
struct ieee80211vap *vap = ni->ni_vap;
struct wpi_softc *sc = ni->ni_ic->ic_softc;
struct wpi_node *wn = WPI_NODE(ni);
int error;
if (vap->iv_bss == ni && wn->id == WPI_ID_UNDEFINED)
return;
WPI_NT_LOCK(sc);
error = wpi_del_key(ni, k);
WPI_NT_UNLOCK(sc);
if (error == 0) {
device_printf(sc->sc_dev, "%s: error while deleting key\n",
__func__);
}
}
static int
wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k,
int set)
{
struct ieee80211com *ic = vap->iv_ic;
struct wpi_softc *sc = ic->ic_softc;
struct wpi_vap *wvp = WPI_VAP(vap);
struct ieee80211_node *ni;
int error, ni_ref = 0;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
/* Not for us. */
return 1;
}
if (!(k->wk_flags & IEEE80211_KEY_RECV)) {
/* XMIT keys are handled in wpi_tx_data(). */
return 1;
}
/* Handle group keys. */
if (&vap->iv_nw_keys[0] <= k &&
k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
WPI_NT_LOCK(sc);
if (set)
wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix);
else
wvp->wv_gtk &= ~WPI_VAP_KEY(k->wk_keyix);
WPI_NT_UNLOCK(sc);
if (vap->iv_state == IEEE80211_S_RUN) {
ieee80211_iterate_nodes(&ic->ic_sta,
set ? wpi_load_key_cb : wpi_del_key_cb,
__DECONST(void *, k));
}
return 1;
}
switch (vap->iv_opmode) {
case IEEE80211_M_STA:
ni = vap->iv_bss;
break;
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
case IEEE80211_M_HOSTAP:
ni = ieee80211_find_vap_node(&ic->ic_sta, vap, k->wk_macaddr);
if (ni == NULL)
return 0; /* should not happen */
ni_ref = 1;
break;
default:
device_printf(sc->sc_dev, "%s: unknown opmode %d\n", __func__,
vap->iv_opmode);
return 0;
}
WPI_NT_LOCK(sc);
if (set)
error = wpi_load_key(ni, k);
else
error = wpi_del_key(ni, k);
WPI_NT_UNLOCK(sc);
if (ni_ref)
ieee80211_node_decref(ni);
return error;
}
static int
wpi_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
const uint8_t mac[IEEE80211_ADDR_LEN])
{
return wpi_process_key(vap, k, 1);
}
static int
wpi_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
return wpi_process_key(vap, k, 0);
}
/*
* This function is called after the runtime firmware notifies us of its
* readiness (called in a process context).
*/
static int
wpi_post_alive(struct wpi_softc *sc)
{
int ntries, error;
/* Check (again) that the radio is not disabled. */
if ((error = wpi_nic_lock(sc)) != 0)
return error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
/* NB: Runtime firmware must be up and running. */
if (!(wpi_prph_read(sc, WPI_APMG_RFKILL) & 1)) {
device_printf(sc->sc_dev,
"RF switch: radio disabled (%s)\n", __func__);
wpi_nic_unlock(sc);
return EPERM; /* :-) */
}
wpi_nic_unlock(sc);
/* Wait for thermal sensor to calibrate. */
for (ntries = 0; ntries < 1000; ntries++) {
if ((sc->temp = (int)WPI_READ(sc, WPI_UCODE_GP2)) != 0)
break;
DELAY(10);
}
if (ntries == 1000) {
device_printf(sc->sc_dev,
"timeout waiting for thermal sensor calibration\n");
return ETIMEDOUT;
}
DPRINTF(sc, WPI_DEBUG_TEMP, "temperature %d\n", sc->temp);
return 0;
}
/*
* The firmware boot code is small and is intended to be copied directly into
* the NIC internal memory (no DMA transfer).
*/
static int
wpi_load_bootcode(struct wpi_softc *sc, const uint8_t *ucode, int size)
{
int error, ntries;
DPRINTF(sc, WPI_DEBUG_HW, "Loading microcode size 0x%x\n", size);
size /= sizeof (uint32_t);
if ((error = wpi_nic_lock(sc)) != 0)
return error;
/* Copy microcode image into NIC memory. */
wpi_prph_write_region_4(sc, WPI_BSM_SRAM_BASE,
(const uint32_t *)ucode, size);
wpi_prph_write(sc, WPI_BSM_WR_MEM_SRC, 0);
wpi_prph_write(sc, WPI_BSM_WR_MEM_DST, WPI_FW_TEXT_BASE);
wpi_prph_write(sc, WPI_BSM_WR_DWCOUNT, size);
/* Start boot load now. */
wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START);
/* Wait for transfer to complete. */
for (ntries = 0; ntries < 1000; ntries++) {
uint32_t status = WPI_READ(sc, WPI_FH_TX_STATUS);
DPRINTF(sc, WPI_DEBUG_HW,
"firmware status=0x%x, val=0x%x, result=0x%x\n", status,
WPI_FH_TX_STATUS_IDLE(6),
status & WPI_FH_TX_STATUS_IDLE(6));
if (status & WPI_FH_TX_STATUS_IDLE(6)) {
DPRINTF(sc, WPI_DEBUG_HW,
"Status Match! - ntries = %d\n", ntries);
break;
}
DELAY(10);
}
if (ntries == 1000) {
device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
__func__);
wpi_nic_unlock(sc);
return ETIMEDOUT;
}
/* Enable boot after power up. */
wpi_prph_write(sc, WPI_BSM_WR_CTRL, WPI_BSM_WR_CTRL_START_EN);
wpi_nic_unlock(sc);
return 0;
}
static int
wpi_load_firmware(struct wpi_softc *sc)
{
struct wpi_fw_info *fw = &sc->fw;
struct wpi_dma_info *dma = &sc->fw_dma;
int error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
/* Copy initialization sections into pre-allocated DMA-safe memory. */
memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz);
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
/* Tell adapter where to find initialization sections. */
if ((error = wpi_nic_lock(sc)) != 0)
return error;
wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr);
wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->init.datasz);
wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR,
dma->paddr + WPI_FW_DATA_MAXSZ);
wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
wpi_nic_unlock(sc);
/* Load firmware boot code. */
error = wpi_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
if (error != 0) {
device_printf(sc->sc_dev, "%s: could not load boot firmware\n",
__func__);
return error;
}
/* Now press "execute". */
WPI_WRITE(sc, WPI_RESET, 0);
/* Wait at most one second for first alive notification. */
if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
device_printf(sc->sc_dev,
"%s: timeout waiting for adapter to initialize, error %d\n",
__func__, error);
return error;
}
/* Copy runtime sections into pre-allocated DMA-safe memory. */
memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
memcpy(dma->vaddr + WPI_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz);
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
/* Tell adapter where to find runtime sections. */
if ((error = wpi_nic_lock(sc)) != 0)
return error;
wpi_prph_write(sc, WPI_BSM_DRAM_DATA_ADDR, dma->paddr);
wpi_prph_write(sc, WPI_BSM_DRAM_DATA_SIZE, fw->main.datasz);
wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_ADDR,
dma->paddr + WPI_FW_DATA_MAXSZ);
wpi_prph_write(sc, WPI_BSM_DRAM_TEXT_SIZE,
WPI_FW_UPDATED | fw->main.textsz);
wpi_nic_unlock(sc);
return 0;
}
static int
wpi_read_firmware(struct wpi_softc *sc)
{
const struct firmware *fp;
struct wpi_fw_info *fw = &sc->fw;
const struct wpi_firmware_hdr *hdr;
int error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
DPRINTF(sc, WPI_DEBUG_FIRMWARE,
"Attempting Loading Firmware from %s module\n", WPI_FW_NAME);
WPI_UNLOCK(sc);
fp = firmware_get(WPI_FW_NAME);
WPI_LOCK(sc);
if (fp == NULL) {
device_printf(sc->sc_dev,
"could not load firmware image '%s'\n", WPI_FW_NAME);
return EINVAL;
}
sc->fw_fp = fp;
if (fp->datasize < sizeof (struct wpi_firmware_hdr)) {
device_printf(sc->sc_dev,
"firmware file too short: %zu bytes\n", fp->datasize);
error = EINVAL;
goto fail;
}
fw->size = fp->datasize;
fw->data = (const uint8_t *)fp->data;
/* Extract firmware header information. */
hdr = (const struct wpi_firmware_hdr *)fw->data;
/* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW |
|HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */
fw->main.textsz = le32toh(hdr->rtextsz);
fw->main.datasz = le32toh(hdr->rdatasz);
fw->init.textsz = le32toh(hdr->itextsz);
fw->init.datasz = le32toh(hdr->idatasz);
fw->boot.textsz = le32toh(hdr->btextsz);
fw->boot.datasz = 0;
/* Sanity-check firmware header. */
if (fw->main.textsz > WPI_FW_TEXT_MAXSZ ||
fw->main.datasz > WPI_FW_DATA_MAXSZ ||
fw->init.textsz > WPI_FW_TEXT_MAXSZ ||
fw->init.datasz > WPI_FW_DATA_MAXSZ ||
fw->boot.textsz > WPI_FW_BOOT_TEXT_MAXSZ ||
(fw->boot.textsz & 3) != 0) {
device_printf(sc->sc_dev, "invalid firmware header\n");
error = EINVAL;
goto fail;
}
/* Check that all firmware sections fit. */
if (fw->size < sizeof (*hdr) + fw->main.textsz + fw->main.datasz +
fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
device_printf(sc->sc_dev,
"firmware file too short: %zu bytes\n", fw->size);
error = EINVAL;
goto fail;
}
/* Get pointers to firmware sections. */
fw->main.text = (const uint8_t *)(hdr + 1);
fw->main.data = fw->main.text + fw->main.textsz;
fw->init.text = fw->main.data + fw->main.datasz;
fw->init.data = fw->init.text + fw->init.textsz;
fw->boot.text = fw->init.data + fw->init.datasz;
DPRINTF(sc, WPI_DEBUG_FIRMWARE,
"Firmware Version: Major %d, Minor %d, Driver %d, \n"
"runtime (text: %u, data: %u) init (text: %u, data %u) "
"boot (text %u)\n", hdr->major, hdr->minor, le32toh(hdr->driver),
fw->main.textsz, fw->main.datasz,
fw->init.textsz, fw->init.datasz, fw->boot.textsz);
DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.text %p\n", fw->main.text);
DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->main.data %p\n", fw->main.data);
DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.text %p\n", fw->init.text);
DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->init.data %p\n", fw->init.data);
DPRINTF(sc, WPI_DEBUG_FIRMWARE, "fw->boot.text %p\n", fw->boot.text);
return 0;
fail: wpi_unload_firmware(sc);
return error;
}
/**
* Free the referenced firmware image
*/
static void
wpi_unload_firmware(struct wpi_softc *sc)
{
if (sc->fw_fp != NULL) {
firmware_put(sc->fw_fp, FIRMWARE_UNLOAD);
sc->fw_fp = NULL;
}
}
static int
wpi_clock_wait(struct wpi_softc *sc)
{
int ntries;
/* Set "initialization complete" bit. */
WPI_SETBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE);
/* Wait for clock stabilization. */
for (ntries = 0; ntries < 2500; ntries++) {
if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_MAC_CLOCK_READY)
return 0;
DELAY(100);
}
device_printf(sc->sc_dev,
"%s: timeout waiting for clock stabilization\n", __func__);
return ETIMEDOUT;
}
static int
wpi_apm_init(struct wpi_softc *sc)
{
uint32_t reg;
int error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
/* Disable L0s exit timer (NMI bug workaround). */
WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_DIS_L0S_TIMER);
/* Don't wait for ICH L0s (ICH bug workaround). */
WPI_SETBITS(sc, WPI_GIO_CHICKEN, WPI_GIO_CHICKEN_L1A_NO_L0S_RX);
/* Set FH wait threshold to max (HW bug under stress workaround). */
WPI_SETBITS(sc, WPI_DBG_HPET_MEM, 0xffff0000);
/* Retrieve PCIe Active State Power Management (ASPM). */
reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + 0x10, 1);
/* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
if (reg & 0x02) /* L1 Entry enabled. */
WPI_SETBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA);
else
WPI_CLRBITS(sc, WPI_GIO, WPI_GIO_L0S_ENA);
WPI_SETBITS(sc, WPI_ANA_PLL, WPI_ANA_PLL_INIT);
/* Wait for clock stabilization before accessing prph. */
if ((error = wpi_clock_wait(sc)) != 0)
return error;
if ((error = wpi_nic_lock(sc)) != 0)
return error;
/* Cleanup. */
wpi_prph_write(sc, WPI_APMG_CLK_DIS, 0x00000400);
wpi_prph_clrbits(sc, WPI_APMG_PS, 0x00000200);
/* Enable DMA and BSM (Bootstrap State Machine). */
wpi_prph_write(sc, WPI_APMG_CLK_EN,
WPI_APMG_CLK_CTRL_DMA_CLK_RQT | WPI_APMG_CLK_CTRL_BSM_CLK_RQT);
DELAY(20);
/* Disable L1-Active. */
wpi_prph_setbits(sc, WPI_APMG_PCI_STT, WPI_APMG_PCI_STT_L1A_DIS);
wpi_nic_unlock(sc);
return 0;
}
static void
wpi_apm_stop_master(struct wpi_softc *sc)
{
int ntries;
/* Stop busmaster DMA activity. */
WPI_SETBITS(sc, WPI_RESET, WPI_RESET_STOP_MASTER);
if ((WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_PS_MASK) ==
WPI_GP_CNTRL_MAC_PS)
return; /* Already asleep. */
for (ntries = 0; ntries < 100; ntries++) {
if (WPI_READ(sc, WPI_RESET) & WPI_RESET_MASTER_DISABLED)
return;
DELAY(10);
}
device_printf(sc->sc_dev, "%s: timeout waiting for master\n",
__func__);
}
static void
wpi_apm_stop(struct wpi_softc *sc)
{
wpi_apm_stop_master(sc);
/* Reset the entire device. */
WPI_SETBITS(sc, WPI_RESET, WPI_RESET_SW);
DELAY(10);
/* Clear "initialization complete" bit. */
WPI_CLRBITS(sc, WPI_GP_CNTRL, WPI_GP_CNTRL_INIT_DONE);
}
static void
wpi_nic_config(struct wpi_softc *sc)
{
uint32_t rev;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
/* voodoo from the Linux "driver".. */
rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1);
if ((rev & 0xc0) == 0x40)
WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MB);
else if (!(rev & 0x80))
WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_ALM_MM);
if (sc->cap == 0x80)
WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_SKU_MRC);
if ((sc->rev & 0xf0) == 0xd0)
WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D);
else
WPI_CLRBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_REV_D);
if (sc->type > 1)
WPI_SETBITS(sc, WPI_HW_IF_CONFIG, WPI_HW_IF_CONFIG_TYPE_B);
}
static int
wpi_hw_init(struct wpi_softc *sc)
{
int chnl, ntries, error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
/* Clear pending interrupts. */
WPI_WRITE(sc, WPI_INT, 0xffffffff);
if ((error = wpi_apm_init(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not power ON adapter, error %d\n", __func__,
error);
return error;
}
/* Select VMAIN power source. */
if ((error = wpi_nic_lock(sc)) != 0)
return error;
wpi_prph_clrbits(sc, WPI_APMG_PS, WPI_APMG_PS_PWR_SRC_MASK);
wpi_nic_unlock(sc);
/* Spin until VMAIN gets selected. */
for (ntries = 0; ntries < 5000; ntries++) {
if (WPI_READ(sc, WPI_GPIO_IN) & WPI_GPIO_IN_VMAIN)
break;
DELAY(10);
}
if (ntries == 5000) {
device_printf(sc->sc_dev, "timeout selecting power source\n");
return ETIMEDOUT;
}
/* Perform adapter initialization. */
wpi_nic_config(sc);
/* Initialize RX ring. */
if ((error = wpi_nic_lock(sc)) != 0)
return error;
/* Set physical address of RX ring. */
WPI_WRITE(sc, WPI_FH_RX_BASE, sc->rxq.desc_dma.paddr);
/* Set physical address of RX read pointer. */
WPI_WRITE(sc, WPI_FH_RX_RPTR_ADDR, sc->shared_dma.paddr +
offsetof(struct wpi_shared, next));
WPI_WRITE(sc, WPI_FH_RX_WPTR, 0);
/* Enable RX. */
WPI_WRITE(sc, WPI_FH_RX_CONFIG,
WPI_FH_RX_CONFIG_DMA_ENA |
WPI_FH_RX_CONFIG_RDRBD_ENA |
WPI_FH_RX_CONFIG_WRSTATUS_ENA |
WPI_FH_RX_CONFIG_MAXFRAG |
WPI_FH_RX_CONFIG_NRBD(WPI_RX_RING_COUNT_LOG) |
WPI_FH_RX_CONFIG_IRQ_DST_HOST |
WPI_FH_RX_CONFIG_IRQ_TIMEOUT(1));
(void)WPI_READ(sc, WPI_FH_RSSR_TBL); /* barrier */
wpi_nic_unlock(sc);
WPI_WRITE(sc, WPI_FH_RX_WPTR, (WPI_RX_RING_COUNT - 1) & ~7);
/* Initialize TX rings. */
if ((error = wpi_nic_lock(sc)) != 0)
return error;
wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 2); /* bypass mode */
wpi_prph_write(sc, WPI_ALM_SCHED_ARASTAT, 1); /* enable RA0 */
/* Enable all 6 TX rings. */
wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0x3f);
wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE1, 0x10000);
wpi_prph_write(sc, WPI_ALM_SCHED_SBYPASS_MODE2, 0x30002);
wpi_prph_write(sc, WPI_ALM_SCHED_TXF4MF, 4);
wpi_prph_write(sc, WPI_ALM_SCHED_TXF5MF, 5);
/* Set physical address of TX rings. */
WPI_WRITE(sc, WPI_FH_TX_BASE, sc->shared_dma.paddr);
WPI_WRITE(sc, WPI_FH_MSG_CONFIG, 0xffff05a5);
/* Enable all DMA channels. */
for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) {
WPI_WRITE(sc, WPI_FH_CBBC_CTRL(chnl), 0);
WPI_WRITE(sc, WPI_FH_CBBC_BASE(chnl), 0);
WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0x80200008);
}
wpi_nic_unlock(sc);
(void)WPI_READ(sc, WPI_FH_TX_BASE); /* barrier */
/* Clear "radio off" and "commands blocked" bits. */
WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_CMD_BLOCKED);
/* Clear pending interrupts. */
WPI_WRITE(sc, WPI_INT, 0xffffffff);
/* Enable interrupts. */
WPI_WRITE(sc, WPI_INT_MASK, WPI_INT_MASK_DEF);
/* _Really_ make sure "radio off" bit is cleared! */
WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
WPI_WRITE(sc, WPI_UCODE_GP1_CLR, WPI_UCODE_GP1_RFKILL);
if ((error = wpi_load_firmware(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not load firmware, error %d\n", __func__,
error);
return error;
}
/* Wait at most one second for firmware alive notification. */
if ((error = mtx_sleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) {
device_printf(sc->sc_dev,
"%s: timeout waiting for adapter to initialize, error %d\n",
__func__, error);
return error;
}
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
/* Do post-firmware initialization. */
return wpi_post_alive(sc);
}
static void
wpi_hw_stop(struct wpi_softc *sc)
{
int chnl, qid, ntries;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
if (WPI_READ(sc, WPI_UCODE_GP1) & WPI_UCODE_GP1_MAC_SLEEP)
wpi_nic_lock(sc);
WPI_WRITE(sc, WPI_RESET, WPI_RESET_NEVO);
/* Disable interrupts. */
WPI_WRITE(sc, WPI_INT_MASK, 0);
WPI_WRITE(sc, WPI_INT, 0xffffffff);
WPI_WRITE(sc, WPI_FH_INT, 0xffffffff);
/* Make sure we no longer hold the NIC lock. */
wpi_nic_unlock(sc);
if (wpi_nic_lock(sc) == 0) {
/* Stop TX scheduler. */
wpi_prph_write(sc, WPI_ALM_SCHED_MODE, 0);
wpi_prph_write(sc, WPI_ALM_SCHED_TXFACT, 0);
/* Stop all DMA channels. */
for (chnl = 0; chnl < WPI_NDMACHNLS; chnl++) {
WPI_WRITE(sc, WPI_FH_TX_CONFIG(chnl), 0);
for (ntries = 0; ntries < 200; ntries++) {
if (WPI_READ(sc, WPI_FH_TX_STATUS) &
WPI_FH_TX_STATUS_IDLE(chnl))
break;
DELAY(10);
}
}
wpi_nic_unlock(sc);
}
/* Stop RX ring. */
wpi_reset_rx_ring(sc);
/* Reset all TX rings. */
for (qid = 0; qid < WPI_NTXQUEUES; qid++)
wpi_reset_tx_ring(sc, &sc->txq[qid]);
if (wpi_nic_lock(sc) == 0) {
wpi_prph_write(sc, WPI_APMG_CLK_DIS,
WPI_APMG_CLK_CTRL_DMA_CLK_RQT);
wpi_nic_unlock(sc);
}
DELAY(5);
/* Power OFF adapter. */
wpi_apm_stop(sc);
}
static void
wpi_radio_on(void *arg0, int pending)
{
struct wpi_softc *sc = arg0;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
device_printf(sc->sc_dev, "RF switch: radio enabled\n");
- if (vap != NULL) {
- wpi_init(sc);
- ieee80211_init(vap);
- }
+ WPI_LOCK(sc);
+ callout_stop(&sc->watchdog_rfkill);
+ WPI_UNLOCK(sc);
- if (WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL) {
- WPI_LOCK(sc);
- callout_stop(&sc->watchdog_rfkill);
- WPI_UNLOCK(sc);
- }
+ if (vap != NULL)
+ ieee80211_init(vap);
}
static void
wpi_radio_off(void *arg0, int pending)
{
struct wpi_softc *sc = arg0;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
device_printf(sc->sc_dev, "RF switch: radio disabled\n");
+ ieee80211_notify_radio(ic, 0);
wpi_stop(sc);
if (vap != NULL)
ieee80211_stop(vap);
WPI_LOCK(sc);
callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill, sc);
WPI_UNLOCK(sc);
}
-static void
-wpi_init(void *arg)
+static int
+wpi_init(struct wpi_softc *sc)
{
- struct wpi_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
- int error;
+ int error = 0;
WPI_LOCK(sc);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_BEGIN, __func__);
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
+ if (sc->sc_running != 0)
goto end;
/* Check that the radio is not disabled by hardware switch. */
if (!(WPI_READ(sc, WPI_GP_CNTRL) & WPI_GP_CNTRL_RFKILL)) {
device_printf(sc->sc_dev,
"RF switch: radio disabled (%s)\n", __func__);
callout_reset(&sc->watchdog_rfkill, hz, wpi_watchdog_rfkill,
sc);
+ error = EINPROGRESS;
goto end;
}
/* Read firmware images from the filesystem. */
if ((error = wpi_read_firmware(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not read firmware, error %d\n", __func__,
error);
- goto fail;
+ goto end;
}
+ sc->sc_running = 1;
+
/* Initialize hardware and upload firmware. */
error = wpi_hw_init(sc);
wpi_unload_firmware(sc);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: could not initialize hardware, error %d\n", __func__,
error);
goto fail;
}
/* Configure adapter now that it is ready. */
- sc->txq_active = 1;
if ((error = wpi_config(sc)) != 0) {
device_printf(sc->sc_dev,
"%s: could not configure device, error %d\n", __func__,
error);
goto fail;
}
- IF_LOCK(&ifp->if_snd);
- ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
- IF_UNLOCK(&ifp->if_snd);
-
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END, __func__);
WPI_UNLOCK(sc);
- ieee80211_start_all(ic);
+ return 0;
- return;
-
fail: wpi_stop_locked(sc);
+
end: DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_END_ERR, __func__);
WPI_UNLOCK(sc);
+
+ return error;
}
static void
wpi_stop_locked(struct wpi_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
WPI_LOCK_ASSERT(sc);
+ if (sc->sc_running == 0)
+ return;
+
+ WPI_TX_LOCK(sc);
WPI_TXQ_LOCK(sc);
- sc->txq_active = 0;
+ sc->sc_running = 0;
WPI_TXQ_UNLOCK(sc);
+ WPI_TX_UNLOCK(sc);
WPI_TXQ_STATE_LOCK(sc);
callout_stop(&sc->tx_timeout);
WPI_TXQ_STATE_UNLOCK(sc);
WPI_RXON_LOCK(sc);
callout_stop(&sc->scan_timeout);
callout_stop(&sc->calib_to);
WPI_RXON_UNLOCK(sc);
- IF_LOCK(&ifp->if_snd);
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
- IF_UNLOCK(&ifp->if_snd);
-
/* Power OFF hardware. */
wpi_hw_stop(sc);
}
static void
wpi_stop(struct wpi_softc *sc)
{
WPI_LOCK(sc);
wpi_stop_locked(sc);
WPI_UNLOCK(sc);
}
/*
* Callback from net80211 to start a scan.
*/
static void
wpi_scan_start(struct ieee80211com *ic)
{
struct wpi_softc *sc = ic->ic_softc;
wpi_set_led(sc, WPI_LED_LINK, 20, 2);
}
/*
* Callback from net80211 to terminate a scan.
*/
static void
wpi_scan_end(struct ieee80211com *ic)
{
struct wpi_softc *sc = ic->ic_softc;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
if (vap->iv_state == IEEE80211_S_RUN)
wpi_set_led(sc, WPI_LED_LINK, 0, 1);
}
/**
* Called by the net80211 framework to indicate to the driver
* that the channel should be changed
*/
static void
wpi_set_channel(struct ieee80211com *ic)
{
const struct ieee80211_channel *c = ic->ic_curchan;
struct wpi_softc *sc = ic->ic_softc;
int error;
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
WPI_LOCK(sc);
sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq);
sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags);
WPI_UNLOCK(sc);
WPI_TX_LOCK(sc);
sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq);
sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags);
WPI_TX_UNLOCK(sc);
/*
* Only need to set the channel in Monitor mode. AP scanning and auth
* are already taken care of by their respective firmware commands.
*/
if (ic->ic_opmode == IEEE80211_M_MONITOR) {
WPI_RXON_LOCK(sc);
sc->rxon.chan = ieee80211_chan2ieee(ic, c);
if (IEEE80211_IS_CHAN_2GHZ(c)) {
sc->rxon.flags |= htole32(WPI_RXON_AUTO |
WPI_RXON_24GHZ);
} else {
sc->rxon.flags &= ~htole32(WPI_RXON_AUTO |
WPI_RXON_24GHZ);
}
if ((error = wpi_send_rxon(sc, 0, 1)) != 0)
device_printf(sc->sc_dev,
"%s: error %d setting channel\n", __func__,
error);
WPI_RXON_UNLOCK(sc);
}
}
/**
* Called by net80211 to indicate that we need to scan the current
* channel. The channel was previously set via the wpi_set_channel
* callback.
*/
static void
wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
{
struct ieee80211vap *vap = ss->ss_vap;
struct ieee80211com *ic = vap->iv_ic;
struct wpi_softc *sc = ic->ic_softc;
int error;
WPI_RXON_LOCK(sc);
error = wpi_scan(sc, ic->ic_curchan);
WPI_RXON_UNLOCK(sc);
if (error != 0)
ieee80211_cancel_scan(vap);
}
/**
* Called by the net80211 framework to indicate
* the minimum dwell time has been met; terminate the scan.
* We don't actually terminate the scan as the firmware will notify
* us when it's finished and we have no way to interrupt it.
*/
static void
wpi_scan_mindwell(struct ieee80211_scan_state *ss)
{
/* NB: don't try to abort scan; wait for firmware to finish */
}
static void
wpi_hw_reset(void *arg, int pending)
{
struct wpi_softc *sc = arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
DPRINTF(sc, WPI_DEBUG_TRACE, TRACE_STR_DOING, __func__);
+ ieee80211_notify_radio(ic, 0);
if (vap != NULL && (ic->ic_flags & IEEE80211_F_SCAN))
ieee80211_cancel_scan(vap);
wpi_stop(sc);
- if (vap != NULL)
+ if (vap != NULL) {
ieee80211_stop(vap);
- wpi_init(sc);
- if (vap != NULL)
ieee80211_init(vap);
+ }
}
Index: head/sys/dev/wpi/if_wpivar.h
===================================================================
--- head/sys/dev/wpi/if_wpivar.h (revision 287196)
+++ head/sys/dev/wpi/if_wpivar.h (revision 287197)
@@ -1,301 +1,302 @@
/* $FreeBSD$ */
/*-
* Copyright (c) 2006,2007
* Damien Bergamini <damien.bergamini@free.fr>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
struct wpi_rx_radiotap_header {
struct ieee80211_radiotap_header wr_ihdr;
uint64_t wr_tsft;
uint8_t wr_flags;
uint8_t wr_rate;
uint16_t wr_chan_freq;
uint16_t wr_chan_flags;
int8_t wr_dbm_antsignal;
int8_t wr_dbm_antnoise;
uint8_t wr_antenna;
} __packed;
#define WPI_RX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_TSFT) | \
(1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | \
(1 << IEEE80211_RADIOTAP_ANTENNA))
struct wpi_tx_radiotap_header {
struct ieee80211_radiotap_header wt_ihdr;
uint8_t wt_flags;
uint8_t wt_rate;
uint16_t wt_chan_freq;
uint16_t wt_chan_flags;
} __packed;
#define WPI_TX_RADIOTAP_PRESENT \
((1 << IEEE80211_RADIOTAP_FLAGS) | \
(1 << IEEE80211_RADIOTAP_RATE) | \
(1 << IEEE80211_RADIOTAP_CHANNEL))
struct wpi_dma_info {
bus_dma_tag_t tag;
bus_dmamap_t map;
bus_addr_t paddr;
caddr_t vaddr;
bus_size_t size;
};
struct wpi_tx_data {
bus_dmamap_t map;
bus_addr_t cmd_paddr;
struct mbuf *m;
struct ieee80211_node *ni;
};
struct wpi_tx_ring {
struct wpi_dma_info desc_dma;
struct wpi_dma_info cmd_dma;
struct wpi_tx_desc *desc;
struct wpi_tx_cmd *cmd;
struct wpi_tx_data data[WPI_TX_RING_COUNT];
bus_dma_tag_t data_dmat;
+ struct mbufq snd;
int qid;
int queued;
int cur;
int update;
};
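The new per-ring "snd" member uses the mbufq API from sys/mbuf.h. A minimal sketch of its typical use follows; it is not taken from this commit, the example_* functions are hypothetical, and ifqmaxlen is only assumed as a reasonable queue bound.

/*
 * Hypothetical illustration of driving the per-ring software queue:
 * initialize it with a length limit, park frames on it, drain it on reset.
 */
static void
example_ring_setup(struct wpi_tx_ring *ring)
{
	/* Bound the software queue (ifqmaxlen is a common default). */
	mbufq_init(&ring->snd, ifqmaxlen);
}

static int
example_ring_enqueue(struct wpi_tx_ring *ring, struct mbuf *m)
{
	/* mbufq_enqueue() returns ENOBUFS once the limit is reached. */
	return (mbufq_enqueue(&ring->snd, m));
}

static void
example_ring_drain(struct wpi_tx_ring *ring)
{
	struct mbuf *m;

	while ((m = mbufq_dequeue(&ring->snd)) != NULL)
		m_freem(m);
}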
struct wpi_rx_data {
struct mbuf *m;
bus_dmamap_t map;
};
struct wpi_rx_ring {
struct wpi_dma_info desc_dma;
uint32_t *desc;
struct wpi_rx_data data[WPI_RX_RING_COUNT];
bus_dma_tag_t data_dmat;
int cur;
int update;
};
struct wpi_node {
struct ieee80211_node ni; /* must be the first */
uint8_t id;
};
#define WPI_NODE(ni) ((struct wpi_node *)(ni))
struct wpi_power_sample {
uint8_t index;
int8_t power;
};
struct wpi_power_group {
#define WPI_SAMPLES_COUNT 5
struct wpi_power_sample samples[WPI_SAMPLES_COUNT];
uint8_t chan;
int8_t maxpwr;
int16_t temp;
};
struct wpi_buf {
uint8_t data[56]; /* sizeof(struct wpi_cmd_beacon) */
struct ieee80211_node *ni;
struct mbuf *m;
size_t size;
int code;
int ac;
};
struct wpi_vap {
struct ieee80211vap wv_vap;
struct wpi_buf wv_bcbuf;
struct ieee80211_beacon_offsets wv_boff;
struct mtx wv_mtx;
uint32_t wv_gtk;
#define WPI_VAP_KEY(kid) (1 << kid)
int (*wv_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
void (*wv_recv_mgmt)(struct ieee80211_node *,
struct mbuf *, int,
const struct ieee80211_rx_stats *,
int, int);
};
#define WPI_VAP(vap) ((struct wpi_vap *)(vap))
#define WPI_VAP_LOCK_INIT(_wvp) \
mtx_init(&(_wvp)->wv_mtx, "lock for wv_bcbuf/wv_boff structures", \
NULL, MTX_DEF)
#define WPI_VAP_LOCK(_wvp) mtx_lock(&(_wvp)->wv_mtx)
#define WPI_VAP_UNLOCK(_wvp) mtx_unlock(&(_wvp)->wv_mtx)
#define WPI_VAP_LOCK_ASSERT(_wvp) mtx_assert(&(_wvp)->wv_mtx, MA_OWNED)
#define WPI_VAP_LOCK_DESTROY(_wvp) mtx_destroy(&(_wvp)->wv_mtx)
struct wpi_fw_part {
const uint8_t *text;
uint32_t textsz;
const uint8_t *data;
uint32_t datasz;
};
struct wpi_fw_info {
const uint8_t *data;
size_t size;
struct wpi_fw_part init;
struct wpi_fw_part main;
struct wpi_fw_part boot;
};
struct wpi_softc {
device_t sc_dev;
-
- struct ifnet *sc_ifp;
int sc_debug;
int sc_flags;
#define WPI_PS_PATH (1 << 0)
+ int sc_running;
struct mtx sc_mtx;
+ struct ieee80211com sc_ic;
+
struct mtx tx_mtx;
/* Shared area. */
struct wpi_dma_info shared_dma;
struct wpi_shared *shared;
struct wpi_tx_ring txq[WPI_NTXQUEUES];
struct mtx txq_mtx;
struct mtx txq_state_mtx;
- uint32_t txq_active;
struct wpi_rx_ring rxq;
uint64_t rx_tstamp;
/* TX Thermal Calibration. */
struct callout calib_to;
int calib_cnt;
struct callout scan_timeout;
struct callout tx_timeout;
/* Watchdog timer. */
struct callout watchdog_rfkill;
/* Firmware image. */
struct wpi_fw_info fw;
uint32_t errptr;
struct resource *irq;
struct resource *mem;
bus_space_tag_t sc_st;
bus_space_handle_t sc_sh;
void *sc_ih;
bus_size_t sc_sz;
int sc_cap_off; /* PCIe Capabilities. */
struct wpi_rxon rxon;
struct mtx rxon_mtx;
int temp;
uint32_t qfullmsk;
uint32_t nodesmsk;
struct mtx nt_mtx;
void (*sc_node_free)(struct ieee80211_node *);
void (*sc_update_rx_ring)(struct wpi_softc *);
void (*sc_update_tx_ring)(struct wpi_softc *,
struct wpi_tx_ring *);
struct wpi_rx_radiotap_header sc_rxtap;
struct wpi_tx_radiotap_header sc_txtap;
/* Firmware image. */
const struct firmware *fw_fp;
/* Firmware DMA transfer. */
struct wpi_dma_info fw_dma;
/* Tasks used by the driver. */
struct task sc_reinittask;
struct task sc_radiooff_task;
struct task sc_radioon_task;
struct task sc_start_task;
/* Taskqueue */
struct taskqueue *sc_tq;
/* Eeprom info. */
uint8_t cap;
uint16_t rev;
uint8_t type;
struct wpi_eeprom_chan
eeprom_channels[WPI_CHAN_BANDS_COUNT][WPI_MAX_CHAN_PER_BAND];
struct wpi_power_group groups[WPI_POWER_GROUPS_COUNT];
int8_t maxpwr[IEEE80211_CHAN_MAX];
char domain[4]; /* Regulatory domain. */
};
/*
* Locking order:
* 1. WPI_LOCK;
* 2. WPI_RXON_LOCK;
* 3. WPI_TX_LOCK;
* 4. WPI_NT_LOCK / WPI_VAP_LOCK;
* 5. WPI_TXQ_LOCK;
* 6. WPI_TXQ_STATE_LOCK;
*/
#define WPI_LOCK_INIT(_sc) \
mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
MTX_NETWORK_LOCK, MTX_DEF)
#define WPI_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define WPI_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define WPI_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define WPI_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx)
#define WPI_RXON_LOCK_INIT(_sc) \
mtx_init(&(_sc)->rxon_mtx, "lock for wpi_rxon structure", NULL, MTX_DEF)
#define WPI_RXON_LOCK(_sc) mtx_lock(&(_sc)->rxon_mtx)
#define WPI_RXON_UNLOCK(_sc) mtx_unlock(&(_sc)->rxon_mtx)
#define WPI_RXON_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rxon_mtx, MA_OWNED)
#define WPI_RXON_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rxon_mtx)
#define WPI_TX_LOCK_INIT(_sc) \
mtx_init(&(_sc)->tx_mtx, "tx path lock", NULL, MTX_DEF)
#define WPI_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx)
#define WPI_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx)
#define WPI_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx)
#define WPI_NT_LOCK_INIT(_sc) \
mtx_init(&(_sc)->nt_mtx, "node table lock", NULL, MTX_DEF)
#define WPI_NT_LOCK(_sc) mtx_lock(&(_sc)->nt_mtx)
#define WPI_NT_UNLOCK(_sc) mtx_unlock(&(_sc)->nt_mtx)
#define WPI_NT_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->nt_mtx)
#define WPI_TXQ_LOCK_INIT(_sc) \
mtx_init(&(_sc)->txq_mtx, "txq/cmdq lock", NULL, MTX_DEF)
#define WPI_TXQ_LOCK(_sc) mtx_lock(&(_sc)->txq_mtx)
#define WPI_TXQ_UNLOCK(_sc) mtx_unlock(&(_sc)->txq_mtx)
#define WPI_TXQ_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->txq_mtx)
#define WPI_TXQ_STATE_LOCK_INIT(_sc) \
mtx_init(&(_sc)->txq_state_mtx, "txq state lock", NULL, MTX_DEF)
#define WPI_TXQ_STATE_LOCK(_sc) mtx_lock(&(_sc)->txq_state_mtx)
#define WPI_TXQ_STATE_UNLOCK(_sc) mtx_unlock(&(_sc)->txq_state_mtx)
#define WPI_TXQ_STATE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->txq_state_mtx)
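As an illustration of the locking-order comment above, here is a minimal hedged sketch (the example_lock_order function is hypothetical, not part of the driver) of acquiring several of these mutexes in the documented 1 -> 3 -> 5 order and releasing them in reverse, mirroring what wpi_stop_locked() does in if_wpi.c:

/*
 * Hypothetical example only: take WPI_LOCK before WPI_TX_LOCK before
 * WPI_TXQ_LOCK, then release in the opposite order.
 */
static void
example_lock_order(struct wpi_softc *sc)
{
	WPI_LOCK(sc);		/* 1. sc_mtx */
	WPI_TX_LOCK(sc);	/* 3. tx_mtx */
	WPI_TXQ_LOCK(sc);	/* 5. txq_mtx */
	/* ... work that touches the TX queues ... */
	WPI_TXQ_UNLOCK(sc);
	WPI_TX_UNLOCK(sc);
	WPI_UNLOCK(sc);
}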
Index: head/sys/dev/wtap/if_wtap.c
===================================================================
--- head/sys/dev/wtap/if_wtap.c (revision 287196)
+++ head/sys/dev/wtap/if_wtap.c (revision 287197)
@@ -1,921 +1,746 @@
/*-
* Copyright (c) 2010-2011 Monthadar Al Jaberi, TerraNet AB
* All rights reserved.
*
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
* $FreeBSD$
*/
#include "if_wtapvar.h"
#include <sys/uio.h> /* uio struct */
#include <sys/jail.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <net80211/ieee80211_ratectl.h>
#include "if_medium.h"
/*
* This _requires_ vimage to be useful.
*/
#ifndef VIMAGE
#error if_wtap requires VIMAGE.
#endif /* VIMAGE */
/* device for IOCTL and read/write for debugging purposes */
/* Function prototypes */
static d_open_t wtap_node_open;
static d_close_t wtap_node_close;
static d_write_t wtap_node_write;
static d_ioctl_t wtap_node_ioctl;
static struct cdevsw wtap_cdevsw = {
.d_version = D_VERSION,
.d_flags = 0,
.d_open = wtap_node_open,
.d_close = wtap_node_close,
.d_write = wtap_node_write,
.d_ioctl = wtap_node_ioctl,
.d_name = "wtapnode",
};
static int
wtap_node_open(struct cdev *dev, int oflags, int devtype, struct thread *p)
{
int err = 0;
uprintf("Opened device \"echo\" successfully.\n");
return(err);
}
static int
wtap_node_close(struct cdev *dev, int fflag, int devtype, struct thread *p)
{
uprintf("Closing device \"echo.\"\n");
return(0);
}
static int
wtap_node_write(struct cdev *dev, struct uio *uio, int ioflag)
{
int err = 0;
struct mbuf *m;
struct ifnet *ifp;
struct wtap_softc *sc;
uint8_t buf[1024];
int buf_len;
uprintf("write device %s \"echo.\"\n", devtoname(dev));
buf_len = MIN(uio->uio_iov->iov_len, 1024);
err = copyin(uio->uio_iov->iov_base, buf, buf_len);
if (err != 0) {
uprintf("Write failed: bad address!\n");
return (err);
}
MGETHDR(m, M_NOWAIT, MT_DATA);
m_copyback(m, 0, buf_len, buf);
CURVNET_SET(TD_TO_VNET(curthread));
IFNET_RLOCK_NOSLEEP();
TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
printf("ifp->if_xname = %s\n", ifp->if_xname);
if(strcmp(devtoname(dev), ifp->if_xname) == 0){
printf("found match, correspoding wtap = %s\n",
ifp->if_xname);
sc = (struct wtap_softc *)ifp->if_softc;
printf("wtap id = %d\n", sc->id);
wtap_inject(sc, m);
}
}
IFNET_RUNLOCK_NOSLEEP();
CURVNET_RESTORE();
return(err);
}
int
wtap_node_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
int fflag, struct thread *td)
{
int error = 0;
switch(cmd) {
default:
DWTAP_PRINTF("Unkown WTAP IOCTL\n");
error = EINVAL;
}
return error;
}
static int wtap_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params);
static int
wtap_medium_enqueue(struct wtap_vap *avp, struct mbuf *m)
{
return medium_transmit(avp->av_md, avp->id, m);
}
static int
wtap_media_change(struct ifnet *ifp)
{
DWTAP_PRINTF("%s\n", __func__);
int error = ieee80211_media_change(ifp);
/* NB: only the fixed rate can change and that doesn't need a reset */
return (error == ENETRESET ? 0 : error);
}
/*
* Intercept management frames to collect beacon rssi data
* and to do ibss merges.
*/
static void
wtap_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
int subtype, const struct ieee80211_rx_stats *stats, int rssi, int nf)
{
struct ieee80211vap *vap = ni->ni_vap;
#if 0
DWTAP_PRINTF("[%d] %s\n", myath_id(ni), __func__);
#endif
WTAP_VAP(vap)->av_recv_mgmt(ni, m, subtype, stats, rssi, nf);
}
static int
wtap_reset_vap(struct ieee80211vap *vap, u_long cmd)
{
DWTAP_PRINTF("%s\n", __func__);
return 0;
}
static void
wtap_beacon_update(struct ieee80211vap *vap, int item)
{
struct ieee80211_beacon_offsets *bo = &WTAP_VAP(vap)->av_boff;
DWTAP_PRINTF("%s\n", __func__);
setbit(bo->bo_flags, item);
}
/*
* Allocate and setup an initial beacon frame.
*/
static int
wtap_beacon_alloc(struct wtap_softc *sc, struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct wtap_vap *avp = WTAP_VAP(vap);
DWTAP_PRINTF("[%s] %s\n", ether_sprintf(ni->ni_macaddr), __func__);
/*
* NB: the beacon data buffer must be 32-bit aligned;
* we assume the mbuf routines will return us something
* with this alignment (perhaps should assert).
*/
avp->beacon = ieee80211_beacon_alloc(ni, &avp->av_boff);
if (avp->beacon == NULL) {
printf("%s: cannot get mbuf\n", __func__);
return ENOMEM;
}
callout_init(&avp->av_swba, 0);
avp->bf_node = ieee80211_ref_node(ni);
return 0;
}
static void
wtap_beacon_config(struct wtap_softc *sc, struct ieee80211vap *vap)
{
DWTAP_PRINTF("%s\n", __func__);
}
static void
wtap_beacon_intrp(void *arg)
{
struct wtap_vap *avp = arg;
struct ieee80211vap *vap = arg;
struct mbuf *m;
if (vap->iv_state < IEEE80211_S_RUN) {
DWTAP_PRINTF("Skip beacon, not running, state %d", vap->iv_state);
return ;
}
DWTAP_PRINTF("[%d] beacon intrp\n", avp->id); //burst mode
/*
* Update dynamic beacon contents. If this returns
* non-zero then we need to remap the memory because
* the beacon frame changed size (probably because
* of the TIM bitmap).
*/
m = m_dup(avp->beacon, M_NOWAIT);
if (ieee80211_beacon_update(avp->bf_node, &avp->av_boff, m, 0)) {
printf("%s, need to remap the memory because the beacon frame"
" changed size.\n",__func__);
}
if (ieee80211_radiotap_active_vap(vap))
ieee80211_radiotap_tx(vap, m);
#if 0
medium_transmit(avp->av_md, avp->id, m);
#endif
wtap_medium_enqueue(avp, m);
callout_schedule(&avp->av_swba, avp->av_bcinterval);
}
static int
wtap_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct ieee80211com *ic = vap->iv_ic;
- struct wtap_softc *sc = ic->ic_ifp->if_softc;
+ struct wtap_softc *sc = ic->ic_softc;
struct wtap_vap *avp = WTAP_VAP(vap);
struct ieee80211_node *ni = NULL;
int error;
DWTAP_PRINTF("%s\n", __func__);
ni = ieee80211_ref_node(vap->iv_bss);
/*
* Invoke the parent method to do net80211 work.
*/
error = avp->av_newstate(vap, nstate, arg);
if (error != 0)
goto bad;
if (nstate == IEEE80211_S_RUN) {
/* NB: collect bss node again, it may have changed */
ieee80211_free_node(ni);
ni = ieee80211_ref_node(vap->iv_bss);
switch (vap->iv_opmode) {
case IEEE80211_M_MBSS:
error = wtap_beacon_alloc(sc, ni);
if (error != 0)
goto bad;
wtap_beacon_config(sc, vap);
callout_reset(&avp->av_swba, avp->av_bcinterval,
wtap_beacon_intrp, vap);
break;
default:
goto bad;
}
} else if (nstate == IEEE80211_S_INIT) {
callout_stop(&avp->av_swba);
}
ieee80211_free_node(ni);
return 0;
bad:
printf("%s: bad\n", __func__);
ieee80211_free_node(ni);
return error;
}
static void
wtap_bmiss(struct ieee80211vap *vap)
{
struct wtap_vap *avp = (struct wtap_vap *)vap;
DWTAP_PRINTF("%s\n", __func__);
avp->av_bmiss(vap);
}
static struct ieee80211vap *
wtap_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ],
int unit, enum ieee80211_opmode opmode, int flags,
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t mac[IEEE80211_ADDR_LEN])
{
- struct wtap_softc *sc = ic->ic_ifp->if_softc;
+ struct wtap_softc *sc = ic->ic_softc;
struct ieee80211vap *vap;
struct wtap_vap *avp;
int error;
struct ieee80211_node *ni;
DWTAP_PRINTF("%s\n", __func__);
- avp = malloc(sizeof(struct wtap_vap), M_80211_VAP, M_NOWAIT | M_ZERO);
- if (avp == NULL)
- return (NULL);
+ avp = malloc(sizeof(struct wtap_vap), M_80211_VAP, M_WAITOK | M_ZERO);
avp->id = sc->id;
avp->av_md = sc->sc_md;
avp->av_bcinterval = msecs_to_ticks(BEACON_INTRERVAL + 100*sc->id);
vap = (struct ieee80211vap *) avp;
error = ieee80211_vap_setup(ic, vap, name, unit, IEEE80211_M_MBSS,
- flags | IEEE80211_CLONE_NOBEACONS, bssid, mac);
+ flags | IEEE80211_CLONE_NOBEACONS, bssid);
if (error) {
free(avp, M_80211_VAP);
return (NULL);
}
/* override various methods */
avp->av_recv_mgmt = vap->iv_recv_mgmt;
vap->iv_recv_mgmt = wtap_recv_mgmt;
vap->iv_reset = wtap_reset_vap;
vap->iv_update_beacon = wtap_beacon_update;
avp->av_newstate = vap->iv_newstate;
vap->iv_newstate = wtap_newstate;
avp->av_bmiss = vap->iv_bmiss;
vap->iv_bmiss = wtap_bmiss;
/* complete setup */
- ieee80211_vap_attach(vap, wtap_media_change, ieee80211_media_status);
+ ieee80211_vap_attach(vap, wtap_media_change, ieee80211_media_status,
+ mac);
avp->av_dev = make_dev(&wtap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
- "%s", (const char *)ic->ic_ifp->if_xname);
+ "%s", (const char *)sc->name);
/* TODO this is a hack to force it to choose the rate we want */
ni = ieee80211_ref_node(vap->iv_bss);
ni->ni_txrate = 130;
ieee80211_free_node(ni);
return vap;
}
static void
wtap_vap_delete(struct ieee80211vap *vap)
{
struct wtap_vap *avp = WTAP_VAP(vap);
DWTAP_PRINTF("%s\n", __func__);
destroy_dev(avp->av_dev);
callout_stop(&avp->av_swba);
ieee80211_vap_detach(vap);
free((struct wtap_vap*) vap, M_80211_VAP);
}
-/* NB: This function is not used.
- * I had the problem of the queue
- * being empty all the time.
- * Maybe I am setting the queue wrong?
- */
static void
-wtap_start(struct ifnet *ifp)
+wtap_parent(struct ieee80211com *ic)
{
- struct ieee80211com *ic = ifp->if_l2com;
- struct ifnet *icifp = ic->ic_ifp;
- struct wtap_softc *sc = icifp->if_softc;
- struct ieee80211_node *ni;
- struct mbuf *m;
+ struct wtap_softc *sc = ic->ic_softc;
- DWTAP_PRINTF("my_start, with id=%u\n", sc->id);
-
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->up == 0)
- return;
- for (;;) {
- if(IFQ_IS_EMPTY(&ifp->if_snd)){
- printf("queue empty, just trying to see "
- "if the other queue is empty\n");
-#if 0
- printf("queue for id=1, %u\n",
- IFQ_IS_EMPTY(&global_mscs[1]->ifp->if_snd));
- printf("queue for id=0, %u\n",
- IFQ_IS_EMPTY(&global_mscs[0]->ifp->if_snd));
-#endif
- break;
- }
- IFQ_DEQUEUE(&ifp->if_snd, m);
- if (m == NULL) {
- printf("error dequeueing from ifp->snd\n");
- break;
- }
- ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
- /*
- * Check for fragmentation. If this frame
- * has been broken up verify we have enough
- * buffers to send all the fragments so all
- * go out or none...
- */
-#if 0
- STAILQ_INIT(&frags);
-#endif
- if ((m->m_flags & M_FRAG)){
- printf("dont support frags\n");
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- return;
- }
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- if(wtap_raw_xmit(ni, m, NULL) < 0){
- printf("error raw_xmiting\n");
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- return;
- }
- }
+ if (ic->ic_nrunning > 0) {
+ sc->up = 1;
+ ieee80211_start_all(ic);
+ } else
+ sc->up = 0;
}
-static int
-wtap_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
-{
-#if 0
- DWTAP_PRINTF("%s\n", __func__);
- uprintf("%s, command %lu\n", __func__, cmd);
-#endif
-#define IS_RUNNING(ifp) \
- ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
- struct ieee80211com *ic = ifp->if_l2com;
- struct wtap_softc *sc = ifp->if_softc;
- struct ifreq *ifr = (struct ifreq *)data;
- int error = 0;
-
- switch (cmd) {
- case SIOCSIFFLAGS:
- //printf("%s: %s\n", __func__, "SIOCSIFFLAGS");
- if (IS_RUNNING(ifp)) {
- DWTAP_PRINTF("running\n");
-#if 0
- /*
- * To avoid rescanning another access point,
- * do not call ath_init() here. Instead,
- * only reflect promisc mode settings.
- */
- //ath_mode_init(sc);
-#endif
- } else if (ifp->if_flags & IFF_UP) {
- DWTAP_PRINTF("up\n");
- sc->up = 1;
-#if 0
- /*
- * Beware of being called during attach/detach
- * to reset promiscuous mode. In that case we
- * will still be marked UP but not RUNNING.
- * However trying to re-init the interface
- * is the wrong thing to do as we've already
- * torn down much of our state. There's
- * probably a better way to deal with this.
- */
- //if (!sc->sc_invalid)
- // ath_init(sc); /* XXX lose error */
-#endif
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
- ieee80211_start_all(ic);
- } else {
- DWTAP_PRINTF("stoping\n");
-#if 0
- ath_stop_locked(ifp);
-#ifdef notyet
- /* XXX must wakeup in places like ath_vap_delete */
- if (!sc->sc_invalid)
- ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
-#endif
-#endif
- }
- break;
- case SIOCGIFMEDIA:
- case SIOCSIFMEDIA:
-#if 0
- DWTAP_PRINTF("%s: %s\n", __func__, "SIOCGIFMEDIA|SIOCSIFMEDIA");
-#endif
- error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
- break;
- case SIOCGIFADDR:
-#if 0
- DWTAP_PRINTF("%s: %s\n", __func__, "SIOCGIFADDR");
-#endif
- error = ether_ioctl(ifp, cmd, data);
- break;
- default:
- DWTAP_PRINTF("%s: %s [%lu]\n", __func__, "EINVAL", cmd);
- error = EINVAL;
- break;
- }
- return error;
-#undef IS_RUNNING
-}
-
static void
-wtap_init(void *arg){
-
- DWTAP_PRINTF("%s\n", __func__);
-}
-
-static void
wtap_scan_start(struct ieee80211com *ic)
{
#if 0
DWTAP_PRINTF("%s\n", __func__);
#endif
}
static void
wtap_scan_end(struct ieee80211com *ic)
{
#if 0
DWTAP_PRINTF("%s\n", __func__);
#endif
}
static void
wtap_set_channel(struct ieee80211com *ic)
{
#if 0
DWTAP_PRINTF("%s\n", __func__);
#endif
}
static int
wtap_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
#if 0
DWTAP_PRINTF("%s, %p\n", __func__, m);
#endif
struct ieee80211vap *vap = ni->ni_vap;
struct wtap_vap *avp = WTAP_VAP(vap);
if (ieee80211_radiotap_active_vap(vap)) {
ieee80211_radiotap_tx(vap, m);
}
if (m->m_flags & M_TXCB)
ieee80211_process_callback(ni, m, 0);
ieee80211_free_node(ni);
return wtap_medium_enqueue(avp, m);
}
void
wtap_inject(struct wtap_softc *sc, struct mbuf *m)
{
struct wtap_buf *bf = (struct wtap_buf *)malloc(sizeof(struct wtap_buf),
M_WTAP_RXBUF, M_NOWAIT | M_ZERO);
KASSERT(bf != NULL, ("could not allocated a new wtap_buf\n"));
bf->m = m;
mtx_lock(&sc->sc_mtx);
STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
mtx_unlock(&sc->sc_mtx);
}
void
wtap_rx_deliver(struct wtap_softc *sc, struct mbuf *m)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211_node *ni;
int type;
#if 0
DWTAP_PRINTF("%s\n", __func__);
#endif
DWTAP_PRINTF("[%d] receiving m=%p\n", sc->id, m);
if (m == NULL) { /* NB: shouldn't happen */
- if_printf(ifp, "%s: no mbuf!\n", __func__);
+ ic_printf(ic, "%s: no mbuf!\n", __func__);
}
- if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
-
ieee80211_dump_pkt(ic, mtod(m, caddr_t), 0,0,0);
/*
* Locate the node for sender, track state, and then
* pass the (referenced) node up to the 802.11 layer
* for its use.
*/
ni = ieee80211_find_rxnode_withkey(ic,
mtod(m, const struct ieee80211_frame_min *),IEEE80211_KEYIX_NONE);
if (ni != NULL) {
/*
* Sending station is known, dispatch directly.
*/
type = ieee80211_input(ni, m, 1<<7, 10);
ieee80211_free_node(ni);
} else {
type = ieee80211_input_all(ic, m, 1<<7, 10);
}
}
static void
wtap_rx_proc(void *arg, int npending)
{
struct wtap_softc *sc = (struct wtap_softc *)arg;
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
struct mbuf *m;
struct ieee80211_node *ni;
int type;
struct wtap_buf *bf;
#if 0
DWTAP_PRINTF("%s\n", __func__);
#endif
for(;;) {
mtx_lock(&sc->sc_mtx);
bf = STAILQ_FIRST(&sc->sc_rxbuf);
if (bf == NULL) {
mtx_unlock(&sc->sc_mtx);
return;
}
STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
mtx_unlock(&sc->sc_mtx);
KASSERT(bf != NULL, ("wtap_buf is NULL\n"));
m = bf->m;
DWTAP_PRINTF("[%d] receiving m=%p\n", sc->id, bf->m);
if (m == NULL) { /* NB: shouldn't happen */
- if_printf(ifp, "%s: no mbuf!\n", __func__);
+ ic_printf(ic, "%s: no mbuf!\n", __func__);
free(bf, M_WTAP_RXBUF);
return;
}
-
- if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
#if 0
ieee80211_dump_pkt(ic, mtod(m, caddr_t), 0,0,0);
#endif
/*
* Locate the node for sender, track state, and then
* pass the (referenced) node up to the 802.11 layer
* for its use.
*/
ni = ieee80211_find_rxnode_withkey(ic,
mtod(m, const struct ieee80211_frame_min *),
IEEE80211_KEYIX_NONE);
if (ni != NULL) {
/*
* Sending station is known, dispatch directly.
*/
#if 0
ieee80211_radiotap_rx(ni->ni_vap, m);
#endif
type = ieee80211_input(ni, m, 1<<7, 10);
ieee80211_free_node(ni);
} else {
#if 0
ieee80211_radiotap_rx_all(ic, m);
#endif
type = ieee80211_input_all(ic, m, 1<<7, 10);
}
/* The mbufs are freed by the Net80211 stack */
free(bf, M_WTAP_RXBUF);
}
}
static void
wtap_newassoc(struct ieee80211_node *ni, int isnew)
{
DWTAP_PRINTF("%s\n", __func__);
}
/*
* Callback from the 802.11 layer to update WME parameters.
*/
static int
wtap_wme_update(struct ieee80211com *ic)
{
DWTAP_PRINTF("%s\n", __func__);
return 0;
}
static void
wtap_update_mcast(struct ieee80211com *ic)
{
DWTAP_PRINTF("%s\n", __func__);
}
static void
wtap_update_promisc(struct ieee80211com *ic)
{
DWTAP_PRINTF("%s\n", __func__);
}
static int
-wtap_if_transmit(struct ifnet *ifp, struct mbuf *m)
+wtap_transmit(struct ieee80211com *ic, struct mbuf *m)
{
struct ieee80211_node *ni =
(struct ieee80211_node *) m->m_pkthdr.rcvif;
struct ieee80211vap *vap = ni->ni_vap;
struct wtap_vap *avp = WTAP_VAP(vap);
if(ni == NULL){
printf("m->m_pkthdr.rcvif is NULL we cant radiotap_tx\n");
}else{
if (ieee80211_radiotap_active_vap(vap))
ieee80211_radiotap_tx(vap, m);
}
if (m->m_flags & M_TXCB)
ieee80211_process_callback(ni, m, 0);
ieee80211_free_node(ni);
return wtap_medium_enqueue(avp, m);
}
static struct ieee80211_node *
wtap_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct ieee80211_node *ni;
DWTAP_PRINTF("%s\n", __func__);
ni = malloc(sizeof(struct ieee80211_node), M_80211_NODE,
M_NOWAIT|M_ZERO);
ni->ni_txrate = 130;
return ni;
}
static void
wtap_node_free(struct ieee80211_node *ni)
{
struct ieee80211com *ic = ni->ni_ic;
- struct wtap_softc *sc = ic->ic_ifp->if_softc;
+ struct wtap_softc *sc = ic->ic_softc;
DWTAP_PRINTF("%s\n", __func__);
sc->sc_node_free(ni);
}
int32_t
wtap_attach(struct wtap_softc *sc, const uint8_t *macaddr)
{
- struct ifnet *ifp;
- struct ieee80211com *ic;
- char wtap_name[] = {'w','T','a','p',sc->id,
- '_','t','a','s','k','q','\0'};
+ struct ieee80211com *ic = &sc->sc_ic;
DWTAP_PRINTF("%s\n", __func__);
- ifp = if_alloc(IFT_IEEE80211);
- if (ifp == NULL) {
- printf("can not if_alloc()\n");
- return -1;
- }
- ic = ifp->if_l2com;
- if_initname(ifp, "wtap", sc->id);
-
- sc->sc_ifp = ifp;
sc->up = 0;
-
STAILQ_INIT(&sc->sc_rxbuf);
- sc->sc_tq = taskqueue_create(wtap_name, M_NOWAIT | M_ZERO,
+ sc->sc_tq = taskqueue_create("wtap_taskq", M_NOWAIT | M_ZERO,
taskqueue_thread_enqueue, &sc->sc_tq);
- taskqueue_start_threads(&sc->sc_tq, 1, PI_SOFT, "%s taskQ",
- ifp->if_xname);
+ taskqueue_start_threads(&sc->sc_tq, 1, PI_SOFT, "%s taskQ", sc->name);
TASK_INIT(&sc->sc_rxtask, 0, wtap_rx_proc, sc);
- ifp->if_softc = sc;
- ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
- ifp->if_start = wtap_start;
- ifp->if_ioctl = wtap_ioctl;
- ifp->if_init = wtap_init;
- IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
- ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
- IFQ_SET_READY(&ifp->if_snd);
-
- ic->ic_ifp = ifp;
ic->ic_softc = sc;
ic->ic_name = sc->name;
ic->ic_phytype = IEEE80211_T_DS;
ic->ic_opmode = IEEE80211_M_MBSS;
ic->ic_caps = IEEE80211_C_MBSS;
ic->ic_max_keyix = 128; /* A value read from Atheros ATH_KEYMAX */
ic->ic_regdomain.regdomain = SKU_ETSI;
ic->ic_regdomain.country = CTRY_SWEDEN;
ic->ic_regdomain.location = 1; /* Indoors */
ic->ic_regdomain.isocc[0] = 'S';
ic->ic_regdomain.isocc[1] = 'E';
ic->ic_nchans = 1;
ic->ic_channels[0].ic_flags = IEEE80211_CHAN_B;
ic->ic_channels[0].ic_freq = 2412;
- ieee80211_ifattach(ic, macaddr);
+ IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr);
+ ieee80211_ifattach(ic);
-#if 0
- /* new prototype hook-ups */
- msc->if_input = ifp->if_input;
- ifp->if_input = myath_if_input;
- msc->if_output = ifp->if_output;
- ifp->if_output = myath_if_output;
-#endif
- sc->if_transmit = ifp->if_transmit;
- ifp->if_transmit = wtap_if_transmit;
-
/* override default methods */
ic->ic_newassoc = wtap_newassoc;
ic->ic_wme.wme_update = wtap_wme_update;
ic->ic_vap_create = wtap_vap_create;
ic->ic_vap_delete = wtap_vap_delete;
ic->ic_raw_xmit = wtap_raw_xmit;
ic->ic_update_mcast = wtap_update_mcast;
ic->ic_update_promisc = wtap_update_promisc;
+ ic->ic_transmit = wtap_transmit;
+ ic->ic_parent = wtap_parent;
sc->sc_node_alloc = ic->ic_node_alloc;
ic->ic_node_alloc = wtap_node_alloc;
sc->sc_node_free = ic->ic_node_free;
ic->ic_node_free = wtap_node_free;
-#if 0
- ic->ic_node_getsignal = myath_node_getsignal;
-#endif
ic->ic_scan_start = wtap_scan_start;
ic->ic_scan_end = wtap_scan_end;
ic->ic_set_channel = wtap_set_channel;
ieee80211_radiotap_attach(ic,
&sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
WTAP_TX_RADIOTAP_PRESENT,
&sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
WTAP_RX_RADIOTAP_PRESENT);
/* TODO: we must find a way to populate the rate table. */
#if 0
if(ic->ic_rt == NULL){
printf("no table for ic_curchan\n");
ic->ic_rt = ieee80211_get_ratetable(&ic->ic_channels[0]);
}
printf("ic->ic_rt =%p\n", ic->ic_rt);
printf("rate count %d\n", ic->ic_rt->rateCount);
uint8_t code = ic->ic_rt->info[0].dot11Rate;
uint8_t cix = ic->ic_rt->info[0].ctlRateIndex;
uint8_t ctl_rate = ic->ic_rt->info[cix].dot11Rate;
printf("code=%d, cix=%d, ctl_rate=%d\n", code, cix, ctl_rate);
uint8_t rix0 = ic->ic_rt->rateCodeToIndex[130];
uint8_t rix1 = ic->ic_rt->rateCodeToIndex[132];
uint8_t rix2 = ic->ic_rt->rateCodeToIndex[139];
uint8_t rix3 = ic->ic_rt->rateCodeToIndex[150];
printf("rix0 %u,rix1 %u,rix2 %u,rix3 %u\n", rix0,rix1,rix2,rix3);
printf("lpAckDuration=%u\n", ic->ic_rt->info[0].lpAckDuration);
printf("rate=%d\n", ic->ic_rt->info[0].rateKbps);
#endif
return 0;
}
int32_t
wtap_detach(struct wtap_softc *sc)
{
- struct ifnet *ifp = sc->sc_ifp;
- struct ieee80211com *ic = ifp->if_l2com;
+ struct ieee80211com *ic = &sc->sc_ic;
DWTAP_PRINTF("%s\n", __func__);
ieee80211_ageq_drain(&ic->ic_stageq);
ieee80211_ifdetach(ic);
- if_free(ifp);
return 0;
}
void
wtap_resume(struct wtap_softc *sc)
{
DWTAP_PRINTF("%s\n", __func__);
}
void
wtap_suspend(struct wtap_softc *sc)
{
DWTAP_PRINTF("%s\n", __func__);
}
void
wtap_shutdown(struct wtap_softc *sc)
{
DWTAP_PRINTF("%s\n", __func__);
}
void
wtap_intr(struct wtap_softc *sc)
{
DWTAP_PRINTF("%s\n", __func__);
}
Index: head/sys/dev/wtap/if_wtapvar.h
===================================================================
--- head/sys/dev/wtap/if_wtapvar.h (revision 287196)
+++ head/sys/dev/wtap/if_wtapvar.h (revision 287197)
@@ -1,164 +1,160 @@
/*-
* Copyright (c) 2010-2011 Monthadar Al Jaberi, TerraNet AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
* $FreeBSD$
*/
#ifndef _DEV_WTAP_WTAPVAR_H
#define _DEV_WTAP_WTAPVAR_H
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#include <net/bpf.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#if 0
#define DWTAP_PRINTF(...) printf(__VA_ARGS__)
#else
#define DWTAP_PRINTF(...)
#endif
#include "if_wtapioctl.h"
#define MAX_NBR_WTAP (64)
#define BEACON_INTRERVAL (1000)
MALLOC_DECLARE(M_WTAP);
MALLOC_DECLARE(M_WTAP_PACKET);
MALLOC_DECLARE(M_WTAP_BEACON);
MALLOC_DECLARE(M_WTAP_RXBUF);
MALLOC_DECLARE(M_WTAP_PLUGIN);
/* driver-specific node state */
struct wtap_node {
struct ieee80211_node an_node; /* base class */
/* future addons */
};
#define WTAP_NODE(ni) ((struct ath_node *)(ni))
#define WTAP_NODE_CONST(ni) ((const struct ath_node *)(ni))
struct wtap_buf {
STAILQ_ENTRY(wtap_buf) bf_list;
struct mbuf *m; /* mbuf for buf */
};
typedef STAILQ_HEAD(, wtap_buf) wtap_bufhead;
#define WTAP_BUF_BUSY 0x00000002 /* (tx) desc owned by h/w */
struct wtap_vap {
struct ieee80211vap av_vap; /* base class */
int32_t id; /* wtap id */
struct cdev *av_dev; /* userspace injecting frames */
struct wtap_medium *av_md; /* back pointer */
struct mbuf *beacon; /* beacon */
struct ieee80211_node *bf_node; /* pointer to the node */
struct ieee80211_beacon_offsets av_boff;/* dynamic update state */
struct callout av_swba; /* software beacon alert */
uint32_t av_bcinterval; /* beacon interval */
void (*av_recv_mgmt)(struct ieee80211_node *,
struct mbuf *, int, const struct ieee80211_rx_stats *, int, int);
int (*av_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
void (*av_bmiss)(struct ieee80211vap *);
};
#define WTAP_VAP(vap) ((struct wtap_vap *)(vap))
struct taskqueue;
struct wtap_softc {
+ struct ieee80211com sc_ic;
char name[7]; /* wtapXX\0 */
int32_t id;
int32_t up;
- struct ifnet *sc_ifp; /* interface common */
struct wtap_medium *sc_md; /* interface medium */
struct ieee80211_node* (* sc_node_alloc)
(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN]);
void (*sc_node_free)(struct ieee80211_node *);
- int (*if_output) /* output routine (enqueue) */
- (struct ifnet *, struct mbuf *, struct sockaddr *, struct route *);
- void (*if_input) (struct ifnet *, struct mbuf *);/* from h/w driver */
- int (*if_transmit)(struct ifnet *, struct mbuf *);/* output routine */
struct mtx sc_mtx; /* master lock (recursive) */
struct taskqueue *sc_tq; /* private task queue */
wtap_bufhead sc_rxbuf; /* receive buffer */
struct task sc_rxtask; /* rx int processing */
struct wtap_tx_radiotap_header sc_tx_th;
int sc_tx_th_len;
struct wtap_rx_radiotap_header sc_rx_th;
int sc_rx_th_len;
};
int32_t wtap_attach(struct wtap_softc *, const uint8_t *macaddr);
int32_t wtap_detach(struct wtap_softc *);
void wtap_resume(struct wtap_softc *);
void wtap_suspend(struct wtap_softc *);
void wtap_shutdown(struct wtap_softc *);
void wtap_intr(struct wtap_softc *);
void wtap_inject(struct wtap_softc *, struct mbuf *);
void wtap_rx_deliver(struct wtap_softc *, struct mbuf *);
#endif
Index: head/sys/net80211/ieee80211.c
===================================================================
--- head/sys/net80211/ieee80211.c (revision 287196)
+++ head/sys/net80211/ieee80211.c (revision 287197)
@@ -1,1877 +1,1813 @@
/*-
* Copyright (c) 2001 Atsushi Onoe
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* IEEE 802.11 generic handler
*/
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/socket.h>
+#include <sys/sbuf.h>
#include <machine/stdarg.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ethernet.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#include <net80211/ieee80211_ratectl.h>
#include <net/bpf.h>
const char *ieee80211_phymode_name[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = "auto",
[IEEE80211_MODE_11A] = "11a",
[IEEE80211_MODE_11B] = "11b",
[IEEE80211_MODE_11G] = "11g",
[IEEE80211_MODE_FH] = "FH",
[IEEE80211_MODE_TURBO_A] = "turboA",
[IEEE80211_MODE_TURBO_G] = "turboG",
[IEEE80211_MODE_STURBO_A] = "sturboA",
[IEEE80211_MODE_HALF] = "half",
[IEEE80211_MODE_QUARTER] = "quarter",
[IEEE80211_MODE_11NA] = "11na",
[IEEE80211_MODE_11NG] = "11ng",
};
/* map ieee80211_opmode to the corresponding capability bit */
const int ieee80211_opcap[IEEE80211_OPMODE_MAX] = {
[IEEE80211_M_IBSS] = IEEE80211_C_IBSS,
[IEEE80211_M_WDS] = IEEE80211_C_WDS,
[IEEE80211_M_STA] = IEEE80211_C_STA,
[IEEE80211_M_AHDEMO] = IEEE80211_C_AHDEMO,
[IEEE80211_M_HOSTAP] = IEEE80211_C_HOSTAP,
[IEEE80211_M_MONITOR] = IEEE80211_C_MONITOR,
#ifdef IEEE80211_SUPPORT_MESH
[IEEE80211_M_MBSS] = IEEE80211_C_MBSS,
#endif
};
const uint8_t ieee80211broadcastaddr[IEEE80211_ADDR_LEN] =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static void ieee80211_syncflag_locked(struct ieee80211com *ic, int flag);
static void ieee80211_syncflag_ht_locked(struct ieee80211com *ic, int flag);
static void ieee80211_syncflag_ext_locked(struct ieee80211com *ic, int flag);
static int ieee80211_media_setup(struct ieee80211com *ic,
struct ifmedia *media, int caps, int addsta,
ifm_change_cb_t media_change, ifm_stat_cb_t media_stat);
-static void ieee80211com_media_status(struct ifnet *, struct ifmediareq *);
-static int ieee80211com_media_change(struct ifnet *);
static int media_status(enum ieee80211_opmode,
const struct ieee80211_channel *);
static uint64_t ieee80211_get_counter(struct ifnet *, ift_counter);
MALLOC_DEFINE(M_80211_VAP, "80211vap", "802.11 vap state");
/*
* Default supported rates for 802.11 operation (in IEEE .5Mb units).
*/
#define B(r) ((r) | IEEE80211_RATE_BASIC)
static const struct ieee80211_rateset ieee80211_rateset_11a =
{ 8, { B(12), 18, B(24), 36, B(48), 72, 96, 108 } };
static const struct ieee80211_rateset ieee80211_rateset_half =
{ 8, { B(6), 9, B(12), 18, B(24), 36, 48, 54 } };
static const struct ieee80211_rateset ieee80211_rateset_quarter =
{ 8, { B(3), 4, B(6), 9, B(12), 18, 24, 27 } };
static const struct ieee80211_rateset ieee80211_rateset_11b =
{ 4, { B(2), B(4), B(11), B(22) } };
/* NB: OFDM rates are handled specially based on mode */
static const struct ieee80211_rateset ieee80211_rateset_11g =
{ 12, { B(2), B(4), B(11), B(22), 12, 18, 24, 36, 48, 72, 96, 108 } };
#undef B
/*
* Fill in 802.11 available channel set, mark
* all available channels as active, and pick
* a default channel if not already specified.
*/
-static void
+void
ieee80211_chan_init(struct ieee80211com *ic)
{
#define DEFAULTRATES(m, def) do { \
if (ic->ic_sup_rates[m].rs_nrates == 0) \
ic->ic_sup_rates[m] = def; \
} while (0)
struct ieee80211_channel *c;
int i;
KASSERT(0 < ic->ic_nchans && ic->ic_nchans <= IEEE80211_CHAN_MAX,
("invalid number of channels specified: %u", ic->ic_nchans));
memset(ic->ic_chan_avail, 0, sizeof(ic->ic_chan_avail));
memset(ic->ic_modecaps, 0, sizeof(ic->ic_modecaps));
setbit(ic->ic_modecaps, IEEE80211_MODE_AUTO);
for (i = 0; i < ic->ic_nchans; i++) {
c = &ic->ic_channels[i];
KASSERT(c->ic_flags != 0, ("channel with no flags"));
/*
* Help drivers that work only with frequencies by filling
* in IEEE channel #'s if not already calculated. Note this
* mimics similar work done in ieee80211_setregdomain when
* changing regulatory state.
*/
if (c->ic_ieee == 0)
c->ic_ieee = ieee80211_mhz2ieee(c->ic_freq,c->ic_flags);
if (IEEE80211_IS_CHAN_HT40(c) && c->ic_extieee == 0)
c->ic_extieee = ieee80211_mhz2ieee(c->ic_freq +
(IEEE80211_IS_CHAN_HT40U(c) ? 20 : -20),
c->ic_flags);
/* default max tx power to max regulatory */
if (c->ic_maxpower == 0)
c->ic_maxpower = 2*c->ic_maxregpower;
setbit(ic->ic_chan_avail, c->ic_ieee);
/*
* Identify mode capabilities.
*/
if (IEEE80211_IS_CHAN_A(c))
setbit(ic->ic_modecaps, IEEE80211_MODE_11A);
if (IEEE80211_IS_CHAN_B(c))
setbit(ic->ic_modecaps, IEEE80211_MODE_11B);
if (IEEE80211_IS_CHAN_ANYG(c))
setbit(ic->ic_modecaps, IEEE80211_MODE_11G);
if (IEEE80211_IS_CHAN_FHSS(c))
setbit(ic->ic_modecaps, IEEE80211_MODE_FH);
if (IEEE80211_IS_CHAN_108A(c))
setbit(ic->ic_modecaps, IEEE80211_MODE_TURBO_A);
if (IEEE80211_IS_CHAN_108G(c))
setbit(ic->ic_modecaps, IEEE80211_MODE_TURBO_G);
if (IEEE80211_IS_CHAN_ST(c))
setbit(ic->ic_modecaps, IEEE80211_MODE_STURBO_A);
if (IEEE80211_IS_CHAN_HALF(c))
setbit(ic->ic_modecaps, IEEE80211_MODE_HALF);
if (IEEE80211_IS_CHAN_QUARTER(c))
setbit(ic->ic_modecaps, IEEE80211_MODE_QUARTER);
if (IEEE80211_IS_CHAN_HTA(c))
setbit(ic->ic_modecaps, IEEE80211_MODE_11NA);
if (IEEE80211_IS_CHAN_HTG(c))
setbit(ic->ic_modecaps, IEEE80211_MODE_11NG);
}
/* initialize candidate channels to all available */
memcpy(ic->ic_chan_active, ic->ic_chan_avail,
sizeof(ic->ic_chan_avail));
/* sort channel table to allow lookup optimizations */
ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans);
/* invalidate any previous state */
ic->ic_bsschan = IEEE80211_CHAN_ANYC;
ic->ic_prevchan = NULL;
ic->ic_csa_newchan = NULL;
/* arbitrarily pick the first channel */
ic->ic_curchan = &ic->ic_channels[0];
ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan);
/* fillin well-known rate sets if driver has not specified */
DEFAULTRATES(IEEE80211_MODE_11B, ieee80211_rateset_11b);
DEFAULTRATES(IEEE80211_MODE_11G, ieee80211_rateset_11g);
DEFAULTRATES(IEEE80211_MODE_11A, ieee80211_rateset_11a);
DEFAULTRATES(IEEE80211_MODE_TURBO_A, ieee80211_rateset_11a);
DEFAULTRATES(IEEE80211_MODE_TURBO_G, ieee80211_rateset_11g);
DEFAULTRATES(IEEE80211_MODE_STURBO_A, ieee80211_rateset_11a);
DEFAULTRATES(IEEE80211_MODE_HALF, ieee80211_rateset_half);
DEFAULTRATES(IEEE80211_MODE_QUARTER, ieee80211_rateset_quarter);
DEFAULTRATES(IEEE80211_MODE_11NA, ieee80211_rateset_11a);
DEFAULTRATES(IEEE80211_MODE_11NG, ieee80211_rateset_11g);
/*
 * Set up the information required to fill the mcsset field if the driver
 * did not. Assume a 2T2R setup for historical reasons.
*/
if (ic->ic_rxstream == 0)
ic->ic_rxstream = 2;
if (ic->ic_txstream == 0)
ic->ic_txstream = 2;
/*
* Set auto mode to reset active channel state and any desired channel.
*/
(void) ieee80211_setmode(ic, IEEE80211_MODE_AUTO);
#undef DEFAULTRATES
}
static void
null_update_mcast(struct ieee80211com *ic)
{
ic_printf(ic, "need multicast update callback\n");
}
static void
null_update_promisc(struct ieee80211com *ic)
{
ic_printf(ic, "need promiscuous mode update callback\n");
}
-static int
-null_transmit(struct ifnet *ifp, struct mbuf *m)
-{
- m_freem(m);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- return EACCES; /* XXX EIO/EPERM? */
-}
-
-static int
-null_output(struct ifnet *ifp, struct mbuf *m,
- const struct sockaddr *dst, struct route *ro)
-{
- if_printf(ifp, "discard raw packet\n");
- return null_transmit(ifp, m);
-}
-
static void
-null_input(struct ifnet *ifp, struct mbuf *m)
-{
- if_printf(ifp, "if_input should not be called\n");
- m_freem(m);
-}
-
-static void
null_update_chw(struct ieee80211com *ic)
{
ic_printf(ic, "%s: need callback\n", __func__);
}
int
ic_printf(struct ieee80211com *ic, const char * fmt, ...)
{
va_list ap;
int retval;
retval = printf("%s: ", ic->ic_name);
va_start(ap, fmt);
retval += vprintf(fmt, ap);
va_end(ap);
return (retval);
}
+static LIST_HEAD(, ieee80211com) ic_head = LIST_HEAD_INITIALIZER(ic_head);
+static struct mtx ic_list_mtx;
+MTX_SYSINIT(ic_list, &ic_list_mtx, "ieee80211com list", MTX_DEF);
+
+static int
+sysctl_ieee80211coms(SYSCTL_HANDLER_ARGS)
+{
+ struct ieee80211com *ic;
+ struct sbuf *sb;
+ char *sp;
+ int error;
+
+ sb = sbuf_new_auto();
+ sp = "";
+ mtx_lock(&ic_list_mtx);
+ LIST_FOREACH(ic, &ic_head, ic_next) {
+ sbuf_printf(sb, "%s%s", sp, ic->ic_name);
+ sp = " ";
+ }
+ mtx_unlock(&ic_list_mtx);
+ sbuf_finish(sb);
+ error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
+ sbuf_delete(sb);
+ return (error);
+}
+
+SYSCTL_PROC(_net_wlan, OID_AUTO, devices,
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
+ sysctl_ieee80211coms, "A", "names of available 802.11 devices");
+
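The handler above exports the names of all attached com instances as a single space-separated string under the net.wlan.devices sysctl. A minimal userland sketch for reading it, assuming only the standard sysctlbyname(3) interface (the 256-byte buffer is an arbitrary size chosen for illustration):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	char buf[256];		/* arbitrary size for this sketch */
	size_t len = sizeof(buf);

	/* Fetch the space-separated device names, e.g. "ath0 iwn0". */
	if (sysctlbyname("net.wlan.devices", buf, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("802.11 devices: %s\n", buf);
	return (0);
}

On a machine with a single attached device this would print something like "802.11 devices: ath0".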
/*
* Attach/setup the common net80211 state. Called by
 * the driver on attach, prior to creating any vap's.
*/
void
-ieee80211_ifattach(struct ieee80211com *ic,
- const uint8_t macaddr[IEEE80211_ADDR_LEN])
+ieee80211_ifattach(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct sockaddr_dl *sdl;
- struct ifaddr *ifa;
- KASSERT(ifp->if_type == IFT_IEEE80211, ("if_type %d", ifp->if_type));
-
IEEE80211_LOCK_INIT(ic, ic->ic_name);
IEEE80211_TX_LOCK_INIT(ic, ic->ic_name);
TAILQ_INIT(&ic->ic_vaps);
/* Create a taskqueue for all state changes */
ic->ic_tq = taskqueue_create("ic_taskq", M_WAITOK | M_ZERO,
taskqueue_thread_enqueue, &ic->ic_tq);
taskqueue_start_threads(&ic->ic_tq, 1, PI_NET, "%s net80211 taskq",
ic->ic_name);
ic->ic_ierrors = counter_u64_alloc(M_WAITOK);
ic->ic_oerrors = counter_u64_alloc(M_WAITOK);
/*
* Fill in 802.11 available channel set, mark all
* available channels as active, and pick a default
* channel if not already specified.
*/
- ieee80211_media_init(ic);
+ ieee80211_chan_init(ic);
ic->ic_update_mcast = null_update_mcast;
ic->ic_update_promisc = null_update_promisc;
ic->ic_update_chw = null_update_chw;
ic->ic_hash_key = arc4random();
ic->ic_bintval = IEEE80211_BINTVAL_DEFAULT;
ic->ic_lintval = ic->ic_bintval;
ic->ic_txpowlimit = IEEE80211_TXPOWER_MAX;
ieee80211_crypto_attach(ic);
ieee80211_node_attach(ic);
ieee80211_power_attach(ic);
ieee80211_proto_attach(ic);
#ifdef IEEE80211_SUPPORT_SUPERG
ieee80211_superg_attach(ic);
#endif
ieee80211_ht_attach(ic);
ieee80211_scan_attach(ic);
ieee80211_regdomain_attach(ic);
ieee80211_dfs_attach(ic);
ieee80211_sysctl_attach(ic);
- ifp->if_addrlen = IEEE80211_ADDR_LEN;
- ifp->if_hdrlen = 0;
-
- CURVNET_SET(vnet0);
-
- if_attach(ifp);
-
- ifp->if_mtu = IEEE80211_MTU_MAX;
- ifp->if_broadcastaddr = ieee80211broadcastaddr;
- ifp->if_output = null_output;
- ifp->if_input = null_input; /* just in case */
- ifp->if_resolvemulti = NULL; /* NB: callers check */
-
- ifa = ifaddr_byindex(ifp->if_index);
- KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
- sdl = (struct sockaddr_dl *)ifa->ifa_addr;
- sdl->sdl_type = IFT_ETHER; /* XXX IFT_IEEE80211? */
- sdl->sdl_alen = IEEE80211_ADDR_LEN;
- IEEE80211_ADDR_COPY(LLADDR(sdl), macaddr);
- ifa_free(ifa);
-
- CURVNET_RESTORE();
+ mtx_lock(&ic_list_mtx);
+ LIST_INSERT_HEAD(&ic_head, ic, ic_next);
+ mtx_unlock(&ic_list_mtx);
}
/*
* Detach net80211 state on device detach. Tear down
* all vap's and reclaim all common state prior to the
* device state going away. Note we may call back into
 * the driver; it must be prepared for this.
*/
void
ieee80211_ifdetach(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
struct ieee80211vap *vap;
- /*
- * This detaches the main interface, but not the vaps.
- * Each VAP may be in a separate VIMAGE.
- */
- CURVNET_SET(ifp->if_vnet);
- if_detach(ifp);
- CURVNET_RESTORE();
+ mtx_lock(&ic_list_mtx);
+ LIST_REMOVE(ic, ic_next);
+ mtx_unlock(&ic_list_mtx);
/*
* The VAP is responsible for setting and clearing
* the VIMAGE context.
*/
while ((vap = TAILQ_FIRST(&ic->ic_vaps)) != NULL)
ieee80211_vap_destroy(vap);
ieee80211_waitfor_parent(ic);
ieee80211_sysctl_detach(ic);
ieee80211_dfs_detach(ic);
ieee80211_regdomain_detach(ic);
ieee80211_scan_detach(ic);
#ifdef IEEE80211_SUPPORT_SUPERG
ieee80211_superg_detach(ic);
#endif
ieee80211_ht_detach(ic);
/* NB: must be called before ieee80211_node_detach */
ieee80211_proto_detach(ic);
ieee80211_crypto_detach(ic);
ieee80211_power_detach(ic);
ieee80211_node_detach(ic);
- /* XXX VNET needed? */
- ifmedia_removeall(&ic->ic_media);
counter_u64_free(ic->ic_ierrors);
counter_u64_free(ic->ic_oerrors);
taskqueue_free(ic->ic_tq);
IEEE80211_TX_LOCK_DESTROY(ic);
IEEE80211_LOCK_DESTROY(ic);
}
+struct ieee80211com *
+ieee80211_find_com(const char *name)
+{
+ struct ieee80211com *ic;
+
+ mtx_lock(&ic_list_mtx);
+ LIST_FOREACH(ic, &ic_head, ic_next)
+ if (strcmp(ic->ic_name, name) == 0)
+ break;
+ mtx_unlock(&ic_list_mtx);
+
+ return (ic);
+}
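ieee80211_find_com() gives consumers such as the wlan(4) clone path a way to resolve a parent device name back to its ieee80211com now that there is no parent ifnet to look up by name. A hedged sketch of a hypothetical caller; the helper name and the message printed on failure are illustrative, not part of this revision:

/*
 * Hypothetical helper: resolve the parent device named in a clone
 * request.  Returns NULL if the device never registered itself via
 * ieee80211_ifattach(); the caller would map that to an error such
 * as ENXIO.
 */
static struct ieee80211com *
example_find_parent(const char *devname)
{
	struct ieee80211com *ic;

	ic = ieee80211_find_com(devname);	/* e.g. "ath0" */
	if (ic == NULL)
		printf("%s: no such 802.11 device\n", devname);
	return (ic);
}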
+
/*
* Default reset method for use with the ioctl support. This
* method is invoked after any state change in the 802.11
* layer that should be propagated to the hardware but not
 * require re-initialization of the 802.11 state machine (e.g.
* rescanning for an ap). We always return ENETRESET which
* should cause the driver to re-initialize the device. Drivers
* can override this method to implement more optimized support.
*/
static int
default_reset(struct ieee80211vap *vap, u_long cmd)
{
return ENETRESET;
}
/*
* Add underlying device errors to vap errors.
*/
static uint64_t
ieee80211_get_counter(struct ifnet *ifp, ift_counter cnt)
{
struct ieee80211vap *vap = ifp->if_softc;
struct ieee80211com *ic = vap->iv_ic;
uint64_t rv;
rv = if_get_counter_default(ifp, cnt);
switch (cnt) {
case IFCOUNTER_OERRORS:
rv += counter_u64_fetch(ic->ic_oerrors);
break;
case IFCOUNTER_IERRORS:
rv += counter_u64_fetch(ic->ic_ierrors);
break;
default:
break;
}
return (rv);
}
/*
* Prepare a vap for use. Drivers use this call to
 * set up net80211 state in new vap's prior to attaching
* them with ieee80211_vap_attach (below).
*/
int
ieee80211_vap_setup(struct ieee80211com *ic, struct ieee80211vap *vap,
const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode,
- int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
- const uint8_t macaddr[IEEE80211_ADDR_LEN])
+ int flags, const uint8_t bssid[IEEE80211_ADDR_LEN])
{
struct ifnet *ifp;
ifp = if_alloc(IFT_ETHER);
if (ifp == NULL) {
ic_printf(ic, "%s: unable to allocate ifnet\n",
__func__);
return ENOMEM;
}
if_initname(ifp, name, unit);
ifp->if_softc = vap; /* back pointer */
ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
ifp->if_transmit = ieee80211_vap_transmit;
ifp->if_qflush = ieee80211_vap_qflush;
ifp->if_ioctl = ieee80211_ioctl;
ifp->if_init = ieee80211_init;
ifp->if_get_counter = ieee80211_get_counter;
vap->iv_ifp = ifp;
vap->iv_ic = ic;
vap->iv_flags = ic->ic_flags; /* propagate common flags */
vap->iv_flags_ext = ic->ic_flags_ext;
vap->iv_flags_ven = ic->ic_flags_ven;
vap->iv_caps = ic->ic_caps &~ IEEE80211_C_OPMODE;
vap->iv_htcaps = ic->ic_htcaps;
vap->iv_htextcaps = ic->ic_htextcaps;
vap->iv_opmode = opmode;
vap->iv_caps |= ieee80211_opcap[opmode];
+ vap->iv_myaddr = ic->ic_macaddr;
switch (opmode) {
case IEEE80211_M_WDS:
/*
* WDS links must specify the bssid of the far end.
* For legacy operation this is a static relationship.
* For non-legacy operation the station must associate
* and be authorized to pass traffic. Plumbing the
* vap to the proper node happens when the vap
* transitions to RUN state.
*/
IEEE80211_ADDR_COPY(vap->iv_des_bssid, bssid);
vap->iv_flags |= IEEE80211_F_DESBSSID;
if (flags & IEEE80211_CLONE_WDSLEGACY)
vap->iv_flags_ext |= IEEE80211_FEXT_WDSLEGACY;
break;
#ifdef IEEE80211_SUPPORT_TDMA
case IEEE80211_M_AHDEMO:
if (flags & IEEE80211_CLONE_TDMA) {
/* NB: checked before clone operation allowed */
KASSERT(ic->ic_caps & IEEE80211_C_TDMA,
("not TDMA capable, ic_caps 0x%x", ic->ic_caps));
/*
* Propagate TDMA capability to mark vap; this
* cannot be removed and is used to distinguish
* regular ahdemo operation from ahdemo+tdma.
*/
vap->iv_caps |= IEEE80211_C_TDMA;
}
break;
#endif
default:
break;
}
/* auto-enable s/w beacon miss support */
if (flags & IEEE80211_CLONE_NOBEACONS)
vap->iv_flags_ext |= IEEE80211_FEXT_SWBMISS;
/* auto-generated or user supplied MAC address */
if (flags & (IEEE80211_CLONE_BSSID|IEEE80211_CLONE_MACADDR))
vap->iv_flags_ext |= IEEE80211_FEXT_UNIQMAC;
/*
* Enable various functionality by default if we're
* capable; the driver can override us if it knows better.
*/
if (vap->iv_caps & IEEE80211_C_WME)
vap->iv_flags |= IEEE80211_F_WME;
if (vap->iv_caps & IEEE80211_C_BURST)
vap->iv_flags |= IEEE80211_F_BURST;
/* NB: bg scanning only makes sense for station mode right now */
if (vap->iv_opmode == IEEE80211_M_STA &&
(vap->iv_caps & IEEE80211_C_BGSCAN))
vap->iv_flags |= IEEE80211_F_BGSCAN;
vap->iv_flags |= IEEE80211_F_DOTH; /* XXX no cap, just ena */
/* NB: DFS support only makes sense for ap mode right now */
if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
(vap->iv_caps & IEEE80211_C_DFS))
vap->iv_flags_ext |= IEEE80211_FEXT_DFS;
vap->iv_des_chan = IEEE80211_CHAN_ANYC; /* any channel is ok */
vap->iv_bmissthreshold = IEEE80211_HWBMISS_DEFAULT;
vap->iv_dtim_period = IEEE80211_DTIM_DEFAULT;
/*
* Install a default reset method for the ioctl support;
* the driver can override this.
*/
vap->iv_reset = default_reset;
- IEEE80211_ADDR_COPY(vap->iv_myaddr, macaddr);
-
ieee80211_sysctl_vattach(vap);
ieee80211_crypto_vattach(vap);
ieee80211_node_vattach(vap);
ieee80211_power_vattach(vap);
ieee80211_proto_vattach(vap);
#ifdef IEEE80211_SUPPORT_SUPERG
ieee80211_superg_vattach(vap);
#endif
ieee80211_ht_vattach(vap);
ieee80211_scan_vattach(vap);
ieee80211_regdomain_vattach(vap);
ieee80211_radiotap_vattach(vap);
ieee80211_ratectl_set(vap, IEEE80211_RATECTL_NONE);
return 0;
}
/*
* Activate a vap. State should have been prepared with a
* call to ieee80211_vap_setup and by the driver. On return
* from this call the vap is ready for use.
*/
int
-ieee80211_vap_attach(struct ieee80211vap *vap,
- ifm_change_cb_t media_change, ifm_stat_cb_t media_stat)
+ieee80211_vap_attach(struct ieee80211vap *vap, ifm_change_cb_t media_change,
+ ifm_stat_cb_t media_stat, const uint8_t macaddr[IEEE80211_ADDR_LEN])
{
struct ifnet *ifp = vap->iv_ifp;
struct ieee80211com *ic = vap->iv_ic;
struct ifmediareq imr;
int maxrate;
IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
"%s: %s parent %s flags 0x%x flags_ext 0x%x\n",
__func__, ieee80211_opmode_name[vap->iv_opmode],
ic->ic_name, vap->iv_flags, vap->iv_flags_ext);
/*
* Do late attach work that cannot happen until after
* the driver has had a chance to override defaults.
*/
ieee80211_node_latevattach(vap);
ieee80211_power_latevattach(vap);
maxrate = ieee80211_media_setup(ic, &vap->iv_media, vap->iv_caps,
vap->iv_opmode == IEEE80211_M_STA, media_change, media_stat);
ieee80211_media_status(ifp, &imr);
/* NB: strip explicit mode; we're actually in autoselect */
ifmedia_set(&vap->iv_media,
imr.ifm_active &~ (IFM_MMASK | IFM_IEEE80211_TURBO));
if (maxrate)
ifp->if_baudrate = IF_Mbps(maxrate);
- ether_ifattach(ifp, vap->iv_myaddr);
+ ether_ifattach(ifp, macaddr);
+ vap->iv_myaddr = IF_LLADDR(ifp);
/* hook output method setup by ether_ifattach */
vap->iv_output = ifp->if_output;
ifp->if_output = ieee80211_output;
/* NB: if_mtu set by ether_ifattach to ETHERMTU */
IEEE80211_LOCK(ic);
TAILQ_INSERT_TAIL(&ic->ic_vaps, vap, iv_next);
ieee80211_syncflag_locked(ic, IEEE80211_F_WME);
#ifdef IEEE80211_SUPPORT_SUPERG
ieee80211_syncflag_locked(ic, IEEE80211_F_TURBOP);
#endif
ieee80211_syncflag_locked(ic, IEEE80211_F_PCF);
ieee80211_syncflag_locked(ic, IEEE80211_F_BURST);
ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_HT);
ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_USEHT40);
- ieee80211_syncifflag_locked(ic, IFF_PROMISC);
- ieee80211_syncifflag_locked(ic, IFF_ALLMULTI);
IEEE80211_UNLOCK(ic);
return 1;
}
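Taken together, the new prototypes move the MAC address argument from ieee80211_vap_setup() to ieee80211_vap_attach(). A hedged sketch of a driver-side vap-create path under the new signatures; the function name, the bare M_80211_VAP allocation (real drivers embed the vap in a larger driver structure) and the omitted method overrides are illustrative assumptions, not code from this revision:

/* Sketch of a driver ic_vap_create-style callback with the new API. */
static struct ieee80211vap *
example_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ],
    int unit, enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct ieee80211vap *vap;

	vap = malloc(sizeof(*vap), M_80211_VAP, M_WAITOK | M_ZERO);
	if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
	    bssid) != 0) {
		free(vap, M_80211_VAP);
		return (NULL);
	}
	/* ... driver-specific vap method overrides would go here ... */
	ieee80211_vap_attach(vap, ieee80211_media_change,
	    ieee80211_media_status, mac);	/* MAC now given at attach */
	return (vap);
}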
/*
* Tear down vap state and reclaim the ifnet.
* The driver is assumed to have prepared for
* this; e.g. by turning off interrupts for the
* underlying device.
*/
void
ieee80211_vap_detach(struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct ifnet *ifp = vap->iv_ifp;
CURVNET_SET(ifp->if_vnet);
IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE, "%s: %s parent %s\n",
__func__, ieee80211_opmode_name[vap->iv_opmode], ic->ic_name);
/* NB: bpfdetach is called by ether_ifdetach and claims all taps */
ether_ifdetach(ifp);
ieee80211_stop(vap);
/*
* Flush any deferred vap tasks.
*/
ieee80211_draintask(ic, &vap->iv_nstate_task);
ieee80211_draintask(ic, &vap->iv_swbmiss_task);
/* XXX band-aid until ifnet handles this for us */
taskqueue_drain(taskqueue_swi, &ifp->if_linktask);
IEEE80211_LOCK(ic);
	KASSERT(vap->iv_state == IEEE80211_S_INIT, ("vap still running"));
TAILQ_REMOVE(&ic->ic_vaps, vap, iv_next);
ieee80211_syncflag_locked(ic, IEEE80211_F_WME);
#ifdef IEEE80211_SUPPORT_SUPERG
ieee80211_syncflag_locked(ic, IEEE80211_F_TURBOP);
#endif
ieee80211_syncflag_locked(ic, IEEE80211_F_PCF);
ieee80211_syncflag_locked(ic, IEEE80211_F_BURST);
ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_HT);
ieee80211_syncflag_ht_locked(ic, IEEE80211_FHT_USEHT40);
/* NB: this handles the bpfdetach done below */
ieee80211_syncflag_ext_locked(ic, IEEE80211_FEXT_BPF);
- ieee80211_syncifflag_locked(ic, IFF_PROMISC);
- ieee80211_syncifflag_locked(ic, IFF_ALLMULTI);
+ if (vap->iv_ifflags & IFF_PROMISC)
+ ieee80211_promisc(vap, false);
+ if (vap->iv_ifflags & IFF_ALLMULTI)
+ ieee80211_allmulti(vap, false);
IEEE80211_UNLOCK(ic);
ifmedia_removeall(&vap->iv_media);
ieee80211_radiotap_vdetach(vap);
ieee80211_regdomain_vdetach(vap);
ieee80211_scan_vdetach(vap);
#ifdef IEEE80211_SUPPORT_SUPERG
ieee80211_superg_vdetach(vap);
#endif
ieee80211_ht_vdetach(vap);
/* NB: must be before ieee80211_node_vdetach */
ieee80211_proto_vdetach(vap);
ieee80211_crypto_vdetach(vap);
ieee80211_power_vdetach(vap);
ieee80211_node_vdetach(vap);
ieee80211_sysctl_vdetach(vap);
if_free(ifp);
CURVNET_RESTORE();
}
/*
- * Synchronize flag bit state in the parent ifnet structure
- * according to the state of all vap ifnet's. This is used,
- * for example, to handle IFF_PROMISC and IFF_ALLMULTI.
+ * Count the number of vaps in promiscuous mode and enable or
+ * disable promiscuous mode on the parent device accordingly.
*/
void
-ieee80211_syncifflag_locked(struct ieee80211com *ic, int flag)
+ieee80211_promisc(struct ieee80211vap *vap, bool on)
{
- struct ifnet *ifp = ic->ic_ifp;
- struct ieee80211vap *vap;
- int bit, oflags;
+ struct ieee80211com *ic = vap->iv_ic;
- IEEE80211_LOCK_ASSERT(ic);
+ /*
+ * XXX the bridge sets PROMISC but we don't want to
+ * enable it on the device, discard here so all the
+ * drivers don't need to special-case it
+ */
+ if (!(vap->iv_opmode == IEEE80211_M_MONITOR ||
+ (vap->iv_opmode == IEEE80211_M_AHDEMO &&
+ (vap->iv_caps & IEEE80211_C_TDMA) == 0)))
+ return;
- bit = 0;
- TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
- if (vap->iv_ifp->if_flags & flag) {
- /*
- * XXX the bridge sets PROMISC but we don't want to
- * enable it on the device, discard here so all the
- * drivers don't need to special-case it
- */
- if (flag == IFF_PROMISC &&
- !(vap->iv_opmode == IEEE80211_M_MONITOR ||
- (vap->iv_opmode == IEEE80211_M_AHDEMO &&
- (vap->iv_caps & IEEE80211_C_TDMA) == 0)))
- continue;
- bit = 1;
- break;
- }
- oflags = ifp->if_flags;
- if (bit)
- ifp->if_flags |= flag;
- else
- ifp->if_flags &= ~flag;
- if ((ifp->if_flags ^ oflags) & flag) {
- /* XXX should we return 1/0 and let caller do this? */
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- if (flag == IFF_PROMISC)
- ieee80211_runtask(ic, &ic->ic_promisc_task);
- else if (flag == IFF_ALLMULTI)
- ieee80211_runtask(ic, &ic->ic_mcast_task);
- }
+ IEEE80211_LOCK(ic);
+ if (on) {
+ if (++ic->ic_promisc == 1)
+ ieee80211_runtask(ic, &ic->ic_promisc_task);
+ } else {
+ KASSERT(ic->ic_promisc > 0, ("%s: ic %p not promisc",
+ __func__, ic));
+ if (--ic->ic_promisc == 0)
+ ieee80211_runtask(ic, &ic->ic_promisc_task);
}
+ IEEE80211_UNLOCK(ic);
}
/*
+ * Count the number of vaps in allmulti mode and enable or
+ * disable allmulti on the parent device accordingly.
+ */
+void
+ieee80211_allmulti(struct ieee80211vap *vap, bool on)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+
+ IEEE80211_LOCK(ic);
+ if (on) {
+ if (++ic->ic_allmulti == 1)
+ ieee80211_runtask(ic, &ic->ic_mcast_task);
+ } else {
+ KASSERT(ic->ic_allmulti > 0, ("%s: ic %p not allmulti",
+ __func__, ic));
+ if (--ic->ic_allmulti == 0)
+ ieee80211_runtask(ic, &ic->ic_mcast_task);
+ }
+ IEEE80211_UNLOCK(ic);
+}
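With ieee80211_syncifflag_locked() gone, each vap reports its own IFF_PROMISC/IFF_ALLMULTI transitions and the com keeps reference counts (ic_promisc, ic_allmulti), so the driver task is scheduled only on the 0<->1 edges. A rough sketch of how a vap flags-change path might drive this, remembering the previous state in iv_ifflags as the detach path above expects; the surrounding function is illustrative, not the actual ioctl code:

/* Sketch: called when a vap's interface flags change (illustrative). */
static void
example_vap_ifflags_changed(struct ieee80211vap *vap, int new_ifflags)
{
	int changed = new_ifflags ^ vap->iv_ifflags;

	if (changed & IFF_PROMISC)
		ieee80211_promisc(vap, (new_ifflags & IFF_PROMISC) != 0);
	if (changed & IFF_ALLMULTI)
		ieee80211_allmulti(vap, (new_ifflags & IFF_ALLMULTI) != 0);
	vap->iv_ifflags = new_ifflags;	/* remembered for detach time */
}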
+
+/*
* Synchronize flag bit state in the com structure
* according to the state of all vap's. This is used,
* for example, to handle state changes via ioctls.
*/
static void
ieee80211_syncflag_locked(struct ieee80211com *ic, int flag)
{
struct ieee80211vap *vap;
int bit;
IEEE80211_LOCK_ASSERT(ic);
bit = 0;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
if (vap->iv_flags & flag) {
bit = 1;
break;
}
if (bit)
ic->ic_flags |= flag;
else
ic->ic_flags &= ~flag;
}
void
ieee80211_syncflag(struct ieee80211vap *vap, int flag)
{
struct ieee80211com *ic = vap->iv_ic;
IEEE80211_LOCK(ic);
if (flag < 0) {
flag = -flag;
vap->iv_flags &= ~flag;
} else
vap->iv_flags |= flag;
ieee80211_syncflag_locked(ic, flag);
IEEE80211_UNLOCK(ic);
}
/*
* Synchronize flags_ht bit state in the com structure
* according to the state of all vap's. This is used,
* for example, to handle state changes via ioctls.
*/
static void
ieee80211_syncflag_ht_locked(struct ieee80211com *ic, int flag)
{
struct ieee80211vap *vap;
int bit;
IEEE80211_LOCK_ASSERT(ic);
bit = 0;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
if (vap->iv_flags_ht & flag) {
bit = 1;
break;
}
if (bit)
ic->ic_flags_ht |= flag;
else
ic->ic_flags_ht &= ~flag;
}
void
ieee80211_syncflag_ht(struct ieee80211vap *vap, int flag)
{
struct ieee80211com *ic = vap->iv_ic;
IEEE80211_LOCK(ic);
if (flag < 0) {
flag = -flag;
vap->iv_flags_ht &= ~flag;
} else
vap->iv_flags_ht |= flag;
ieee80211_syncflag_ht_locked(ic, flag);
IEEE80211_UNLOCK(ic);
}
/*
* Synchronize flags_ext bit state in the com structure
* according to the state of all vap's. This is used,
* for example, to handle state changes via ioctls.
*/
static void
ieee80211_syncflag_ext_locked(struct ieee80211com *ic, int flag)
{
struct ieee80211vap *vap;
int bit;
IEEE80211_LOCK_ASSERT(ic);
bit = 0;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
if (vap->iv_flags_ext & flag) {
bit = 1;
break;
}
if (bit)
ic->ic_flags_ext |= flag;
else
ic->ic_flags_ext &= ~flag;
}
void
ieee80211_syncflag_ext(struct ieee80211vap *vap, int flag)
{
struct ieee80211com *ic = vap->iv_ic;
IEEE80211_LOCK(ic);
if (flag < 0) {
flag = -flag;
vap->iv_flags_ext &= ~flag;
} else
vap->iv_flags_ext |= flag;
ieee80211_syncflag_ext_locked(ic, flag);
IEEE80211_UNLOCK(ic);
}
static __inline int
mapgsm(u_int freq, u_int flags)
{
freq *= 10;
if (flags & IEEE80211_CHAN_QUARTER)
freq += 5;
else if (flags & IEEE80211_CHAN_HALF)
freq += 10;
else
freq += 20;
/* NB: there is no 907/20 wide but leave room */
return (freq - 906*10) / 5;
}
static __inline int
mappsb(u_int freq, u_int flags)
{
return 37 + ((freq * 10) + ((freq % 5) == 2 ? 5 : 0) - 49400) / 5;
}
/*
* Convert MHz frequency to IEEE channel number.
*/
int
ieee80211_mhz2ieee(u_int freq, u_int flags)
{
#define IS_FREQ_IN_PSB(_freq) ((_freq) > 4940 && (_freq) < 4990)
if (flags & IEEE80211_CHAN_GSM)
return mapgsm(freq, flags);
if (flags & IEEE80211_CHAN_2GHZ) { /* 2GHz band */
if (freq == 2484)
return 14;
if (freq < 2484)
return ((int) freq - 2407) / 5;
else
return 15 + ((freq - 2512) / 20);
} else if (flags & IEEE80211_CHAN_5GHZ) { /* 5Ghz band */
if (freq <= 5000) {
/* XXX check regdomain? */
if (IS_FREQ_IN_PSB(freq))
return mappsb(freq, flags);
return (freq - 4000) / 5;
} else
return (freq - 5000) / 5;
} else { /* either, guess */
if (freq == 2484)
return 14;
if (freq < 2484) {
if (907 <= freq && freq <= 922)
return mapgsm(freq, flags);
return ((int) freq - 2407) / 5;
}
if (freq < 5000) {
if (IS_FREQ_IN_PSB(freq))
return mappsb(freq, flags);
else if (freq > 4900)
return (freq - 4000) / 5;
else
return 15 + ((freq - 2512) / 20);
}
return (freq - 5000) / 5;
}
#undef IS_FREQ_IN_PSB
}
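A few spot checks of the mapping above: 2412 MHz in the 2 GHz band maps to channel 1, 2484 MHz to channel 14, and 5180 MHz in the 5 GHz band to channel 36. Expressed as a hedged kernel-context sketch (the helper and its KASSERTs are illustrative only, not part of this revision):

/* Illustrative spot checks of ieee80211_mhz2ieee(). */
static void
example_check_mhz2ieee(void)
{
	KASSERT(ieee80211_mhz2ieee(2412, IEEE80211_CHAN_2GHZ) == 1,
	    ("2412 MHz -> channel 1"));
	KASSERT(ieee80211_mhz2ieee(2484, IEEE80211_CHAN_2GHZ) == 14,
	    ("2484 MHz -> channel 14"));
	KASSERT(ieee80211_mhz2ieee(5180, IEEE80211_CHAN_5GHZ) == 36,
	    ("5180 MHz -> channel 36"));
}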
/*
* Convert channel to IEEE channel number.
*/
int
ieee80211_chan2ieee(struct ieee80211com *ic, const struct ieee80211_channel *c)
{
if (c == NULL) {
ic_printf(ic, "invalid channel (NULL)\n");
return 0; /* XXX */
}
return (c == IEEE80211_CHAN_ANYC ? IEEE80211_CHAN_ANY : c->ic_ieee);
}
/*
* Convert IEEE channel number to MHz frequency.
*/
u_int
ieee80211_ieee2mhz(u_int chan, u_int flags)
{
if (flags & IEEE80211_CHAN_GSM)
return 907 + 5 * (chan / 10);
if (flags & IEEE80211_CHAN_2GHZ) { /* 2GHz band */
if (chan == 14)
return 2484;
if (chan < 14)
return 2407 + chan*5;
else
return 2512 + ((chan-15)*20);
} else if (flags & IEEE80211_CHAN_5GHZ) {/* 5Ghz band */
if (flags & (IEEE80211_CHAN_HALF|IEEE80211_CHAN_QUARTER)) {
chan -= 37;
return 4940 + chan*5 + (chan % 5 ? 2 : 0);
}
return 5000 + (chan*5);
} else { /* either, guess */
/* XXX can't distinguish PSB+GSM channels */
if (chan == 14)
return 2484;
if (chan < 14) /* 0-13 */
return 2407 + chan*5;
if (chan < 27) /* 15-26 */
return 2512 + ((chan-15)*20);
return 5000 + (chan*5);
}
}
/*
* Locate a channel given a frequency+flags. We cache
* the previous lookup to optimize switching between two
* channels--as happens with dynamic turbo.
*/
struct ieee80211_channel *
ieee80211_find_channel(struct ieee80211com *ic, int freq, int flags)
{
struct ieee80211_channel *c;
int i;
flags &= IEEE80211_CHAN_ALLTURBO;
c = ic->ic_prevchan;
if (c != NULL && c->ic_freq == freq &&
(c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags)
return c;
/* brute force search */
for (i = 0; i < ic->ic_nchans; i++) {
c = &ic->ic_channels[i];
if (c->ic_freq == freq &&
(c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags)
return c;
}
return NULL;
}
/*
* Locate a channel given a channel number+flags. We cache
* the previous lookup to optimize switching between two
* channels--as happens with dynamic turbo.
*/
struct ieee80211_channel *
ieee80211_find_channel_byieee(struct ieee80211com *ic, int ieee, int flags)
{
struct ieee80211_channel *c;
int i;
flags &= IEEE80211_CHAN_ALLTURBO;
c = ic->ic_prevchan;
if (c != NULL && c->ic_ieee == ieee &&
(c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags)
return c;
/* brute force search */
for (i = 0; i < ic->ic_nchans; i++) {
c = &ic->ic_channels[i];
if (c->ic_ieee == ieee &&
(c->ic_flags & IEEE80211_CHAN_ALLTURBO) == flags)
return c;
}
return NULL;
}
/*
* Lookup a channel suitable for the given rx status.
*
* This is used to find a channel for a frame (eg beacon, probe
* response) based purely on the received PHY information.
*
* For now it tries to do it based on R_FREQ / R_IEEE.
* This is enough for 11bg and 11a (and thus 11ng/11na)
* but it will not be enough for GSM, PSB channels and the
* like. It also doesn't know about legacy-turbog and
* legacy-turbo modes, which some offload NICs actually
* support in weird ways.
*
* Takes the ic and rxstatus; returns the channel or NULL
* if not found.
*
* XXX TODO: Add support for that when the need arises.
*/
struct ieee80211_channel *
ieee80211_lookup_channel_rxstatus(struct ieee80211vap *vap,
const struct ieee80211_rx_stats *rxs)
{
struct ieee80211com *ic = vap->iv_ic;
uint32_t flags;
struct ieee80211_channel *c;
if (rxs == NULL)
return (NULL);
/*
* Strictly speaking we only use freq for now,
* however later on we may wish to just store
* the ieee for verification.
*/
if ((rxs->r_flags & IEEE80211_R_FREQ) == 0)
return (NULL);
if ((rxs->r_flags & IEEE80211_R_IEEE) == 0)
return (NULL);
/*
* If the rx status contains a valid ieee/freq, then
* ensure we populate the correct channel information
* in rxchan before passing it up to the scan infrastructure.
* Offload NICs will pass up beacons from all channels
* during background scans.
*/
/* Determine a band */
/* XXX should be done by the driver? */
if (rxs->c_freq < 3000) {
flags = IEEE80211_CHAN_G;
} else {
flags = IEEE80211_CHAN_A;
}
/* Channel lookup */
c = ieee80211_find_channel(ic, rxs->c_freq, flags);
IEEE80211_DPRINTF(vap, IEEE80211_MSG_INPUT,
"%s: freq=%d, ieee=%d, flags=0x%08x; c=%p\n",
__func__,
(int) rxs->c_freq,
(int) rxs->c_ieee,
flags,
c);
return (c);
}
static void
addmedia(struct ifmedia *media, int caps, int addsta, int mode, int mword)
{
#define ADD(_ic, _s, _o) \
ifmedia_add(media, \
IFM_MAKEWORD(IFM_IEEE80211, (_s), (_o), 0), 0, NULL)
static const u_int mopts[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = IFM_AUTO,
[IEEE80211_MODE_11A] = IFM_IEEE80211_11A,
[IEEE80211_MODE_11B] = IFM_IEEE80211_11B,
[IEEE80211_MODE_11G] = IFM_IEEE80211_11G,
[IEEE80211_MODE_FH] = IFM_IEEE80211_FH,
[IEEE80211_MODE_TURBO_A] = IFM_IEEE80211_11A|IFM_IEEE80211_TURBO,
[IEEE80211_MODE_TURBO_G] = IFM_IEEE80211_11G|IFM_IEEE80211_TURBO,
[IEEE80211_MODE_STURBO_A] = IFM_IEEE80211_11A|IFM_IEEE80211_TURBO,
[IEEE80211_MODE_HALF] = IFM_IEEE80211_11A, /* XXX */
[IEEE80211_MODE_QUARTER] = IFM_IEEE80211_11A, /* XXX */
[IEEE80211_MODE_11NA] = IFM_IEEE80211_11NA,
[IEEE80211_MODE_11NG] = IFM_IEEE80211_11NG,
};
u_int mopt;
mopt = mopts[mode];
if (addsta)
ADD(ic, mword, mopt); /* STA mode has no cap */
if (caps & IEEE80211_C_IBSS)
ADD(media, mword, mopt | IFM_IEEE80211_ADHOC);
if (caps & IEEE80211_C_HOSTAP)
ADD(media, mword, mopt | IFM_IEEE80211_HOSTAP);
if (caps & IEEE80211_C_AHDEMO)
ADD(media, mword, mopt | IFM_IEEE80211_ADHOC | IFM_FLAG0);
if (caps & IEEE80211_C_MONITOR)
ADD(media, mword, mopt | IFM_IEEE80211_MONITOR);
if (caps & IEEE80211_C_WDS)
ADD(media, mword, mopt | IFM_IEEE80211_WDS);
if (caps & IEEE80211_C_MBSS)
ADD(media, mword, mopt | IFM_IEEE80211_MBSS);
#undef ADD
}
/*
* Setup the media data structures according to the channel and
* rate tables.
*/
static int
ieee80211_media_setup(struct ieee80211com *ic,
struct ifmedia *media, int caps, int addsta,
ifm_change_cb_t media_change, ifm_stat_cb_t media_stat)
{
int i, j, rate, maxrate, mword, r;
enum ieee80211_phymode mode;
const struct ieee80211_rateset *rs;
struct ieee80211_rateset allrates;
/*
* Fill in media characteristics.
*/
ifmedia_init(media, 0, media_change, media_stat);
maxrate = 0;
/*
* Add media for legacy operating modes.
*/
memset(&allrates, 0, sizeof(allrates));
for (mode = IEEE80211_MODE_AUTO; mode < IEEE80211_MODE_11NA; mode++) {
if (isclr(ic->ic_modecaps, mode))
continue;
addmedia(media, caps, addsta, mode, IFM_AUTO);
if (mode == IEEE80211_MODE_AUTO)
continue;
rs = &ic->ic_sup_rates[mode];
for (i = 0; i < rs->rs_nrates; i++) {
rate = rs->rs_rates[i];
mword = ieee80211_rate2media(ic, rate, mode);
if (mword == 0)
continue;
addmedia(media, caps, addsta, mode, mword);
/*
* Add legacy rate to the collection of all rates.
*/
r = rate & IEEE80211_RATE_VAL;
for (j = 0; j < allrates.rs_nrates; j++)
if (allrates.rs_rates[j] == r)
break;
if (j == allrates.rs_nrates) {
/* unique, add to the set */
allrates.rs_rates[j] = r;
allrates.rs_nrates++;
}
rate = (rate & IEEE80211_RATE_VAL) / 2;
if (rate > maxrate)
maxrate = rate;
}
}
for (i = 0; i < allrates.rs_nrates; i++) {
mword = ieee80211_rate2media(ic, allrates.rs_rates[i],
IEEE80211_MODE_AUTO);
if (mword == 0)
continue;
/* NB: remove media options from mword */
addmedia(media, caps, addsta,
IEEE80211_MODE_AUTO, IFM_SUBTYPE(mword));
}
/*
* Add HT/11n media. Note that we do not have enough
* bits in the media subtype to express the MCS so we
* use a "placeholder" media subtype and any fixed MCS
* must be specified with a different mechanism.
*/
for (; mode <= IEEE80211_MODE_11NG; mode++) {
if (isclr(ic->ic_modecaps, mode))
continue;
addmedia(media, caps, addsta, mode, IFM_AUTO);
addmedia(media, caps, addsta, mode, IFM_IEEE80211_MCS);
}
if (isset(ic->ic_modecaps, IEEE80211_MODE_11NA) ||
isset(ic->ic_modecaps, IEEE80211_MODE_11NG)) {
addmedia(media, caps, addsta,
IEEE80211_MODE_AUTO, IFM_IEEE80211_MCS);
i = ic->ic_txstream * 8 - 1;
if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) &&
(ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI40))
rate = ieee80211_htrates[i].ht40_rate_400ns;
else if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40))
rate = ieee80211_htrates[i].ht40_rate_800ns;
else if ((ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI20))
rate = ieee80211_htrates[i].ht20_rate_400ns;
else
rate = ieee80211_htrates[i].ht20_rate_800ns;
if (rate > maxrate)
maxrate = rate;
}
return maxrate;
}
-void
-ieee80211_media_init(struct ieee80211com *ic)
-{
- struct ifnet *ifp = ic->ic_ifp;
- int maxrate;
-
- /* NB: this works because the structure is initialized to zero */
- if (!LIST_EMPTY(&ic->ic_media.ifm_list)) {
- /*
- * We are re-initializing the channel list; clear
- * the existing media state as the media routines
- * don't suppress duplicates.
- */
- ifmedia_removeall(&ic->ic_media);
- }
- ieee80211_chan_init(ic);
-
- /*
- * Recalculate media settings in case new channel list changes
- * the set of available modes.
- */
- maxrate = ieee80211_media_setup(ic, &ic->ic_media, ic->ic_caps, 1,
- ieee80211com_media_change, ieee80211com_media_status);
- /* NB: strip explicit mode; we're actually in autoselect */
- ifmedia_set(&ic->ic_media,
- media_status(ic->ic_opmode, ic->ic_curchan) &~
- (IFM_MMASK | IFM_IEEE80211_TURBO));
- if (maxrate)
- ifp->if_baudrate = IF_Mbps(maxrate);
-
- /* XXX need to propagate new media settings to vap's */
-}
-
/* XXX inline or eliminate? */
const struct ieee80211_rateset *
ieee80211_get_suprates(struct ieee80211com *ic, const struct ieee80211_channel *c)
{
/* XXX does this work for 11ng basic rates? */
return &ic->ic_sup_rates[ieee80211_chan2mode(c)];
}
void
ieee80211_announce(struct ieee80211com *ic)
{
int i, rate, mword;
enum ieee80211_phymode mode;
const struct ieee80211_rateset *rs;
/* NB: skip AUTO since it has no rates */
for (mode = IEEE80211_MODE_AUTO+1; mode < IEEE80211_MODE_11NA; mode++) {
if (isclr(ic->ic_modecaps, mode))
continue;
ic_printf(ic, "%s rates: ", ieee80211_phymode_name[mode]);
rs = &ic->ic_sup_rates[mode];
for (i = 0; i < rs->rs_nrates; i++) {
mword = ieee80211_rate2media(ic, rs->rs_rates[i], mode);
if (mword == 0)
continue;
rate = ieee80211_media2rate(mword);
printf("%s%d%sMbps", (i != 0 ? " " : ""),
rate / 2, ((rate & 0x1) != 0 ? ".5" : ""));
}
printf("\n");
}
ieee80211_ht_announce(ic);
}
void
ieee80211_announce_channels(struct ieee80211com *ic)
{
const struct ieee80211_channel *c;
char type;
int i, cw;
printf("Chan Freq CW RegPwr MinPwr MaxPwr\n");
for (i = 0; i < ic->ic_nchans; i++) {
c = &ic->ic_channels[i];
if (IEEE80211_IS_CHAN_ST(c))
type = 'S';
else if (IEEE80211_IS_CHAN_108A(c))
type = 'T';
else if (IEEE80211_IS_CHAN_108G(c))
type = 'G';
else if (IEEE80211_IS_CHAN_HT(c))
type = 'n';
else if (IEEE80211_IS_CHAN_A(c))
type = 'a';
else if (IEEE80211_IS_CHAN_ANYG(c))
type = 'g';
else if (IEEE80211_IS_CHAN_B(c))
type = 'b';
else
type = 'f';
if (IEEE80211_IS_CHAN_HT40(c) || IEEE80211_IS_CHAN_TURBO(c))
cw = 40;
else if (IEEE80211_IS_CHAN_HALF(c))
cw = 10;
else if (IEEE80211_IS_CHAN_QUARTER(c))
cw = 5;
else
cw = 20;
printf("%4d %4d%c %2d%c %6d %4d.%d %4d.%d\n"
, c->ic_ieee, c->ic_freq, type
, cw
, IEEE80211_IS_CHAN_HT40U(c) ? '+' :
IEEE80211_IS_CHAN_HT40D(c) ? '-' : ' '
, c->ic_maxregpower
, c->ic_minpower / 2, c->ic_minpower & 1 ? 5 : 0
, c->ic_maxpower / 2, c->ic_maxpower & 1 ? 5 : 0
);
}
}
static int
media2mode(const struct ifmedia_entry *ime, uint32_t flags, uint16_t *mode)
{
switch (IFM_MODE(ime->ifm_media)) {
case IFM_IEEE80211_11A:
*mode = IEEE80211_MODE_11A;
break;
case IFM_IEEE80211_11B:
*mode = IEEE80211_MODE_11B;
break;
case IFM_IEEE80211_11G:
*mode = IEEE80211_MODE_11G;
break;
case IFM_IEEE80211_FH:
*mode = IEEE80211_MODE_FH;
break;
case IFM_IEEE80211_11NA:
*mode = IEEE80211_MODE_11NA;
break;
case IFM_IEEE80211_11NG:
*mode = IEEE80211_MODE_11NG;
break;
case IFM_AUTO:
*mode = IEEE80211_MODE_AUTO;
break;
default:
return 0;
}
/*
* Turbo mode is an ``option''.
* XXX does not apply to AUTO
*/
if (ime->ifm_media & IFM_IEEE80211_TURBO) {
if (*mode == IEEE80211_MODE_11A) {
if (flags & IEEE80211_F_TURBOP)
*mode = IEEE80211_MODE_TURBO_A;
else
*mode = IEEE80211_MODE_STURBO_A;
} else if (*mode == IEEE80211_MODE_11G)
*mode = IEEE80211_MODE_TURBO_G;
else
return 0;
}
/* XXX HT40 +/- */
return 1;
}
/*
- * Handle a media change request on the underlying interface.
- */
-int
-ieee80211com_media_change(struct ifnet *ifp)
-{
- return EINVAL;
-}
-
-/*
* Handle a media change request on the vap interface.
*/
int
ieee80211_media_change(struct ifnet *ifp)
{
struct ieee80211vap *vap = ifp->if_softc;
struct ifmedia_entry *ime = vap->iv_media.ifm_cur;
uint16_t newmode;
if (!media2mode(ime, vap->iv_flags, &newmode))
return EINVAL;
if (vap->iv_des_mode != newmode) {
vap->iv_des_mode = newmode;
/* XXX kick state machine if up+running */
}
return 0;
}
/*
* Common code to calculate the media status word
* from the operating mode and channel state.
*/
static int
media_status(enum ieee80211_opmode opmode, const struct ieee80211_channel *chan)
{
int status;
status = IFM_IEEE80211;
switch (opmode) {
case IEEE80211_M_STA:
break;
case IEEE80211_M_IBSS:
status |= IFM_IEEE80211_ADHOC;
break;
case IEEE80211_M_HOSTAP:
status |= IFM_IEEE80211_HOSTAP;
break;
case IEEE80211_M_MONITOR:
status |= IFM_IEEE80211_MONITOR;
break;
case IEEE80211_M_AHDEMO:
status |= IFM_IEEE80211_ADHOC | IFM_FLAG0;
break;
case IEEE80211_M_WDS:
status |= IFM_IEEE80211_WDS;
break;
case IEEE80211_M_MBSS:
status |= IFM_IEEE80211_MBSS;
break;
}
if (IEEE80211_IS_CHAN_HTA(chan)) {
status |= IFM_IEEE80211_11NA;
} else if (IEEE80211_IS_CHAN_HTG(chan)) {
status |= IFM_IEEE80211_11NG;
} else if (IEEE80211_IS_CHAN_A(chan)) {
status |= IFM_IEEE80211_11A;
} else if (IEEE80211_IS_CHAN_B(chan)) {
status |= IFM_IEEE80211_11B;
} else if (IEEE80211_IS_CHAN_ANYG(chan)) {
status |= IFM_IEEE80211_11G;
} else if (IEEE80211_IS_CHAN_FHSS(chan)) {
status |= IFM_IEEE80211_FH;
}
/* XXX else complain? */
if (IEEE80211_IS_CHAN_TURBO(chan))
status |= IFM_IEEE80211_TURBO;
#if 0
if (IEEE80211_IS_CHAN_HT20(chan))
status |= IFM_IEEE80211_HT20;
if (IEEE80211_IS_CHAN_HT40(chan))
status |= IFM_IEEE80211_HT40;
#endif
return status;
-}
-
-static void
-ieee80211com_media_status(struct ifnet *ifp, struct ifmediareq *imr)
-{
- struct ieee80211com *ic = ifp->if_l2com;
- struct ieee80211vap *vap;
-
- imr->ifm_status = IFM_AVALID;
- TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
- if (vap->iv_ifp->if_flags & IFF_UP) {
- imr->ifm_status |= IFM_ACTIVE;
- break;
- }
- imr->ifm_active = media_status(ic->ic_opmode, ic->ic_curchan);
- if (imr->ifm_status & IFM_ACTIVE)
- imr->ifm_current = imr->ifm_active;
}
void
ieee80211_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
struct ieee80211vap *vap = ifp->if_softc;
struct ieee80211com *ic = vap->iv_ic;
enum ieee80211_phymode mode;
imr->ifm_status = IFM_AVALID;
/*
 * NB: use the current channel's mode to lock down a transmit
* rate only when running; otherwise we may have a mismatch
* in which case the rate will not be convertible.
*/
if (vap->iv_state == IEEE80211_S_RUN ||
vap->iv_state == IEEE80211_S_SLEEP) {
imr->ifm_status |= IFM_ACTIVE;
mode = ieee80211_chan2mode(ic->ic_curchan);
} else
mode = IEEE80211_MODE_AUTO;
imr->ifm_active = media_status(vap->iv_opmode, ic->ic_curchan);
/*
* Calculate a current rate if possible.
*/
if (vap->iv_txparms[mode].ucastrate != IEEE80211_FIXED_RATE_NONE) {
/*
* A fixed rate is set, report that.
*/
imr->ifm_active |= ieee80211_rate2media(ic,
vap->iv_txparms[mode].ucastrate, mode);
} else if (vap->iv_opmode == IEEE80211_M_STA) {
/*
* In station mode report the current transmit rate.
*/
imr->ifm_active |= ieee80211_rate2media(ic,
vap->iv_bss->ni_txrate, mode);
} else
imr->ifm_active |= IFM_AUTO;
if (imr->ifm_status & IFM_ACTIVE)
imr->ifm_current = imr->ifm_active;
}
/*
* Set the current phy mode and recalculate the active channel
* set based on the available channels for this mode. Also
* select a new default/current channel if the current one is
* inappropriate for this mode.
*/
int
ieee80211_setmode(struct ieee80211com *ic, enum ieee80211_phymode mode)
{
/*
* Adjust basic rates in 11b/11g supported rate set.
 * Note that if operating on a half/quarter rate channel
 * this is a no-op as those rate sets are different
* and used instead.
*/
if (mode == IEEE80211_MODE_11G || mode == IEEE80211_MODE_11B)
ieee80211_setbasicrates(&ic->ic_sup_rates[mode], mode);
ic->ic_curmode = mode;
ieee80211_reset_erp(ic); /* reset ERP state */
return 0;
}
/*
 * Return the phy mode for the specified channel.
*/
enum ieee80211_phymode
ieee80211_chan2mode(const struct ieee80211_channel *chan)
{
if (IEEE80211_IS_CHAN_HTA(chan))
return IEEE80211_MODE_11NA;
else if (IEEE80211_IS_CHAN_HTG(chan))
return IEEE80211_MODE_11NG;
else if (IEEE80211_IS_CHAN_108G(chan))
return IEEE80211_MODE_TURBO_G;
else if (IEEE80211_IS_CHAN_ST(chan))
return IEEE80211_MODE_STURBO_A;
else if (IEEE80211_IS_CHAN_TURBO(chan))
return IEEE80211_MODE_TURBO_A;
else if (IEEE80211_IS_CHAN_HALF(chan))
return IEEE80211_MODE_HALF;
else if (IEEE80211_IS_CHAN_QUARTER(chan))
return IEEE80211_MODE_QUARTER;
else if (IEEE80211_IS_CHAN_A(chan))
return IEEE80211_MODE_11A;
else if (IEEE80211_IS_CHAN_ANYG(chan))
return IEEE80211_MODE_11G;
else if (IEEE80211_IS_CHAN_B(chan))
return IEEE80211_MODE_11B;
else if (IEEE80211_IS_CHAN_FHSS(chan))
return IEEE80211_MODE_FH;
/* NB: should not get here */
printf("%s: cannot map channel to mode; freq %u flags 0x%x\n",
__func__, chan->ic_freq, chan->ic_flags);
return IEEE80211_MODE_11B;
}
struct ratemedia {
u_int match; /* rate + mode */
u_int media; /* if_media rate */
};
static int
findmedia(const struct ratemedia rates[], int n, u_int match)
{
int i;
for (i = 0; i < n; i++)
if (rates[i].match == match)
return rates[i].media;
return IFM_AUTO;
}
/*
* Convert IEEE80211 rate value to ifmedia subtype.
* Rate is either a legacy rate in units of 0.5Mbps
* or an MCS index.
*/
int
ieee80211_rate2media(struct ieee80211com *ic, int rate, enum ieee80211_phymode mode)
{
static const struct ratemedia rates[] = {
{ 2 | IFM_IEEE80211_FH, IFM_IEEE80211_FH1 },
{ 4 | IFM_IEEE80211_FH, IFM_IEEE80211_FH2 },
{ 2 | IFM_IEEE80211_11B, IFM_IEEE80211_DS1 },
{ 4 | IFM_IEEE80211_11B, IFM_IEEE80211_DS2 },
{ 11 | IFM_IEEE80211_11B, IFM_IEEE80211_DS5 },
{ 22 | IFM_IEEE80211_11B, IFM_IEEE80211_DS11 },
{ 44 | IFM_IEEE80211_11B, IFM_IEEE80211_DS22 },
{ 12 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM6 },
{ 18 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM9 },
{ 24 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM12 },
{ 36 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM18 },
{ 48 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM24 },
{ 72 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM36 },
{ 96 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM48 },
{ 108 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM54 },
{ 2 | IFM_IEEE80211_11G, IFM_IEEE80211_DS1 },
{ 4 | IFM_IEEE80211_11G, IFM_IEEE80211_DS2 },
{ 11 | IFM_IEEE80211_11G, IFM_IEEE80211_DS5 },
{ 22 | IFM_IEEE80211_11G, IFM_IEEE80211_DS11 },
{ 12 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM6 },
{ 18 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM9 },
{ 24 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM12 },
{ 36 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM18 },
{ 48 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM24 },
{ 72 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM36 },
{ 96 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM48 },
{ 108 | IFM_IEEE80211_11G, IFM_IEEE80211_OFDM54 },
{ 6 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM3 },
{ 9 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM4 },
{ 54 | IFM_IEEE80211_11A, IFM_IEEE80211_OFDM27 },
/* NB: OFDM72 doesn't really exist so we don't handle it */
};
static const struct ratemedia htrates[] = {
{ 0, IFM_IEEE80211_MCS },
{ 1, IFM_IEEE80211_MCS },
{ 2, IFM_IEEE80211_MCS },
{ 3, IFM_IEEE80211_MCS },
{ 4, IFM_IEEE80211_MCS },
{ 5, IFM_IEEE80211_MCS },
{ 6, IFM_IEEE80211_MCS },
{ 7, IFM_IEEE80211_MCS },
{ 8, IFM_IEEE80211_MCS },
{ 9, IFM_IEEE80211_MCS },
{ 10, IFM_IEEE80211_MCS },
{ 11, IFM_IEEE80211_MCS },
{ 12, IFM_IEEE80211_MCS },
{ 13, IFM_IEEE80211_MCS },
{ 14, IFM_IEEE80211_MCS },
{ 15, IFM_IEEE80211_MCS },
{ 16, IFM_IEEE80211_MCS },
{ 17, IFM_IEEE80211_MCS },
{ 18, IFM_IEEE80211_MCS },
{ 19, IFM_IEEE80211_MCS },
{ 20, IFM_IEEE80211_MCS },
{ 21, IFM_IEEE80211_MCS },
{ 22, IFM_IEEE80211_MCS },
{ 23, IFM_IEEE80211_MCS },
{ 24, IFM_IEEE80211_MCS },
{ 25, IFM_IEEE80211_MCS },
{ 26, IFM_IEEE80211_MCS },
{ 27, IFM_IEEE80211_MCS },
{ 28, IFM_IEEE80211_MCS },
{ 29, IFM_IEEE80211_MCS },
{ 30, IFM_IEEE80211_MCS },
{ 31, IFM_IEEE80211_MCS },
{ 32, IFM_IEEE80211_MCS },
{ 33, IFM_IEEE80211_MCS },
{ 34, IFM_IEEE80211_MCS },
{ 35, IFM_IEEE80211_MCS },
{ 36, IFM_IEEE80211_MCS },
{ 37, IFM_IEEE80211_MCS },
{ 38, IFM_IEEE80211_MCS },
{ 39, IFM_IEEE80211_MCS },
{ 40, IFM_IEEE80211_MCS },
{ 41, IFM_IEEE80211_MCS },
{ 42, IFM_IEEE80211_MCS },
{ 43, IFM_IEEE80211_MCS },
{ 44, IFM_IEEE80211_MCS },
{ 45, IFM_IEEE80211_MCS },
{ 46, IFM_IEEE80211_MCS },
{ 47, IFM_IEEE80211_MCS },
{ 48, IFM_IEEE80211_MCS },
{ 49, IFM_IEEE80211_MCS },
{ 50, IFM_IEEE80211_MCS },
{ 51, IFM_IEEE80211_MCS },
{ 52, IFM_IEEE80211_MCS },
{ 53, IFM_IEEE80211_MCS },
{ 54, IFM_IEEE80211_MCS },
{ 55, IFM_IEEE80211_MCS },
{ 56, IFM_IEEE80211_MCS },
{ 57, IFM_IEEE80211_MCS },
{ 58, IFM_IEEE80211_MCS },
{ 59, IFM_IEEE80211_MCS },
{ 60, IFM_IEEE80211_MCS },
{ 61, IFM_IEEE80211_MCS },
{ 62, IFM_IEEE80211_MCS },
{ 63, IFM_IEEE80211_MCS },
{ 64, IFM_IEEE80211_MCS },
{ 65, IFM_IEEE80211_MCS },
{ 66, IFM_IEEE80211_MCS },
{ 67, IFM_IEEE80211_MCS },
{ 68, IFM_IEEE80211_MCS },
{ 69, IFM_IEEE80211_MCS },
{ 70, IFM_IEEE80211_MCS },
{ 71, IFM_IEEE80211_MCS },
{ 72, IFM_IEEE80211_MCS },
{ 73, IFM_IEEE80211_MCS },
{ 74, IFM_IEEE80211_MCS },
{ 75, IFM_IEEE80211_MCS },
{ 76, IFM_IEEE80211_MCS },
};
int m;
/*
* Check 11n rates first for match as an MCS.
*/
if (mode == IEEE80211_MODE_11NA) {
if (rate & IEEE80211_RATE_MCS) {
rate &= ~IEEE80211_RATE_MCS;
m = findmedia(htrates, nitems(htrates), rate);
if (m != IFM_AUTO)
return m | IFM_IEEE80211_11NA;
}
} else if (mode == IEEE80211_MODE_11NG) {
/* NB: 12 is ambiguous, it will be treated as an MCS */
if (rate & IEEE80211_RATE_MCS) {
rate &= ~IEEE80211_RATE_MCS;
m = findmedia(htrates, nitems(htrates), rate);
if (m != IFM_AUTO)
return m | IFM_IEEE80211_11NG;
}
}
rate &= IEEE80211_RATE_VAL;
switch (mode) {
case IEEE80211_MODE_11A:
case IEEE80211_MODE_HALF: /* XXX good 'nuf */
case IEEE80211_MODE_QUARTER:
case IEEE80211_MODE_11NA:
case IEEE80211_MODE_TURBO_A:
case IEEE80211_MODE_STURBO_A:
return findmedia(rates, nitems(rates),
rate | IFM_IEEE80211_11A);
case IEEE80211_MODE_11B:
return findmedia(rates, nitems(rates),
rate | IFM_IEEE80211_11B);
case IEEE80211_MODE_FH:
return findmedia(rates, nitems(rates),
rate | IFM_IEEE80211_FH);
case IEEE80211_MODE_AUTO:
/* NB: ic may be NULL for some drivers */
if (ic != NULL && ic->ic_phytype == IEEE80211_T_FH)
return findmedia(rates, nitems(rates),
rate | IFM_IEEE80211_FH);
/* NB: hack, 11g matches both 11b+11a rates */
/* fall thru... */
case IEEE80211_MODE_11G:
case IEEE80211_MODE_11NG:
case IEEE80211_MODE_TURBO_G:
return findmedia(rates, nitems(rates), rate | IFM_IEEE80211_11G);
}
return IFM_AUTO;
}
int
ieee80211_media2rate(int mword)
{
static const int ieeerates[] = {
-1, /* IFM_AUTO */
0, /* IFM_MANUAL */
0, /* IFM_NONE */
2, /* IFM_IEEE80211_FH1 */
4, /* IFM_IEEE80211_FH2 */
2, /* IFM_IEEE80211_DS1 */
4, /* IFM_IEEE80211_DS2 */
11, /* IFM_IEEE80211_DS5 */
22, /* IFM_IEEE80211_DS11 */
44, /* IFM_IEEE80211_DS22 */
12, /* IFM_IEEE80211_OFDM6 */
18, /* IFM_IEEE80211_OFDM9 */
24, /* IFM_IEEE80211_OFDM12 */
36, /* IFM_IEEE80211_OFDM18 */
48, /* IFM_IEEE80211_OFDM24 */
72, /* IFM_IEEE80211_OFDM36 */
96, /* IFM_IEEE80211_OFDM48 */
108, /* IFM_IEEE80211_OFDM54 */
144, /* IFM_IEEE80211_OFDM72 */
0, /* IFM_IEEE80211_DS354k */
0, /* IFM_IEEE80211_DS512k */
6, /* IFM_IEEE80211_OFDM3 */
9, /* IFM_IEEE80211_OFDM4 */
54, /* IFM_IEEE80211_OFDM27 */
-1, /* IFM_IEEE80211_MCS */
};
return IFM_SUBTYPE(mword) < nitems(ieeerates) ?
ieeerates[IFM_SUBTYPE(mword)] : 0;
}
/*
* The following hash function is adapted from "Hash Functions" by Bob Jenkins
* ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
*/
#define mix(a, b, c) \
do { \
a -= b; a -= c; a ^= (c >> 13); \
b -= c; b -= a; b ^= (a << 8); \
c -= a; c -= b; c ^= (b >> 13); \
a -= b; a -= c; a ^= (c >> 12); \
b -= c; b -= a; b ^= (a << 16); \
c -= a; c -= b; c ^= (b >> 5); \
a -= b; a -= c; a ^= (c >> 3); \
b -= c; b -= a; b ^= (a << 10); \
c -= a; c -= b; c ^= (b >> 15); \
} while (/*CONSTCOND*/0)
uint32_t
ieee80211_mac_hash(const struct ieee80211com *ic,
const uint8_t addr[IEEE80211_ADDR_LEN])
{
uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = ic->ic_hash_key;
b += addr[5] << 8;
b += addr[4];
a += addr[3] << 24;
a += addr[2] << 16;
a += addr[1] << 8;
a += addr[0];
mix(a, b, c);
return c;
}
#undef mix
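The Jenkins-derived hash above is typically used to pick a bucket in per-station or multicast filter tables, keyed with the per-com ic_hash_key initialized in ieee80211_ifattach(). A hedged sketch of such a use (the table size and helper name are arbitrary illustrations, not part of this revision):

#define	EXAMPLE_HASHSIZE	32	/* illustrative power-of-two table size */

/* Sketch: derive a table index from a MAC address. */
static int
example_hash_index(const struct ieee80211com *ic,
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	return (ieee80211_mac_hash(ic, mac) % EXAMPLE_HASHSIZE);
}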
char
ieee80211_channel_type_char(const struct ieee80211_channel *c)
{
if (IEEE80211_IS_CHAN_ST(c))
return 'S';
if (IEEE80211_IS_CHAN_108A(c))
return 'T';
if (IEEE80211_IS_CHAN_108G(c))
return 'G';
if (IEEE80211_IS_CHAN_HT(c))
return 'n';
if (IEEE80211_IS_CHAN_A(c))
return 'a';
if (IEEE80211_IS_CHAN_ANYG(c))
return 'g';
if (IEEE80211_IS_CHAN_B(c))
return 'b';
return 'f';
}
Index: head/sys/net80211/ieee80211_ddb.c
===================================================================
--- head/sys/net80211/ieee80211_ddb.c (revision 287196)
+++ head/sys/net80211/ieee80211_ddb.c (revision 287197)
@@ -1,899 +1,898 @@
/*-
* Copyright (c) 2007-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_ddb.h"
#include "opt_wlan.h"
#ifdef DDB
/*
* IEEE 802.11 DDB support
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ethernet.h>
#include <net/vnet.h>
#include <net80211/ieee80211_var.h>
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#ifdef IEEE80211_SUPPORT_MESH
#include <net80211/ieee80211_mesh.h>
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#define DB_PRINTSYM(prefix, name, addr) do { \
db_printf("%s%-25s : ", prefix, name); \
db_printsym((db_addr_t) addr, DB_STGY_ANY); \
db_printf("\n"); \
} while (0)
static void _db_show_sta(const struct ieee80211_node *);
static void _db_show_vap(const struct ieee80211vap *, int, int);
static void _db_show_com(const struct ieee80211com *,
int showvaps, int showsta, int showmesh, int showprocs);
static void _db_show_node_table(const char *tag,
const struct ieee80211_node_table *);
static void _db_show_channel(const char *tag, const struct ieee80211_channel *);
static void _db_show_ssid(const char *tag, int ix, int len, const uint8_t *);
static void _db_show_appie(const char *tag, const struct ieee80211_appie *);
static void _db_show_key(const char *tag, int ix, const struct ieee80211_key *);
static void _db_show_roamparams(const char *tag, const void *arg,
const struct ieee80211_roamparam *rp);
static void _db_show_txparams(const char *tag, const void *arg,
const struct ieee80211_txparam *tp);
static void _db_show_ageq(const char *tag, const struct ieee80211_ageq *q);
static void _db_show_stats(const struct ieee80211_stats *);
#ifdef IEEE80211_SUPPORT_MESH
static void _db_show_mesh(const struct ieee80211_mesh_state *);
#endif
DB_SHOW_COMMAND(sta, db_show_sta)
{
if (!have_addr) {
db_printf("usage: show sta <addr>\n");
return;
}
_db_show_sta((const struct ieee80211_node *) addr);
}
DB_SHOW_COMMAND(statab, db_show_statab)
{
if (!have_addr) {
db_printf("usage: show statab <addr>\n");
return;
}
_db_show_node_table("", (const struct ieee80211_node_table *) addr);
}
DB_SHOW_COMMAND(vap, db_show_vap)
{
int i, showmesh = 0, showprocs = 0;
if (!have_addr) {
db_printf("usage: show vap <addr>\n");
return;
}
for (i = 0; modif[i] != '\0'; i++)
switch (modif[i]) {
case 'a':
showprocs = 1;
showmesh = 1;
break;
case 'm':
showmesh = 1;
break;
case 'p':
showprocs = 1;
break;
}
_db_show_vap((const struct ieee80211vap *) addr, showmesh, showprocs);
}
DB_SHOW_COMMAND(com, db_show_com)
{
const struct ieee80211com *ic;
int i, showprocs = 0, showvaps = 0, showsta = 0, showmesh = 0;
if (!have_addr) {
db_printf("usage: show com <addr>\n");
return;
}
for (i = 0; modif[i] != '\0'; i++)
switch (modif[i]) {
case 'a':
showsta = showmesh = showvaps = showprocs = 1;
break;
case 's':
showsta = 1;
break;
case 'm':
showmesh = 1;
break;
case 'v':
showvaps = 1;
break;
case 'p':
showprocs = 1;
break;
}
ic = (const struct ieee80211com *) addr;
_db_show_com(ic, showvaps, showsta, showmesh, showprocs);
}
DB_SHOW_ALL_COMMAND(vaps, db_show_all_vaps)
{
VNET_ITERATOR_DECL(vnet_iter);
const struct ifnet *ifp;
int i, showall = 0;
for (i = 0; modif[i] != '\0'; i++)
switch (modif[i]) {
case 'a':
showall = 1;
break;
}
VNET_FOREACH(vnet_iter) {
TAILQ_FOREACH(ifp, &V_ifnet, if_list)
if (ifp->if_type == IFT_IEEE80211) {
const struct ieee80211com *ic = ifp->if_l2com;
if (!showall) {
const struct ieee80211vap *vap;
db_printf("%s: com %p vaps:",
ifp->if_xname, ic);
TAILQ_FOREACH(vap, &ic->ic_vaps,
iv_next)
db_printf(" %s(%p)",
vap->iv_ifp->if_xname, vap);
db_printf("\n");
} else
_db_show_com(ic, 1, 1, 1, 1);
}
}
}
#ifdef IEEE80211_SUPPORT_MESH
DB_SHOW_ALL_COMMAND(mesh, db_show_mesh)
{
const struct ieee80211_mesh_state *ms;
if (!have_addr) {
db_printf("usage: show mesh <addr>\n");
return;
}
ms = (const struct ieee80211_mesh_state *) addr;
_db_show_mesh(ms);
}
#endif /* IEEE80211_SUPPORT_MESH */
static void
_db_show_txampdu(const char *sep, int ix, const struct ieee80211_tx_ampdu *tap)
{
db_printf("%stxampdu[%d]: %p flags %b %s\n",
sep, ix, tap, tap->txa_flags, IEEE80211_AGGR_BITS,
ieee80211_wme_acnames[TID_TO_WME_AC(tap->txa_tid)]);
db_printf("%s token %u lastsample %d pkts %d avgpps %d qbytes %d qframes %d\n",
sep, tap->txa_token, tap->txa_lastsample, tap->txa_pkts,
tap->txa_avgpps, tap->txa_qbytes, tap->txa_qframes);
db_printf("%s start %u seqpending %u wnd %u attempts %d nextrequest %d\n",
sep, tap->txa_start, tap->txa_seqpending, tap->txa_wnd,
tap->txa_attempts, tap->txa_nextrequest);
/* XXX timer */
}
static void
_db_show_rxampdu(const char *sep, int ix, const struct ieee80211_rx_ampdu *rap)
{
int i;
db_printf("%srxampdu[%d]: %p flags 0x%x tid %u\n",
sep, ix, rap, rap->rxa_flags, ix /*XXX */);
db_printf("%s qbytes %d qframes %d seqstart %u start %u wnd %u\n",
sep, rap->rxa_qbytes, rap->rxa_qframes,
rap->rxa_seqstart, rap->rxa_start, rap->rxa_wnd);
db_printf("%s age %d nframes %d\n", sep,
rap->rxa_age, rap->rxa_nframes);
for (i = 0; i < IEEE80211_AGGR_BAWMAX; i++)
if (rap->rxa_m[i] != NULL)
db_printf("%s m[%2u:%4u] %p\n", sep, i,
IEEE80211_SEQ_ADD(rap->rxa_start, i),
rap->rxa_m[i]);
}
static void
_db_show_sta(const struct ieee80211_node *ni)
{
int i;
db_printf("0x%p: mac %s refcnt %d\n", ni,
ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni));
db_printf("\tvap %p wdsvap %p ic %p table %p\n",
ni->ni_vap, ni->ni_wdsvap, ni->ni_ic, ni->ni_table);
db_printf("\tflags=%b\n", ni->ni_flags, IEEE80211_NODE_BITS);
db_printf("\tscangen %u authmode %u ath_flags 0x%x ath_defkeyix %u\n",
ni->ni_scangen, ni->ni_authmode,
ni->ni_ath_flags, ni->ni_ath_defkeyix);
db_printf("\tassocid 0x%x txpower %u vlan %u\n",
ni->ni_associd, ni->ni_txpower, ni->ni_vlan);
db_printf("\tjointime %d (%lu secs) challenge %p\n",
ni->ni_jointime, (unsigned long)(time_uptime - ni->ni_jointime),
ni->ni_challenge);
db_printf("\ties: data %p len %d\n", ni->ni_ies.data, ni->ni_ies.len);
db_printf("\t[wpa_ie %p rsn_ie %p wme_ie %p ath_ie %p\n",
ni->ni_ies.wpa_ie, ni->ni_ies.rsn_ie, ni->ni_ies.wme_ie,
ni->ni_ies.ath_ie);
db_printf("\t htcap_ie %p htinfo_ie %p]\n",
ni->ni_ies.htcap_ie, ni->ni_ies.htinfo_ie);
if (ni->ni_flags & IEEE80211_NODE_QOS) {
for (i = 0; i < WME_NUM_TID; i++) {
if (ni->ni_txseqs[i] || ni->ni_rxseqs[i])
db_printf("\t[%u] txseq %u rxseq %u fragno %u\n",
i, ni->ni_txseqs[i],
ni->ni_rxseqs[i] >> IEEE80211_SEQ_SEQ_SHIFT,
ni->ni_rxseqs[i] & IEEE80211_SEQ_FRAG_MASK);
}
}
db_printf("\ttxseq %u rxseq %u fragno %u rxfragstamp %u\n",
ni->ni_txseqs[IEEE80211_NONQOS_TID],
ni->ni_rxseqs[IEEE80211_NONQOS_TID] >> IEEE80211_SEQ_SEQ_SHIFT,
ni->ni_rxseqs[IEEE80211_NONQOS_TID] & IEEE80211_SEQ_FRAG_MASK,
ni->ni_rxfragstamp);
db_printf("\trxfrag[0] %p rxfrag[1] %p rxfrag[2] %p\n",
ni->ni_rxfrag[0], ni->ni_rxfrag[1], ni->ni_rxfrag[2]);
_db_show_key("\tucastkey", 0, &ni->ni_ucastkey);
db_printf("\tavgrssi 0x%x (rssi %d) noise %d\n",
ni->ni_avgrssi, IEEE80211_RSSI_GET(ni->ni_avgrssi),
ni->ni_noise);
db_printf("\tintval %u capinfo %b\n",
ni->ni_intval, ni->ni_capinfo, IEEE80211_CAPINFO_BITS);
db_printf("\tbssid %s", ether_sprintf(ni->ni_bssid));
_db_show_ssid(" essid ", 0, ni->ni_esslen, ni->ni_essid);
db_printf("\n");
_db_show_channel("\tchannel", ni->ni_chan);
db_printf("\n");
db_printf("\terp %b dtim_period %u dtim_count %u\n",
ni->ni_erp, IEEE80211_ERP_BITS,
ni->ni_dtim_period, ni->ni_dtim_count);
db_printf("\thtcap %b htparam 0x%x htctlchan %u ht2ndchan %u\n",
ni->ni_htcap, IEEE80211_HTCAP_BITS,
ni->ni_htparam, ni->ni_htctlchan, ni->ni_ht2ndchan);
db_printf("\thtopmode 0x%x htstbc 0x%x chw %u\n",
ni->ni_htopmode, ni->ni_htstbc, ni->ni_chw);
/* XXX ampdu state */
for (i = 0; i < WME_NUM_TID; i++)
if (ni->ni_tx_ampdu[i].txa_flags & IEEE80211_AGGR_SETUP)
_db_show_txampdu("\t", i, &ni->ni_tx_ampdu[i]);
for (i = 0; i < WME_NUM_TID; i++)
if (ni->ni_rx_ampdu[i].rxa_flags)
_db_show_rxampdu("\t", i, &ni->ni_rx_ampdu[i]);
db_printf("\tinact %u inact_reload %u txrate %u\n",
ni->ni_inact, ni->ni_inact_reload, ni->ni_txrate);
#ifdef IEEE80211_SUPPORT_MESH
_db_show_ssid("\tmeshid ", 0, ni->ni_meshidlen, ni->ni_meshid);
db_printf(" mlstate %b mllid 0x%x mlpid 0x%x mlrcnt %u mltval %u\n",
ni->ni_mlstate, IEEE80211_MESH_MLSTATE_BITS,
ni->ni_mllid, ni->ni_mlpid, ni->ni_mlrcnt, ni->ni_mltval);
#endif
}
#ifdef IEEE80211_SUPPORT_TDMA
static void
_db_show_tdma(const char *sep, const struct ieee80211_tdma_state *ts, int showprocs)
{
db_printf("%stdma %p:\n", sep, ts);
db_printf("%s version %u slot %u bintval %u peer %p\n", sep,
ts->tdma_version, ts->tdma_slot, ts->tdma_bintval, ts->tdma_peer);
db_printf("%s slotlen %u slotcnt %u", sep,
ts->tdma_slotlen, ts->tdma_slotcnt);
db_printf(" inuse 0x%x active 0x%x count %d\n",
ts->tdma_inuse[0], ts->tdma_active[0], ts->tdma_count);
if (showprocs) {
DB_PRINTSYM(sep, " tdma_newstate", ts->tdma_newstate);
DB_PRINTSYM(sep, " tdma_recv_mgmt", ts->tdma_recv_mgmt);
DB_PRINTSYM(sep, " tdma_opdetach", ts->tdma_opdetach);
}
}
#endif /* IEEE80211_SUPPORT_TDMA */
static void
_db_show_vap(const struct ieee80211vap *vap, int showmesh, int showprocs)
{
const struct ieee80211com *ic = vap->iv_ic;
int i;
db_printf("%p:", vap);
db_printf(" bss %p", vap->iv_bss);
db_printf(" myaddr %s", ether_sprintf(vap->iv_myaddr));
db_printf("\n");
db_printf("\topmode %s", ieee80211_opmode_name[vap->iv_opmode]);
#ifdef IEEE80211_SUPPORT_MESH
if (vap->iv_opmode == IEEE80211_M_MBSS)
db_printf("(%p)", vap->iv_mesh);
#endif
db_printf(" state %s", ieee80211_state_name[vap->iv_state]);
db_printf(" ifp %p(%s)", vap->iv_ifp, vap->iv_ifp->if_xname);
db_printf("\n");
db_printf("\tic %p", vap->iv_ic);
db_printf(" media %p", &vap->iv_media);
db_printf(" bpf_if %p", vap->iv_rawbpf);
db_printf(" mgtsend %p", &vap->iv_mgtsend);
#if 0
struct sysctllog *iv_sysctl; /* dynamic sysctl context */
#endif
db_printf("\n");
db_printf("\tdebug=%b\n", vap->iv_debug, IEEE80211_MSG_BITS);
db_printf("\tflags=%b\n", vap->iv_flags, IEEE80211_F_BITS);
db_printf("\tflags_ext=%b\n", vap->iv_flags_ext, IEEE80211_FEXT_BITS);
db_printf("\tflags_ht=%b\n", vap->iv_flags_ht, IEEE80211_FHT_BITS);
db_printf("\tflags_ven=%b\n", vap->iv_flags_ven, IEEE80211_FVEN_BITS);
db_printf("\tcaps=%b\n", vap->iv_caps, IEEE80211_C_BITS);
db_printf("\thtcaps=%b\n", vap->iv_htcaps, IEEE80211_C_HTCAP_BITS);
_db_show_stats(&vap->iv_stats);
db_printf("\tinact_init %d", vap->iv_inact_init);
db_printf(" inact_auth %d", vap->iv_inact_auth);
db_printf(" inact_run %d", vap->iv_inact_run);
db_printf(" inact_probe %d", vap->iv_inact_probe);
db_printf("\n");
db_printf("\tdes_nssid %d", vap->iv_des_nssid);
if (vap->iv_des_nssid)
_db_show_ssid(" des_ssid[%u] ", 0,
vap->iv_des_ssid[0].len, vap->iv_des_ssid[0].ssid);
db_printf(" des_bssid %s", ether_sprintf(vap->iv_des_bssid));
db_printf("\n");
db_printf("\tdes_mode %d", vap->iv_des_mode);
_db_show_channel(" des_chan", vap->iv_des_chan);
db_printf("\n");
#if 0
int iv_nicknamelen; /* XXX junk */
uint8_t iv_nickname[IEEE80211_NWID_LEN];
#endif
db_printf("\tbgscanidle %u", vap->iv_bgscanidle);
db_printf(" bgscanintvl %u", vap->iv_bgscanintvl);
db_printf(" scanvalid %u", vap->iv_scanvalid);
db_printf("\n");
db_printf("\tscanreq_duration %u", vap->iv_scanreq_duration);
db_printf(" scanreq_mindwell %u", vap->iv_scanreq_mindwell);
db_printf(" scanreq_maxdwell %u", vap->iv_scanreq_maxdwell);
db_printf("\n");
db_printf("\tscanreq_flags 0x%x", vap->iv_scanreq_flags);
db_printf(" scanreq_nssid %d", vap->iv_scanreq_nssid);
for (i = 0; i < vap->iv_scanreq_nssid; i++)
_db_show_ssid(" scanreq_ssid[%u]", i,
vap->iv_scanreq_ssid[i].len, vap->iv_scanreq_ssid[i].ssid);
db_printf(" roaming %d", vap->iv_roaming);
db_printf("\n");
for (i = IEEE80211_MODE_11A; i < IEEE80211_MODE_MAX; i++)
if (isset(ic->ic_modecaps, i)) {
_db_show_roamparams("\troamparms[%s]",
ieee80211_phymode_name[i], &vap->iv_roamparms[i]);
db_printf("\n");
}
db_printf("\tbmissthreshold %u", vap->iv_bmissthreshold);
db_printf(" bmiss_max %u", vap->iv_bmiss_count);
db_printf(" bmiss_max %d", vap->iv_bmiss_max);
db_printf("\n");
db_printf("\tswbmiss_count %u", vap->iv_swbmiss_count);
db_printf(" swbmiss_period %u", vap->iv_swbmiss_period);
db_printf(" swbmiss %p", &vap->iv_swbmiss);
db_printf("\n");
db_printf("\tampdu_rxmax %d", vap->iv_ampdu_rxmax);
db_printf(" ampdu_density %d", vap->iv_ampdu_density);
db_printf(" ampdu_limit %d", vap->iv_ampdu_limit);
db_printf(" amsdu_limit %d", vap->iv_amsdu_limit);
db_printf("\n");
db_printf("\tmax_aid %u", vap->iv_max_aid);
db_printf(" aid_bitmap %p", vap->iv_aid_bitmap);
db_printf("\n");
db_printf("\tsta_assoc %u", vap->iv_sta_assoc);
db_printf(" ps_sta %u", vap->iv_ps_sta);
db_printf(" ps_pending %u", vap->iv_ps_pending);
db_printf(" tim_len %u", vap->iv_tim_len);
db_printf(" tim_bitmap %p", vap->iv_tim_bitmap);
db_printf("\n");
db_printf("\tdtim_period %u", vap->iv_dtim_period);
db_printf(" dtim_count %u", vap->iv_dtim_count);
db_printf(" set_tim %p", vap->iv_set_tim);
db_printf(" csa_count %d", vap->iv_csa_count);
db_printf("\n");
db_printf("\trtsthreshold %u", vap->iv_rtsthreshold);
db_printf(" fragthreshold %u", vap->iv_fragthreshold);
db_printf(" inact_timer %d", vap->iv_inact_timer);
db_printf("\n");
for (i = IEEE80211_MODE_11A; i < IEEE80211_MODE_MAX; i++)
if (isset(ic->ic_modecaps, i)) {
_db_show_txparams("\ttxparms[%s]",
ieee80211_phymode_name[i], &vap->iv_txparms[i]);
db_printf("\n");
}
/* application-specified IE's to attach to mgt frames */
_db_show_appie("\tappie_beacon", vap->iv_appie_beacon);
_db_show_appie("\tappie_probereq", vap->iv_appie_probereq);
_db_show_appie("\tappie_proberesp", vap->iv_appie_proberesp);
_db_show_appie("\tappie_assocreq", vap->iv_appie_assocreq);
_db_show_appie("\tappie_asscoresp", vap->iv_appie_assocresp);
_db_show_appie("\tappie_wpa", vap->iv_appie_wpa);
if (vap->iv_wpa_ie != NULL || vap->iv_rsn_ie != NULL) {
if (vap->iv_wpa_ie != NULL)
db_printf("\twpa_ie %p", vap->iv_wpa_ie);
if (vap->iv_rsn_ie != NULL)
db_printf("\trsn_ie %p", vap->iv_rsn_ie);
db_printf("\n");
}
db_printf("\tmax_keyix %u", vap->iv_max_keyix);
db_printf(" def_txkey %d", vap->iv_def_txkey);
db_printf("\n");
for (i = 0; i < IEEE80211_WEP_NKID; i++)
_db_show_key("\tnw_keys[%u]", i, &vap->iv_nw_keys[i]);
db_printf("\tauth %p(%s)", vap->iv_auth, vap->iv_auth->ia_name);
db_printf(" ec %p", vap->iv_ec);
db_printf(" acl %p", vap->iv_acl);
db_printf(" as %p", vap->iv_as);
db_printf("\n");
#ifdef IEEE80211_SUPPORT_MESH
if (showmesh && vap->iv_mesh != NULL)
_db_show_mesh(vap->iv_mesh);
#endif
#ifdef IEEE80211_SUPPORT_TDMA
if (vap->iv_tdma != NULL)
_db_show_tdma("\t", vap->iv_tdma, showprocs);
#endif /* IEEE80211_SUPPORT_TDMA */
if (showprocs) {
DB_PRINTSYM("\t", "iv_key_alloc", vap->iv_key_alloc);
DB_PRINTSYM("\t", "iv_key_delete", vap->iv_key_delete);
DB_PRINTSYM("\t", "iv_key_set", vap->iv_key_set);
DB_PRINTSYM("\t", "iv_key_update_begin", vap->iv_key_update_begin);
DB_PRINTSYM("\t", "iv_key_update_end", vap->iv_key_update_end);
DB_PRINTSYM("\t", "iv_opdetach", vap->iv_opdetach);
DB_PRINTSYM("\t", "iv_input", vap->iv_input);
DB_PRINTSYM("\t", "iv_recv_mgmt", vap->iv_recv_mgmt);
DB_PRINTSYM("\t", "iv_deliver_data", vap->iv_deliver_data);
DB_PRINTSYM("\t", "iv_bmiss", vap->iv_bmiss);
DB_PRINTSYM("\t", "iv_reset", vap->iv_reset);
DB_PRINTSYM("\t", "iv_update_beacon", vap->iv_update_beacon);
DB_PRINTSYM("\t", "iv_newstate", vap->iv_newstate);
DB_PRINTSYM("\t", "iv_output", vap->iv_output);
}
}
static void
_db_show_com(const struct ieee80211com *ic, int showvaps, int showsta,
int showmesh, int showprocs)
{
struct ieee80211vap *vap;
db_printf("%p:", ic);
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
db_printf(" %s(%p)", vap->iv_ifp->if_xname, vap);
db_printf("\n");
- db_printf("\tifp %p(%s)", ic->ic_ifp, ic->ic_ifp->if_xname);
+ db_printf("\tsoftc %p", ic->ic_softc);
db_printf("\tname %s", ic->ic_name);
db_printf(" comlock %p", &ic->ic_comlock);
db_printf("\n");
db_printf("\theadroom %d", ic->ic_headroom);
db_printf(" phytype %d", ic->ic_phytype);
db_printf(" opmode %s", ieee80211_opmode_name[ic->ic_opmode]);
db_printf("\n");
- db_printf("\tmedia %p", &ic->ic_media);
db_printf(" inact %p", &ic->ic_inact);
db_printf("\n");
db_printf("\tflags=%b\n", ic->ic_flags, IEEE80211_F_BITS);
db_printf("\tflags_ext=%b\n", ic->ic_flags_ext, IEEE80211_FEXT_BITS);
db_printf("\tflags_ht=%b\n", ic->ic_flags_ht, IEEE80211_FHT_BITS);
db_printf("\tflags_ven=%b\n", ic->ic_flags_ven, IEEE80211_FVEN_BITS);
db_printf("\tcaps=%b\n", ic->ic_caps, IEEE80211_C_BITS);
db_printf("\tcryptocaps=%b\n",
ic->ic_cryptocaps, IEEE80211_CRYPTO_BITS);
db_printf("\thtcaps=%b\n", ic->ic_htcaps, IEEE80211_HTCAP_BITS);
#if 0
uint8_t ic_modecaps[2]; /* set of mode capabilities */
#endif
db_printf("\tcurmode %u", ic->ic_curmode);
db_printf(" promisc %u", ic->ic_promisc);
db_printf(" allmulti %u", ic->ic_allmulti);
db_printf(" nrunning %u", ic->ic_nrunning);
db_printf("\n");
db_printf("\tbintval %u", ic->ic_bintval);
db_printf(" lintval %u", ic->ic_lintval);
db_printf(" holdover %u", ic->ic_holdover);
db_printf(" txpowlimit %u", ic->ic_txpowlimit);
db_printf("\n");
#if 0
struct ieee80211_rateset ic_sup_rates[IEEE80211_MODE_MAX];
#endif
/*
* Channel state:
*
* ic_channels is the set of available channels for the device;
* it is setup by the driver
* ic_nchans is the number of valid entries in ic_channels
* ic_chan_avail is a bit vector of these channels used to check
* whether a channel is available w/o searching the channel table.
* ic_chan_active is a (potentially) constrained subset of
* ic_chan_avail that reflects any mode setting or user-specified
* limit on the set of channels to use/scan
* ic_curchan is the current channel the device is set to; it may
* be different from ic_bsschan when we are off-channel scanning
* or otherwise doing background work
* ic_bsschan is the channel selected for operation; it may
* be undefined (IEEE80211_CHAN_ANYC)
* ic_prevchan is a cached ``previous channel'' used to optimize
* lookups when switching back+forth between two channels
* (e.g. for dynamic turbo)
*/
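/*
 * Editorial sketch, not part of the original file: going by the comment
 * above, the point of ic_chan_avail is to answer "may this channel be
 * used?" with a bit test instead of a walk over ic_channels, along the
 * lines of the isset() idiom already used elsewhere in this function:
 *
 *     if (c != IEEE80211_CHAN_ANYC && isset(ic->ic_chan_avail, c->ic_ieee))
 *             ... channel is available; ic_chan_active further narrows
 *                 what may actually be scanned or selected ...
 *
 * (An illustrative reading of the comment, not a claim about callers
 * elsewhere in the tree.)
 */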
db_printf("\tnchans %d", ic->ic_nchans);
#if 0
struct ieee80211_channel ic_channels[IEEE80211_CHAN_MAX];
uint8_t ic_chan_avail[IEEE80211_CHAN_BYTES];
uint8_t ic_chan_active[IEEE80211_CHAN_BYTES];
uint8_t ic_chan_scan[IEEE80211_CHAN_BYTES];
#endif
db_printf("\n");
_db_show_channel("\tcurchan", ic->ic_curchan);
db_printf("\n");
_db_show_channel("\tbsschan", ic->ic_bsschan);
db_printf("\n");
_db_show_channel("\tprevchan", ic->ic_prevchan);
db_printf("\n");
db_printf("\tregdomain %p", &ic->ic_regdomain);
db_printf("\n");
_db_show_channel("\tcsa_newchan", ic->ic_csa_newchan);
db_printf(" csa_count %d", ic->ic_csa_count);
db_printf( "dfs %p", &ic->ic_dfs);
db_printf("\n");
db_printf("\tscan %p", ic->ic_scan);
db_printf(" lastdata %d", ic->ic_lastdata);
db_printf(" lastscan %d", ic->ic_lastscan);
db_printf("\n");
db_printf("\tmax_keyix %d", ic->ic_max_keyix);
db_printf(" hash_key 0x%x", ic->ic_hash_key);
db_printf(" wme %p", &ic->ic_wme);
if (!showsta)
db_printf(" sta %p", &ic->ic_sta);
db_printf("\n");
db_printf("\tstageq@%p:\n", &ic->ic_stageq);
_db_show_ageq("\t", &ic->ic_stageq);
if (showsta)
_db_show_node_table("\t", &ic->ic_sta);
db_printf("\tprotmode %d", ic->ic_protmode);
db_printf(" nonerpsta %u", ic->ic_nonerpsta);
db_printf(" longslotsta %u", ic->ic_longslotsta);
db_printf(" lastnonerp %d", ic->ic_lastnonerp);
db_printf("\n");
db_printf("\tsta_assoc %u", ic->ic_sta_assoc);
db_printf(" ht_sta_assoc %u", ic->ic_ht_sta_assoc);
db_printf(" ht40_sta_assoc %u", ic->ic_ht40_sta_assoc);
db_printf("\n");
db_printf("\tcurhtprotmode 0x%x", ic->ic_curhtprotmode);
db_printf(" htprotmode %d", ic->ic_htprotmode);
db_printf(" lastnonht %d", ic->ic_lastnonht);
db_printf("\n");
db_printf("\tsuperg %p\n", ic->ic_superg);
db_printf("\tmontaps %d th %p txchan %p rh %p rxchan %p\n",
ic->ic_montaps, ic->ic_th, ic->ic_txchan, ic->ic_rh, ic->ic_rxchan);
if (showprocs) {
DB_PRINTSYM("\t", "ic_vap_create", ic->ic_vap_create);
DB_PRINTSYM("\t", "ic_vap_delete", ic->ic_vap_delete);
#if 0
/* operating mode attachment */
ieee80211vap_attach ic_vattach[IEEE80211_OPMODE_MAX];
#endif
DB_PRINTSYM("\t", "ic_newassoc", ic->ic_newassoc);
DB_PRINTSYM("\t", "ic_getradiocaps", ic->ic_getradiocaps);
DB_PRINTSYM("\t", "ic_setregdomain", ic->ic_setregdomain);
DB_PRINTSYM("\t", "ic_send_mgmt", ic->ic_send_mgmt);
DB_PRINTSYM("\t", "ic_raw_xmit", ic->ic_raw_xmit);
DB_PRINTSYM("\t", "ic_updateslot", ic->ic_updateslot);
DB_PRINTSYM("\t", "ic_update_mcast", ic->ic_update_mcast);
DB_PRINTSYM("\t", "ic_update_promisc", ic->ic_update_promisc);
DB_PRINTSYM("\t", "ic_node_alloc", ic->ic_node_alloc);
DB_PRINTSYM("\t", "ic_node_free", ic->ic_node_free);
DB_PRINTSYM("\t", "ic_node_cleanup", ic->ic_node_cleanup);
DB_PRINTSYM("\t", "ic_node_getrssi", ic->ic_node_getrssi);
DB_PRINTSYM("\t", "ic_node_getsignal", ic->ic_node_getsignal);
DB_PRINTSYM("\t", "ic_node_getmimoinfo", ic->ic_node_getmimoinfo);
DB_PRINTSYM("\t", "ic_scan_start", ic->ic_scan_start);
DB_PRINTSYM("\t", "ic_scan_end", ic->ic_scan_end);
DB_PRINTSYM("\t", "ic_set_channel", ic->ic_set_channel);
DB_PRINTSYM("\t", "ic_scan_curchan", ic->ic_scan_curchan);
DB_PRINTSYM("\t", "ic_scan_mindwell", ic->ic_scan_mindwell);
DB_PRINTSYM("\t", "ic_recv_action", ic->ic_recv_action);
DB_PRINTSYM("\t", "ic_send_action", ic->ic_send_action);
DB_PRINTSYM("\t", "ic_addba_request", ic->ic_addba_request);
DB_PRINTSYM("\t", "ic_addba_response", ic->ic_addba_response);
DB_PRINTSYM("\t", "ic_addba_stop", ic->ic_addba_stop);
}
if (showvaps && !TAILQ_EMPTY(&ic->ic_vaps)) {
db_printf("\n");
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
_db_show_vap(vap, showmesh, showprocs);
}
if (showsta && !TAILQ_EMPTY(&ic->ic_sta.nt_node)) {
const struct ieee80211_node_table *nt = &ic->ic_sta;
const struct ieee80211_node *ni;
TAILQ_FOREACH(ni, &nt->nt_node, ni_list) {
db_printf("\n");
_db_show_sta(ni);
}
}
}
static void
_db_show_node_table(const char *tag, const struct ieee80211_node_table *nt)
{
int i;
db_printf("%s%s@%p:\n", tag, nt->nt_name, nt);
db_printf("%s nodelock %p", tag, &nt->nt_nodelock);
db_printf(" inact_init %d", nt->nt_inact_init);
db_printf(" scanlock %p", &nt->nt_scanlock);
db_printf(" scangen %u\n", nt->nt_scangen);
db_printf("%s keyixmax %d keyixmap %p\n",
tag, nt->nt_keyixmax, nt->nt_keyixmap);
for (i = 0; i < nt->nt_keyixmax; i++) {
const struct ieee80211_node *ni = nt->nt_keyixmap[i];
if (ni != NULL)
db_printf("%s [%3u] %p %s\n", tag, i, ni,
ether_sprintf(ni->ni_macaddr));
}
}
static void
_db_show_channel(const char *tag, const struct ieee80211_channel *c)
{
db_printf("%s ", tag);
if (c == NULL)
db_printf("<NULL>");
else if (c == IEEE80211_CHAN_ANYC)
db_printf("<ANY>");
else
db_printf("[%u (%u) flags=%b maxreg %d maxpow %d minpow %d state 0x%x extieee %u]",
c->ic_freq, c->ic_ieee,
c->ic_flags, IEEE80211_CHAN_BITS,
c->ic_maxregpower, c->ic_maxpower, c->ic_minpower,
c->ic_state, c->ic_extieee);
}
static void
_db_show_ssid(const char *tag, int ix, int len, const uint8_t *ssid)
{
const uint8_t *p;
int i;
db_printf(tag, ix);
if (len > IEEE80211_NWID_LEN)
len = IEEE80211_NWID_LEN;
/* determine printable or not */
for (i = 0, p = ssid; i < len; i++, p++) {
if (*p < ' ' || *p > 0x7e)
break;
}
if (i == len) {
db_printf("\"");
for (i = 0, p = ssid; i < len; i++, p++)
db_printf("%c", *p);
db_printf("\"");
} else {
db_printf("0x");
for (i = 0, p = ssid; i < len; i++, p++)
db_printf("%02x", *p);
}
}
static void
_db_show_appie(const char *tag, const struct ieee80211_appie *ie)
{
const uint8_t *p;
int i;
if (ie == NULL)
return;
db_printf("%s [0x", tag);
for (i = 0, p = ie->ie_data; i < ie->ie_len; i++, p++)
db_printf("%02x", *p);
db_printf("]\n");
}
static void
_db_show_key(const char *tag, int ix, const struct ieee80211_key *wk)
{
static const uint8_t zerodata[IEEE80211_KEYBUF_SIZE];
const struct ieee80211_cipher *cip = wk->wk_cipher;
int keylen = wk->wk_keylen;
db_printf(tag, ix);
switch (cip->ic_cipher) {
case IEEE80211_CIPHER_WEP:
/* compatibility */
db_printf(" wepkey %u:%s", wk->wk_keyix,
keylen <= 5 ? "40-bit" :
keylen <= 13 ? "104-bit" : "128-bit");
break;
case IEEE80211_CIPHER_TKIP:
if (keylen > 128/8)
keylen -= 128/8; /* ignore MIC for now */
db_printf(" TKIP %u:%u-bit", wk->wk_keyix, 8*keylen);
break;
case IEEE80211_CIPHER_AES_OCB:
db_printf(" AES-OCB %u:%u-bit", wk->wk_keyix, 8*keylen);
break;
case IEEE80211_CIPHER_AES_CCM:
db_printf(" AES-CCM %u:%u-bit", wk->wk_keyix, 8*keylen);
break;
case IEEE80211_CIPHER_CKIP:
db_printf(" CKIP %u:%u-bit", wk->wk_keyix, 8*keylen);
break;
case IEEE80211_CIPHER_NONE:
db_printf(" NULL %u:%u-bit", wk->wk_keyix, 8*keylen);
break;
default:
db_printf(" UNKNOWN (0x%x) %u:%u-bit",
cip->ic_cipher, wk->wk_keyix, 8*keylen);
break;
}
if (wk->wk_rxkeyix != wk->wk_keyix)
db_printf(" rxkeyix %u", wk->wk_rxkeyix);
if (memcmp(wk->wk_key, zerodata, keylen) != 0) {
int i;
db_printf(" <");
for (i = 0; i < keylen; i++)
db_printf("%02x", wk->wk_key[i]);
db_printf(">");
if (cip->ic_cipher != IEEE80211_CIPHER_WEP &&
wk->wk_keyrsc[IEEE80211_NONQOS_TID] != 0)
db_printf(" rsc %ju", (uintmax_t)wk->wk_keyrsc[IEEE80211_NONQOS_TID]);
if (cip->ic_cipher != IEEE80211_CIPHER_WEP &&
wk->wk_keytsc != 0)
db_printf(" tsc %ju", (uintmax_t)wk->wk_keytsc);
db_printf(" flags=%b", wk->wk_flags, IEEE80211_KEY_BITS);
}
db_printf("\n");
}
static void
printrate(const char *tag, int v)
{
if (v == IEEE80211_FIXED_RATE_NONE)
db_printf(" %s <none>", tag);
else if (v == 11)
db_printf(" %s 5.5", tag);
else if (v & IEEE80211_RATE_MCS)
db_printf(" %s MCS%d", tag, v &~ IEEE80211_RATE_MCS);
else
db_printf(" %s %d", tag, v/2);
}
static void
_db_show_roamparams(const char *tag, const void *arg,
const struct ieee80211_roamparam *rp)
{
db_printf(tag, arg);
if (rp->rssi & 1)
db_printf(" rssi %u.5", rp->rssi/2);
else
db_printf(" rssi %u", rp->rssi/2);
printrate("rate", rp->rate);
}
static void
_db_show_txparams(const char *tag, const void *arg,
const struct ieee80211_txparam *tp)
{
db_printf(tag, arg);
printrate("ucastrate", tp->ucastrate);
printrate("mcastrate", tp->mcastrate);
printrate("mgmtrate", tp->mgmtrate);
db_printf(" maxretry %d", tp->maxretry);
}
static void
_db_show_ageq(const char *tag, const struct ieee80211_ageq *q)
{
const struct mbuf *m;
db_printf("%s lock %p len %d maxlen %d drops %d head %p tail %p\n",
tag, &q->aq_lock, q->aq_len, q->aq_maxlen, q->aq_drops,
q->aq_head, q->aq_tail);
for (m = q->aq_head; m != NULL; m = m->m_nextpkt)
db_printf("%s %p (len %d, %b)\n", tag, m, m->m_len,
/* XXX could be either TX or RX but is mostly TX */
m->m_flags, IEEE80211_MBUF_TX_FLAG_BITS);
}
static void
_db_show_stats(const struct ieee80211_stats *is)
{
}
#ifdef IEEE80211_SUPPORT_MESH
static void
_db_show_mesh(const struct ieee80211_mesh_state *ms)
{
struct ieee80211_mesh_route *rt;
int i;
_db_show_ssid(" meshid ", 0, ms->ms_idlen, ms->ms_id);
db_printf("nextseq %u ttl %u flags 0x%x\n", ms->ms_seq,
ms->ms_ttl, ms->ms_flags);
db_printf("routing table:\n");
i = 0;
TAILQ_FOREACH(rt, &ms->ms_routes, rt_next) {
db_printf("entry %d:\tdest: %6D nexthop: %6D metric: %u", i,
rt->rt_dest, ":", rt->rt_nexthop, ":", rt->rt_metric);
db_printf("\tlifetime: %u lastseq: %u priv: %p\n",
ieee80211_mesh_rt_update(rt, 0),
rt->rt_lastmseq, rt->rt_priv);
i++;
}
}
#endif /* IEEE80211_SUPPORT_MESH */
#endif /* DDB */
Index: head/sys/net80211/ieee80211_freebsd.c
===================================================================
--- head/sys/net80211/ieee80211_freebsd.c (revision 287196)
+++ head/sys/net80211/ieee80211_freebsd.c (revision 287197)
@@ -1,935 +1,873 @@
/*-
* Copyright (c) 2003-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* IEEE 802.11 support (FreeBSD-specific code)
*/
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/linker.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_clone.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/ethernet.h>
#include <net/route.h>
#include <net/vnet.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_input.h>
SYSCTL_NODE(_net, OID_AUTO, wlan, CTLFLAG_RD, 0, "IEEE 80211 parameters");
#ifdef IEEE80211_DEBUG
int ieee80211_debug = 0;
SYSCTL_INT(_net_wlan, OID_AUTO, debug, CTLFLAG_RW, &ieee80211_debug,
0, "debugging printfs");
#endif
static MALLOC_DEFINE(M_80211_COM, "80211com", "802.11 com state");
static const char wlanname[] = "wlan";
static struct if_clone *wlan_cloner;
-/*
- * Allocate/free com structure in conjunction with ifnet;
- * these routines are registered with if_register_com_alloc
- * below and are called automatically by the ifnet code
- * when the ifnet of the parent device is created.
- */
-static void *
-wlan_alloc(u_char type, struct ifnet *ifp)
-{
- struct ieee80211com *ic;
-
- ic = IEEE80211_MALLOC(sizeof(struct ieee80211com), M_80211_COM,
- IEEE80211_M_WAITOK | IEEE80211_M_ZERO);
- ic->ic_ifp = ifp;
-
- return (ic);
-}
-
-static void
-wlan_free(void *ic, u_char type)
-{
- IEEE80211_FREE(ic, M_80211_COM);
-}
-
static int
wlan_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
struct ieee80211_clone_params cp;
struct ieee80211vap *vap;
struct ieee80211com *ic;
- struct ifnet *ifp;
int error;
error = copyin(params, &cp, sizeof(cp));
if (error)
return error;
- ifp = ifunit(cp.icp_parent);
- if (ifp == NULL)
+ ic = ieee80211_find_com(cp.icp_parent);
+ if (ic == NULL)
return ENXIO;
- /* XXX move printfs to DIAGNOSTIC before release */
- if (ifp->if_type != IFT_IEEE80211) {
- if_printf(ifp, "%s: reject, not an 802.11 device\n", __func__);
- return ENXIO;
- }
if (cp.icp_opmode >= IEEE80211_OPMODE_MAX) {
- if_printf(ifp, "%s: invalid opmode %d\n",
- __func__, cp.icp_opmode);
+ ic_printf(ic, "%s: invalid opmode %d\n", __func__,
+ cp.icp_opmode);
return EINVAL;
}
- ic = ifp->if_l2com;
if ((ic->ic_caps & ieee80211_opcap[cp.icp_opmode]) == 0) {
- if_printf(ifp, "%s mode not supported\n",
+ ic_printf(ic, "%s mode not supported\n",
ieee80211_opmode_name[cp.icp_opmode]);
return EOPNOTSUPP;
}
if ((cp.icp_flags & IEEE80211_CLONE_TDMA) &&
#ifdef IEEE80211_SUPPORT_TDMA
(ic->ic_caps & IEEE80211_C_TDMA) == 0
#else
(1)
#endif
) {
- if_printf(ifp, "TDMA not supported\n");
+ ic_printf(ic, "TDMA not supported\n");
return EOPNOTSUPP;
}
vap = ic->ic_vap_create(ic, wlanname, unit,
cp.icp_opmode, cp.icp_flags, cp.icp_bssid,
cp.icp_flags & IEEE80211_CLONE_MACADDR ?
- cp.icp_macaddr : (const uint8_t *)IF_LLADDR(ifp));
+ cp.icp_macaddr : ic->ic_macaddr);
return (vap == NULL ? EIO : 0);
}
static void
wlan_clone_destroy(struct ifnet *ifp)
{
struct ieee80211vap *vap = ifp->if_softc;
struct ieee80211com *ic = vap->iv_ic;
ic->ic_vap_delete(vap);
}
void
ieee80211_vap_destroy(struct ieee80211vap *vap)
{
CURVNET_SET(vap->iv_ifp->if_vnet);
if_clone_destroyif(wlan_cloner, vap->iv_ifp);
CURVNET_RESTORE();
}
int
ieee80211_sysctl_msecs_ticks(SYSCTL_HANDLER_ARGS)
{
int msecs = ticks_to_msecs(*(int *)arg1);
int error, t;
error = sysctl_handle_int(oidp, &msecs, 0, req);
if (error || !req->newptr)
return error;
t = msecs_to_ticks(msecs);
*(int *)arg1 = (t < 1) ? 1 : t;
return 0;
}
static int
ieee80211_sysctl_inact(SYSCTL_HANDLER_ARGS)
{
int inact = (*(int *)arg1) * IEEE80211_INACT_WAIT;
int error;
error = sysctl_handle_int(oidp, &inact, 0, req);
if (error || !req->newptr)
return error;
*(int *)arg1 = inact / IEEE80211_INACT_WAIT;
return 0;
}
static int
ieee80211_sysctl_parent(SYSCTL_HANDLER_ARGS)
{
struct ieee80211com *ic = arg1;
return SYSCTL_OUT_STR(req, ic->ic_name);
}
static int
ieee80211_sysctl_radar(SYSCTL_HANDLER_ARGS)
{
struct ieee80211com *ic = arg1;
int t = 0, error;
error = sysctl_handle_int(oidp, &t, 0, req);
if (error || !req->newptr)
return error;
IEEE80211_LOCK(ic);
ieee80211_dfs_notify_radar(ic, ic->ic_curchan);
IEEE80211_UNLOCK(ic);
return 0;
}
void
ieee80211_sysctl_attach(struct ieee80211com *ic)
{
}
void
ieee80211_sysctl_detach(struct ieee80211com *ic)
{
}
void
ieee80211_sysctl_vattach(struct ieee80211vap *vap)
{
struct ifnet *ifp = vap->iv_ifp;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *oid;
char num[14]; /* sufficient for 32 bits */
ctx = (struct sysctl_ctx_list *) IEEE80211_MALLOC(sizeof(struct sysctl_ctx_list),
M_DEVBUF, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
if (ctx == NULL) {
if_printf(ifp, "%s: cannot allocate sysctl context!\n",
__func__);
return;
}
sysctl_ctx_init(ctx);
snprintf(num, sizeof(num), "%u", ifp->if_dunit);
oid = SYSCTL_ADD_NODE(ctx, &SYSCTL_NODE_CHILDREN(_net, wlan),
OID_AUTO, num, CTLFLAG_RD, NULL, "");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"%parent", CTLTYPE_STRING | CTLFLAG_RD, vap->iv_ic, 0,
ieee80211_sysctl_parent, "A", "parent device");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"driver_caps", CTLFLAG_RW, &vap->iv_caps, 0,
"driver capabilities");
#ifdef IEEE80211_DEBUG
vap->iv_debug = ieee80211_debug;
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"debug", CTLFLAG_RW, &vap->iv_debug, 0,
"control debugging printfs");
#endif
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"bmiss_max", CTLFLAG_RW, &vap->iv_bmiss_max, 0,
"consecutive beacon misses before scanning");
/* XXX inherit from tunables */
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"inact_run", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_run, 0,
ieee80211_sysctl_inact, "I",
"station inactivity timeout (sec)");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"inact_probe", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_probe, 0,
ieee80211_sysctl_inact, "I",
"station inactivity probe timeout (sec)");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"inact_auth", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_auth, 0,
ieee80211_sysctl_inact, "I",
"station authentication timeout (sec)");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"inact_init", CTLTYPE_INT | CTLFLAG_RW, &vap->iv_inact_init, 0,
ieee80211_sysctl_inact, "I",
"station initial state timeout (sec)");
if (vap->iv_htcaps & IEEE80211_HTC_HT) {
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"ampdu_mintraffic_bk", CTLFLAG_RW,
&vap->iv_ampdu_mintraffic[WME_AC_BK], 0,
"BK traffic tx aggr threshold (pps)");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"ampdu_mintraffic_be", CTLFLAG_RW,
&vap->iv_ampdu_mintraffic[WME_AC_BE], 0,
"BE traffic tx aggr threshold (pps)");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"ampdu_mintraffic_vo", CTLFLAG_RW,
&vap->iv_ampdu_mintraffic[WME_AC_VO], 0,
"VO traffic tx aggr threshold (pps)");
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"ampdu_mintraffic_vi", CTLFLAG_RW,
&vap->iv_ampdu_mintraffic[WME_AC_VI], 0,
"VI traffic tx aggr threshold (pps)");
}
if (vap->iv_caps & IEEE80211_C_DFS) {
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"radar", CTLTYPE_INT | CTLFLAG_RW, vap->iv_ic, 0,
ieee80211_sysctl_radar, "I", "simulate radar event");
}
vap->iv_sysctl = ctx;
vap->iv_oid = oid;
}
void
ieee80211_sysctl_vdetach(struct ieee80211vap *vap)
{
if (vap->iv_sysctl != NULL) {
sysctl_ctx_free(vap->iv_sysctl);
IEEE80211_FREE(vap->iv_sysctl, M_DEVBUF);
vap->iv_sysctl = NULL;
}
}
int
ieee80211_node_dectestref(struct ieee80211_node *ni)
{
/* XXX need equivalent of atomic_dec_and_test */
atomic_subtract_int(&ni->ni_refcnt, 1);
return atomic_cmpset_int(&ni->ni_refcnt, 0, 1);
}
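/*
 * Editorial note: the two-step sequence above stands in for the missing
 * atomic_dec_and_test mentioned in the XXX.  atomic_subtract_int() drops
 * the reference, and atomic_cmpset_int(&ni->ni_refcnt, 0, 1) then succeeds
 * only for the caller that observed the count reach zero, parking the
 * count at 1 so that caller holds the final reference used for teardown.
 * Another thread could in principle bump the count between the two
 * operations, which is presumably why the XXX remains.
 */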
void
ieee80211_drain_ifq(struct ifqueue *ifq)
{
struct ieee80211_node *ni;
struct mbuf *m;
for (;;) {
IF_DEQUEUE(ifq, m);
if (m == NULL)
break;
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
KASSERT(ni != NULL, ("frame w/o node"));
ieee80211_free_node(ni);
m->m_pkthdr.rcvif = NULL;
m_freem(m);
}
}
void
ieee80211_flush_ifq(struct ifqueue *ifq, struct ieee80211vap *vap)
{
struct ieee80211_node *ni;
struct mbuf *m, **mprev;
IF_LOCK(ifq);
mprev = &ifq->ifq_head;
while ((m = *mprev) != NULL) {
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
if (ni != NULL && ni->ni_vap == vap) {
*mprev = m->m_nextpkt; /* remove from list */
ifq->ifq_len--;
m_freem(m);
ieee80211_free_node(ni); /* reclaim ref */
} else
mprev = &m->m_nextpkt;
}
/* recalculate tail ptr */
m = ifq->ifq_head;
for (; m != NULL && m->m_nextpkt != NULL; m = m->m_nextpkt)
;
ifq->ifq_tail = m;
IF_UNLOCK(ifq);
}
/*
* As above, for mbufs allocated with m_gethdr/MGETHDR
* or initialized by M_COPY_PKTHDR.
*/
#define MC_ALIGN(m, len) \
do { \
(m)->m_data += (MCLBYTES - (len)) &~ (sizeof(long) - 1); \
} while (/* CONSTCOND */ 0)
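/*
 * Worked example (editorial): with MCLBYTES = 2048, len = 100 and
 * sizeof(long) = 8, MC_ALIGN advances m_data by (2048 - 100) & ~7 = 1944,
 * leaving 104 bytes at the tail of the cluster for the payload.  Because
 * the adjustment is rounded down to a multiple of sizeof(long) and cluster
 * buffers start long-aligned, the resulting m_data stays long-aligned too.
 */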
/*
* Allocate and setup a management frame of the specified
* size. We return the mbuf and a pointer to the start
* of the contiguous data area that's been reserved based
* on the packet length. The data area is forced to 32-bit
* alignment and the buffer length to a multiple of 4 bytes.
* This is done mainly so beacon frames (that require this)
* can use this interface too.
*/
struct mbuf *
ieee80211_getmgtframe(uint8_t **frm, int headroom, int pktlen)
{
struct mbuf *m;
u_int len;
/*
* NB: we know the mbuf routines will align the data area
* so we don't need to do anything special.
*/
len = roundup2(headroom + pktlen, 4);
KASSERT(len <= MCLBYTES, ("802.11 mgt frame too large: %u", len));
if (len < MINCLSIZE) {
m = m_gethdr(M_NOWAIT, MT_DATA);
/*
* Align the data in case additional headers are added.
* This should only happen when a WEP header is added
* which only happens for shared key authentication mgt
* frames which all fit in MHLEN.
*/
if (m != NULL)
M_ALIGN(m, len);
} else {
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m != NULL)
MC_ALIGN(m, len);
}
if (m != NULL) {
m->m_data += headroom;
*frm = m->m_data;
}
return m;
}
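/*
 * Usage sketch (editorial; `payload_len' is a placeholder, not a symbol
 * from this file): transmit-path callers typically reserve the driver
 * headroom plus the 802.11 header in front of the payload and then write
 * information elements through `frm', roughly:
 *
 *     uint8_t *frm;
 *     struct mbuf *m;
 *
 *     m = ieee80211_getmgtframe(&frm,
 *         ic->ic_headroom + sizeof(struct ieee80211_frame), payload_len);
 *     if (m == NULL)
 *             return ENOMEM;
 *     ... append IEs at frm, advancing it, then set
 *         m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *) ...
 */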
#ifndef __NO_STRICT_ALIGNMENT
/*
* Re-align the payload in the mbuf. This is mainly used (right now)
* to handle IP header alignment requirements on certain architectures.
*/
struct mbuf *
ieee80211_realign(struct ieee80211vap *vap, struct mbuf *m, size_t align)
{
int pktlen, space;
struct mbuf *n;
pktlen = m->m_pkthdr.len;
space = pktlen + align;
if (space < MINCLSIZE)
n = m_gethdr(M_NOWAIT, MT_DATA);
else {
n = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
space <= MCLBYTES ? MCLBYTES :
#if MJUMPAGESIZE != MCLBYTES
space <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
space <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES);
}
if (__predict_true(n != NULL)) {
m_move_pkthdr(n, m);
n->m_data = (caddr_t)(ALIGN(n->m_data + align) - align);
m_copydata(m, 0, pktlen, mtod(n, caddr_t));
n->m_len = pktlen;
} else {
IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
mtod(m, const struct ieee80211_frame *), NULL,
"%s", "no mbuf to realign");
vap->iv_stats.is_rx_badalign++;
}
m_freem(m);
return n;
}
#endif /* !__NO_STRICT_ALIGNMENT */
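/*
 * Editorial note on the arithmetic above: the new m_data is chosen so that
 * (m_data + align) lands on an ALIGN() boundary.  For example, if a caller
 * asks for align = 2 in front of a 14-byte Ethernet-style header, the
 * header starts 2 bytes short of a boundary and the IP header that follows
 * it comes out 4-byte aligned -- the usual reason for re-aligning receive
 * buffers on strict-alignment machines.  (Illustrative reading of the code,
 * not a description of any particular caller.)
 */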
int
ieee80211_add_callback(struct mbuf *m,
void (*func)(struct ieee80211_node *, void *, int), void *arg)
{
struct m_tag *mtag;
struct ieee80211_cb *cb;
mtag = m_tag_alloc(MTAG_ABI_NET80211, NET80211_TAG_CALLBACK,
sizeof(struct ieee80211_cb), M_NOWAIT);
if (mtag == NULL)
return 0;
cb = (struct ieee80211_cb *)(mtag+1);
cb->func = func;
cb->arg = arg;
m_tag_prepend(m, mtag);
m->m_flags |= M_TXCB;
return 1;
}
int
ieee80211_add_xmit_params(struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
struct m_tag *mtag;
struct ieee80211_tx_params *tx;
mtag = m_tag_alloc(MTAG_ABI_NET80211, NET80211_TAG_XMIT_PARAMS,
sizeof(struct ieee80211_tx_params), M_NOWAIT);
if (mtag == NULL)
return (0);
tx = (struct ieee80211_tx_params *)(mtag+1);
memcpy(&tx->params, params, sizeof(struct ieee80211_bpf_params));
m_tag_prepend(m, mtag);
return (1);
}
int
ieee80211_get_xmit_params(struct mbuf *m,
struct ieee80211_bpf_params *params)
{
struct m_tag *mtag;
struct ieee80211_tx_params *tx;
mtag = m_tag_locate(m, MTAG_ABI_NET80211, NET80211_TAG_XMIT_PARAMS,
NULL);
if (mtag == NULL)
return (-1);
tx = (struct ieee80211_tx_params *)(mtag + 1);
memcpy(params, &tx->params, sizeof(struct ieee80211_bpf_params));
return (0);
}
void
ieee80211_process_callback(struct ieee80211_node *ni,
struct mbuf *m, int status)
{
struct m_tag *mtag;
mtag = m_tag_locate(m, MTAG_ABI_NET80211, NET80211_TAG_CALLBACK, NULL);
if (mtag != NULL) {
struct ieee80211_cb *cb = (struct ieee80211_cb *)(mtag+1);
cb->func(ni, cb->arg, status);
}
}
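/*
 * Editorial sketch of the callback round trip (hypothetical driver code,
 * not taken from this file): a sender attaches a completion function with
 * ieee80211_add_callback() before handing the mbuf down, and the driver's
 * TX-completion path later reports status via ieee80211_process_callback():
 *
 *     if (ieee80211_add_callback(m, my_tx_complete, my_arg) == 0)
 *             ... tag allocation failed; frame goes out without a callback ...
 *     ...
 *     if (m->m_flags & M_TXCB)
 *             ieee80211_process_callback(ni, m, status);
 *
 * my_tx_complete and my_arg are placeholders for caller-supplied values.
 */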
/*
* Transmit a frame to the parent interface.
*
* TODO: if the transmission fails, make sure the parent node is freed
* (the callers will first need modifying.)
*/
int
-ieee80211_parent_xmitpkt(struct ieee80211com *ic,
- struct mbuf *m)
+ieee80211_parent_xmitpkt(struct ieee80211com *ic, struct mbuf *m)
{
- struct ifnet *parent = ic->ic_ifp;
+ int error;
+
/*
* Assert the IC TX lock is held - this enforces the
* processing -> queuing order is maintained
*/
IEEE80211_TX_LOCK_ASSERT(ic);
-
- return (parent->if_transmit(parent, m));
+ error = ic->ic_transmit(ic, m);
+ if (error)
+ m_freem(m);
+ return (error);
}
/*
* Transmit a frame to the VAP interface.
*/
int
ieee80211_vap_xmitpkt(struct ieee80211vap *vap, struct mbuf *m)
{
struct ifnet *ifp = vap->iv_ifp;
/*
* When transmitting via the VAP, we shouldn't hold
* any IC TX lock as the VAP TX path will acquire it.
*/
IEEE80211_TX_UNLOCK_ASSERT(vap->iv_ic);
return (ifp->if_transmit(ifp, m));
}
#include <sys/libkern.h>
void
get_random_bytes(void *p, size_t n)
{
uint8_t *dp = p;
while (n > 0) {
uint32_t v = arc4random();
size_t nb = n > sizeof(uint32_t) ? sizeof(uint32_t) : n;
bcopy(&v, dp, n > sizeof(uint32_t) ? sizeof(uint32_t) : n);
dp += sizeof(uint32_t), n -= nb;
}
}
/*
* Helper function for events that pass just a single mac address.
*/
static void
notify_macaddr(struct ifnet *ifp, int op, const uint8_t mac[IEEE80211_ADDR_LEN])
{
struct ieee80211_join_event iev;
CURVNET_SET(ifp->if_vnet);
memset(&iev, 0, sizeof(iev));
IEEE80211_ADDR_COPY(iev.iev_addr, mac);
rt_ieee80211msg(ifp, op, &iev, sizeof(iev));
CURVNET_RESTORE();
}
void
ieee80211_notify_node_join(struct ieee80211_node *ni, int newassoc)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ifnet *ifp = vap->iv_ifp;
CURVNET_SET_QUIET(ifp->if_vnet);
IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode join",
(ni == vap->iv_bss) ? "bss " : "");
if (ni == vap->iv_bss) {
notify_macaddr(ifp, newassoc ?
RTM_IEEE80211_ASSOC : RTM_IEEE80211_REASSOC, ni->ni_bssid);
if_link_state_change(ifp, LINK_STATE_UP);
} else {
notify_macaddr(ifp, newassoc ?
RTM_IEEE80211_JOIN : RTM_IEEE80211_REJOIN, ni->ni_macaddr);
}
CURVNET_RESTORE();
}
void
ieee80211_notify_node_leave(struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ifnet *ifp = vap->iv_ifp;
CURVNET_SET_QUIET(ifp->if_vnet);
IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%snode leave",
(ni == vap->iv_bss) ? "bss " : "");
if (ni == vap->iv_bss) {
rt_ieee80211msg(ifp, RTM_IEEE80211_DISASSOC, NULL, 0);
if_link_state_change(ifp, LINK_STATE_DOWN);
} else {
/* fire off wireless event station leaving */
notify_macaddr(ifp, RTM_IEEE80211_LEAVE, ni->ni_macaddr);
}
CURVNET_RESTORE();
}
void
ieee80211_notify_scan_done(struct ieee80211vap *vap)
{
struct ifnet *ifp = vap->iv_ifp;
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s\n", "notify scan done");
/* dispatch wireless event indicating scan completed */
CURVNET_SET(ifp->if_vnet);
rt_ieee80211msg(ifp, RTM_IEEE80211_SCAN, NULL, 0);
CURVNET_RESTORE();
}
void
ieee80211_notify_replay_failure(struct ieee80211vap *vap,
const struct ieee80211_frame *wh, const struct ieee80211_key *k,
u_int64_t rsc, int tid)
{
struct ifnet *ifp = vap->iv_ifp;
IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
"%s replay detected tid %d <rsc %ju, csc %ju, keyix %u rxkeyix %u>",
k->wk_cipher->ic_name, tid, (intmax_t) rsc,
(intmax_t) k->wk_keyrsc[tid],
k->wk_keyix, k->wk_rxkeyix);
if (ifp != NULL) { /* NB: for cipher test modules */
struct ieee80211_replay_event iev;
IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
iev.iev_cipher = k->wk_cipher->ic_cipher;
if (k->wk_rxkeyix != IEEE80211_KEYIX_NONE)
iev.iev_keyix = k->wk_rxkeyix;
else
iev.iev_keyix = k->wk_keyix;
iev.iev_keyrsc = k->wk_keyrsc[tid];
iev.iev_rsc = rsc;
CURVNET_SET(ifp->if_vnet);
rt_ieee80211msg(ifp, RTM_IEEE80211_REPLAY, &iev, sizeof(iev));
CURVNET_RESTORE();
}
}
void
ieee80211_notify_michael_failure(struct ieee80211vap *vap,
const struct ieee80211_frame *wh, u_int keyix)
{
struct ifnet *ifp = vap->iv_ifp;
IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2,
"michael MIC verification failed <keyix %u>", keyix);
vap->iv_stats.is_rx_tkipmic++;
if (ifp != NULL) { /* NB: for cipher test modules */
struct ieee80211_michael_event iev;
IEEE80211_ADDR_COPY(iev.iev_dst, wh->i_addr1);
IEEE80211_ADDR_COPY(iev.iev_src, wh->i_addr2);
iev.iev_cipher = IEEE80211_CIPHER_TKIP;
iev.iev_keyix = keyix;
CURVNET_SET(ifp->if_vnet);
rt_ieee80211msg(ifp, RTM_IEEE80211_MICHAEL, &iev, sizeof(iev));
CURVNET_RESTORE();
}
}
void
ieee80211_notify_wds_discover(struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ifnet *ifp = vap->iv_ifp;
notify_macaddr(ifp, RTM_IEEE80211_WDS, ni->ni_macaddr);
}
void
ieee80211_notify_csa(struct ieee80211com *ic,
const struct ieee80211_channel *c, int mode, int count)
{
struct ieee80211_csa_event iev;
struct ieee80211vap *vap;
struct ifnet *ifp;
memset(&iev, 0, sizeof(iev));
iev.iev_flags = c->ic_flags;
iev.iev_freq = c->ic_freq;
iev.iev_ieee = c->ic_ieee;
iev.iev_mode = mode;
iev.iev_count = count;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
ifp = vap->iv_ifp;
CURVNET_SET(ifp->if_vnet);
rt_ieee80211msg(ifp, RTM_IEEE80211_CSA, &iev, sizeof(iev));
CURVNET_RESTORE();
}
}
void
ieee80211_notify_radar(struct ieee80211com *ic,
const struct ieee80211_channel *c)
{
struct ieee80211_radar_event iev;
struct ieee80211vap *vap;
struct ifnet *ifp;
memset(&iev, 0, sizeof(iev));
iev.iev_flags = c->ic_flags;
iev.iev_freq = c->ic_freq;
iev.iev_ieee = c->ic_ieee;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
ifp = vap->iv_ifp;
CURVNET_SET(ifp->if_vnet);
rt_ieee80211msg(ifp, RTM_IEEE80211_RADAR, &iev, sizeof(iev));
CURVNET_RESTORE();
}
}
void
ieee80211_notify_cac(struct ieee80211com *ic,
const struct ieee80211_channel *c, enum ieee80211_notify_cac_event type)
{
struct ieee80211_cac_event iev;
struct ieee80211vap *vap;
struct ifnet *ifp;
memset(&iev, 0, sizeof(iev));
iev.iev_flags = c->ic_flags;
iev.iev_freq = c->ic_freq;
iev.iev_ieee = c->ic_ieee;
iev.iev_type = type;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
ifp = vap->iv_ifp;
CURVNET_SET(ifp->if_vnet);
rt_ieee80211msg(ifp, RTM_IEEE80211_CAC, &iev, sizeof(iev));
CURVNET_RESTORE();
}
}
void
ieee80211_notify_node_deauth(struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ifnet *ifp = vap->iv_ifp;
IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node deauth");
notify_macaddr(ifp, RTM_IEEE80211_DEAUTH, ni->ni_macaddr);
}
void
ieee80211_notify_node_auth(struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ifnet *ifp = vap->iv_ifp;
IEEE80211_NOTE(vap, IEEE80211_MSG_NODE, ni, "%s", "node auth");
notify_macaddr(ifp, RTM_IEEE80211_AUTH, ni->ni_macaddr);
}
void
ieee80211_notify_country(struct ieee80211vap *vap,
const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t cc[2])
{
struct ifnet *ifp = vap->iv_ifp;
struct ieee80211_country_event iev;
memset(&iev, 0, sizeof(iev));
IEEE80211_ADDR_COPY(iev.iev_addr, bssid);
iev.iev_cc[0] = cc[0];
iev.iev_cc[1] = cc[1];
CURVNET_SET(ifp->if_vnet);
rt_ieee80211msg(ifp, RTM_IEEE80211_COUNTRY, &iev, sizeof(iev));
CURVNET_RESTORE();
}
void
ieee80211_notify_radio(struct ieee80211com *ic, int state)
{
struct ieee80211_radio_event iev;
struct ieee80211vap *vap;
struct ifnet *ifp;
memset(&iev, 0, sizeof(iev));
iev.iev_state = state;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
ifp = vap->iv_ifp;
CURVNET_SET(ifp->if_vnet);
rt_ieee80211msg(ifp, RTM_IEEE80211_RADIO, &iev, sizeof(iev));
CURVNET_RESTORE();
}
}
void
ieee80211_load_module(const char *modname)
{
#ifdef notyet
(void)kern_kldload(curthread, modname, NULL);
#else
printf("%s: load the %s module by hand for now.\n", __func__, modname);
#endif
}
static eventhandler_tag wlan_bpfevent;
-static eventhandler_tag wlan_ifllevent;
static void
bpf_track(void *arg, struct ifnet *ifp, int dlt, int attach)
{
/* NB: identify vap's by if_init */
if (dlt == DLT_IEEE802_11_RADIO &&
ifp->if_init == ieee80211_init) {
struct ieee80211vap *vap = ifp->if_softc;
/*
* Track bpf radiotap listener state. We mark the vap
* to indicate if any listener is present and the com
* to indicate if any listener exists on any associated
* vap. This flag is used by drivers to prepare radiotap
* state only when needed.
*/
if (attach) {
ieee80211_syncflag_ext(vap, IEEE80211_FEXT_BPF);
if (vap->iv_opmode == IEEE80211_M_MONITOR)
atomic_add_int(&vap->iv_ic->ic_montaps, 1);
} else if (!bpf_peers_present(vap->iv_rawbpf)) {
ieee80211_syncflag_ext(vap, -IEEE80211_FEXT_BPF);
if (vap->iv_opmode == IEEE80211_M_MONITOR)
atomic_subtract_int(&vap->iv_ic->ic_montaps, 1);
}
}
}
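/*
 * Editorial sketch (hedged): the flags maintained above are what let a
 * driver build radiotap state only when a listener is present; the
 * conventional TX-path check looks roughly like
 *
 *     if (ieee80211_radiotap_active_vap(vap))
 *             ieee80211_radiotap_tx(vap, m);
 *
 * with ieee80211_radiotap_active(ic) as the com-wide test on receive.
 * (Those helpers live in the radiotap code, not in this file.)
 */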
-static void
-wlan_iflladdr(void *arg __unused, struct ifnet *ifp)
-{
- struct ieee80211com *ic = ifp->if_l2com;
- struct ieee80211vap *vap, *next;
-
- if (ifp->if_type != IFT_IEEE80211 || ic == NULL)
- return;
-
- IEEE80211_LOCK(ic);
- TAILQ_FOREACH_SAFE(vap, &ic->ic_vaps, iv_next, next) {
- /*
- * If the MAC address has changed on the parent and it was
- * copied to the vap on creation then re-sync.
- */
- if (vap->iv_ic == ic &&
- (vap->iv_flags_ext & IEEE80211_FEXT_UNIQMAC) == 0) {
- IEEE80211_ADDR_COPY(vap->iv_myaddr, IF_LLADDR(ifp));
- IEEE80211_UNLOCK(ic);
- if_setlladdr(vap->iv_ifp, IF_LLADDR(ifp),
- IEEE80211_ADDR_LEN);
- IEEE80211_LOCK(ic);
- }
- }
- IEEE80211_UNLOCK(ic);
-}
-
/*
* Module glue.
*
* NB: the module name is "wlan" for compatibility with NetBSD.
*/
static int
wlan_modevent(module_t mod, int type, void *unused)
{
switch (type) {
case MOD_LOAD:
if (bootverbose)
printf("wlan: <802.11 Link Layer>\n");
wlan_bpfevent = EVENTHANDLER_REGISTER(bpf_track,
bpf_track, 0, EVENTHANDLER_PRI_ANY);
- wlan_ifllevent = EVENTHANDLER_REGISTER(iflladdr_event,
- wlan_iflladdr, NULL, EVENTHANDLER_PRI_ANY);
wlan_cloner = if_clone_simple(wlanname, wlan_clone_create,
wlan_clone_destroy, 0);
- if_register_com_alloc(IFT_IEEE80211, wlan_alloc, wlan_free);
return 0;
case MOD_UNLOAD:
- if_deregister_com_alloc(IFT_IEEE80211);
if_clone_detach(wlan_cloner);
EVENTHANDLER_DEREGISTER(bpf_track, wlan_bpfevent);
- EVENTHANDLER_DEREGISTER(iflladdr_event, wlan_ifllevent);
return 0;
}
return EINVAL;
}
static moduledata_t wlan_mod = {
wlanname,
wlan_modevent,
0
};
DECLARE_MODULE(wlan, wlan_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(wlan, 1);
MODULE_DEPEND(wlan, ether, 1, 1, 1);
#ifdef IEEE80211_ALQ
MODULE_DEPEND(wlan, alq, 1, 1, 1);
#endif /* IEEE80211_ALQ */
Index: head/sys/net80211/ieee80211_ioctl.c
===================================================================
--- head/sys/net80211/ieee80211_ioctl.c (revision 287196)
+++ head/sys/net80211/ieee80211_ioctl.c (revision 287197)
@@ -1,3443 +1,3404 @@
/*-
* Copyright (c) 2001 Atsushi Onoe
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* IEEE 802.11 ioctl support (FreeBSD-specific)
*/
#include "opt_inet.h"
#include "opt_wlan.h"
#include <sys/endian.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ethernet.h>
#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_ioctl.h>
#include <net80211/ieee80211_regdomain.h>
#include <net80211/ieee80211_input.h>
#define IS_UP_AUTO(_vap) \
(IFNET_IS_UP_RUNNING((_vap)->iv_ifp) && \
(_vap)->iv_roaming == IEEE80211_ROAMING_AUTO)
static const uint8_t zerobssid[IEEE80211_ADDR_LEN];
static struct ieee80211_channel *findchannel(struct ieee80211com *,
int ieee, int mode);
static int ieee80211_scanreq(struct ieee80211vap *,
struct ieee80211_scan_req *);
static __noinline int
ieee80211_ioctl_getkey(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_node *ni;
struct ieee80211req_key ik;
struct ieee80211_key *wk;
const struct ieee80211_cipher *cip;
u_int kid;
int error;
if (ireq->i_len != sizeof(ik))
return EINVAL;
error = copyin(ireq->i_data, &ik, sizeof(ik));
if (error)
return error;
kid = ik.ik_keyix;
if (kid == IEEE80211_KEYIX_NONE) {
ni = ieee80211_find_vap_node(&ic->ic_sta, vap, ik.ik_macaddr);
if (ni == NULL)
return ENOENT;
wk = &ni->ni_ucastkey;
} else {
if (kid >= IEEE80211_WEP_NKID)
return EINVAL;
wk = &vap->iv_nw_keys[kid];
IEEE80211_ADDR_COPY(&ik.ik_macaddr, vap->iv_bss->ni_macaddr);
ni = NULL;
}
cip = wk->wk_cipher;
ik.ik_type = cip->ic_cipher;
ik.ik_keylen = wk->wk_keylen;
ik.ik_flags = wk->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV);
if (wk->wk_keyix == vap->iv_def_txkey)
ik.ik_flags |= IEEE80211_KEY_DEFAULT;
if (priv_check(curthread, PRIV_NET80211_GETKEY) == 0) {
/* NB: only root can read key data */
ik.ik_keyrsc = wk->wk_keyrsc[IEEE80211_NONQOS_TID];
ik.ik_keytsc = wk->wk_keytsc;
memcpy(ik.ik_keydata, wk->wk_key, wk->wk_keylen);
if (cip->ic_cipher == IEEE80211_CIPHER_TKIP) {
memcpy(ik.ik_keydata+wk->wk_keylen,
wk->wk_key + IEEE80211_KEYBUF_SIZE,
IEEE80211_MICBUF_SIZE);
ik.ik_keylen += IEEE80211_MICBUF_SIZE;
}
} else {
ik.ik_keyrsc = 0;
ik.ik_keytsc = 0;
memset(ik.ik_keydata, 0, sizeof(ik.ik_keydata));
}
if (ni != NULL)
ieee80211_free_node(ni);
return copyout(&ik, ireq->i_data, sizeof(ik));
}
static __noinline int
ieee80211_ioctl_getchanlist(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
if (sizeof(ic->ic_chan_active) < ireq->i_len)
ireq->i_len = sizeof(ic->ic_chan_active);
return copyout(&ic->ic_chan_active, ireq->i_data, ireq->i_len);
}
static __noinline int
ieee80211_ioctl_getchaninfo(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
uint32_t space;
space = __offsetof(struct ieee80211req_chaninfo,
ic_chans[ic->ic_nchans]);
if (space > ireq->i_len)
space = ireq->i_len;
/* XXX assumes compatible layout */
return copyout(&ic->ic_nchans, ireq->i_data, space);
}
static __noinline int
ieee80211_ioctl_getwpaie(struct ieee80211vap *vap,
struct ieee80211req *ireq, int req)
{
struct ieee80211_node *ni;
struct ieee80211req_wpaie2 wpaie;
int error;
if (ireq->i_len < IEEE80211_ADDR_LEN)
return EINVAL;
error = copyin(ireq->i_data, wpaie.wpa_macaddr, IEEE80211_ADDR_LEN);
if (error != 0)
return error;
ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, wpaie.wpa_macaddr);
if (ni == NULL)
return ENOENT;
memset(wpaie.wpa_ie, 0, sizeof(wpaie.wpa_ie));
if (ni->ni_ies.wpa_ie != NULL) {
int ielen = ni->ni_ies.wpa_ie[1] + 2;
if (ielen > sizeof(wpaie.wpa_ie))
ielen = sizeof(wpaie.wpa_ie);
memcpy(wpaie.wpa_ie, ni->ni_ies.wpa_ie, ielen);
}
if (req == IEEE80211_IOC_WPAIE2) {
memset(wpaie.rsn_ie, 0, sizeof(wpaie.rsn_ie));
if (ni->ni_ies.rsn_ie != NULL) {
int ielen = ni->ni_ies.rsn_ie[1] + 2;
if (ielen > sizeof(wpaie.rsn_ie))
ielen = sizeof(wpaie.rsn_ie);
memcpy(wpaie.rsn_ie, ni->ni_ies.rsn_ie, ielen);
}
if (ireq->i_len > sizeof(struct ieee80211req_wpaie2))
ireq->i_len = sizeof(struct ieee80211req_wpaie2);
} else {
/* compatibility op, may overwrite wpa ie */
/* XXX check ic_flags? */
if (ni->ni_ies.rsn_ie != NULL) {
int ielen = ni->ni_ies.rsn_ie[1] + 2;
if (ielen > sizeof(wpaie.wpa_ie))
ielen = sizeof(wpaie.wpa_ie);
memcpy(wpaie.wpa_ie, ni->ni_ies.rsn_ie, ielen);
}
if (ireq->i_len > sizeof(struct ieee80211req_wpaie))
ireq->i_len = sizeof(struct ieee80211req_wpaie);
}
ieee80211_free_node(ni);
return copyout(&wpaie, ireq->i_data, ireq->i_len);
}
static __noinline int
ieee80211_ioctl_getstastats(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211_node *ni;
uint8_t macaddr[IEEE80211_ADDR_LEN];
const size_t off = __offsetof(struct ieee80211req_sta_stats, is_stats);
int error;
if (ireq->i_len < off)
return EINVAL;
error = copyin(ireq->i_data, macaddr, IEEE80211_ADDR_LEN);
if (error != 0)
return error;
ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, macaddr);
if (ni == NULL)
return ENOENT;
if (ireq->i_len > sizeof(struct ieee80211req_sta_stats))
ireq->i_len = sizeof(struct ieee80211req_sta_stats);
/* NB: copy out only the statistics */
error = copyout(&ni->ni_stats, (uint8_t *) ireq->i_data + off,
ireq->i_len - off);
ieee80211_free_node(ni);
return error;
}
struct scanreq {
struct ieee80211req_scan_result *sr;
size_t space;
};
static size_t
scan_space(const struct ieee80211_scan_entry *se, int *ielen)
{
size_t len;
*ielen = se->se_ies.len;
/*
* NB: ie's can be no more than 255 bytes and the max 802.11
* packet is <3Kbytes so we are sure this doesn't overflow
* 16-bits; if this is a concern we can drop the ie's.
*/
len = sizeof(struct ieee80211req_scan_result) + se->se_ssid[1] +
se->se_meshid[1] + *ielen;
return roundup(len, sizeof(uint32_t));
}
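/*
 * Editorial arithmetic check for the overflow comment above: the fixed
 * ieee80211req_scan_result header is on the order of a few dozen bytes,
 * se_ssid[1] and se_meshid[1] are single-byte lengths (at most 255 each),
 * and *ielen is bounded by the sub-3KB frame size, so len tops out a few
 * hundred bytes above 3 KB -- comfortably below the 65535 limit asserted
 * by the consumer in get_scan_result().
 */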
static void
get_scan_space(void *arg, const struct ieee80211_scan_entry *se)
{
struct scanreq *req = arg;
int ielen;
req->space += scan_space(se, &ielen);
}
static __noinline void
get_scan_result(void *arg, const struct ieee80211_scan_entry *se)
{
struct scanreq *req = arg;
struct ieee80211req_scan_result *sr;
int ielen, len, nr, nxr;
uint8_t *cp;
len = scan_space(se, &ielen);
if (len > req->space)
return;
sr = req->sr;
KASSERT(len <= 65535 && ielen <= 65535,
("len %u ssid %u ie %u", len, se->se_ssid[1], ielen));
sr->isr_len = len;
sr->isr_ie_off = sizeof(struct ieee80211req_scan_result);
sr->isr_ie_len = ielen;
sr->isr_freq = se->se_chan->ic_freq;
sr->isr_flags = se->se_chan->ic_flags;
sr->isr_rssi = se->se_rssi;
sr->isr_noise = se->se_noise;
sr->isr_intval = se->se_intval;
sr->isr_capinfo = se->se_capinfo;
sr->isr_erp = se->se_erp;
IEEE80211_ADDR_COPY(sr->isr_bssid, se->se_bssid);
nr = min(se->se_rates[1], IEEE80211_RATE_MAXSIZE);
memcpy(sr->isr_rates, se->se_rates+2, nr);
nxr = min(se->se_xrates[1], IEEE80211_RATE_MAXSIZE - nr);
memcpy(sr->isr_rates+nr, se->se_xrates+2, nxr);
sr->isr_nrates = nr + nxr;
/* copy SSID */
sr->isr_ssid_len = se->se_ssid[1];
cp = ((uint8_t *)sr) + sr->isr_ie_off;
memcpy(cp, se->se_ssid+2, sr->isr_ssid_len);
/* copy mesh id */
cp += sr->isr_ssid_len;
sr->isr_meshid_len = se->se_meshid[1];
memcpy(cp, se->se_meshid+2, sr->isr_meshid_len);
cp += sr->isr_meshid_len;
if (ielen)
memcpy(cp, se->se_ies.data, ielen);
req->space -= len;
req->sr = (struct ieee80211req_scan_result *)(((uint8_t *)sr) + len);
}
static __noinline int
ieee80211_ioctl_getscanresults(struct ieee80211vap *vap,
struct ieee80211req *ireq)
{
struct scanreq req;
int error;
if (ireq->i_len < sizeof(struct scanreq))
return EFAULT;
error = 0;
req.space = 0;
ieee80211_scan_iterate(vap, get_scan_space, &req);
if (req.space > ireq->i_len)
req.space = ireq->i_len;
if (req.space > 0) {
uint32_t space;
void *p;
space = req.space;
/* XXX M_WAITOK after driver lock released */
p = IEEE80211_MALLOC(space, M_TEMP,
IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
if (p == NULL)
return ENOMEM;
req.sr = p;
ieee80211_scan_iterate(vap, get_scan_result, &req);
ireq->i_len = space - req.space;
error = copyout(p, ireq->i_data, ireq->i_len);
IEEE80211_FREE(p, M_TEMP);
} else
ireq->i_len = 0;
return error;
}
struct stainforeq {
struct ieee80211vap *vap;
struct ieee80211req_sta_info *si;
size_t space;
};
static size_t
sta_space(const struct ieee80211_node *ni, size_t *ielen)
{
*ielen = ni->ni_ies.len;
return roundup(sizeof(struct ieee80211req_sta_info) + *ielen,
sizeof(uint32_t));
}
static void
get_sta_space(void *arg, struct ieee80211_node *ni)
{
struct stainforeq *req = arg;
size_t ielen;
if (req->vap != ni->ni_vap)
return;
if (ni->ni_vap->iv_opmode == IEEE80211_M_HOSTAP &&
ni->ni_associd == 0) /* only associated stations */
return;
req->space += sta_space(ni, &ielen);
}
static __noinline void
get_sta_info(void *arg, struct ieee80211_node *ni)
{
struct stainforeq *req = arg;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211req_sta_info *si;
size_t ielen, len;
uint8_t *cp;
if (req->vap != ni->ni_vap)
return;
if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
ni->ni_associd == 0) /* only associated stations */
return;
if (ni->ni_chan == IEEE80211_CHAN_ANYC) /* XXX bogus entry */
return;
len = sta_space(ni, &ielen);
if (len > req->space)
return;
si = req->si;
si->isi_len = len;
si->isi_ie_off = sizeof(struct ieee80211req_sta_info);
si->isi_ie_len = ielen;
si->isi_freq = ni->ni_chan->ic_freq;
si->isi_flags = ni->ni_chan->ic_flags;
si->isi_state = ni->ni_flags;
si->isi_authmode = ni->ni_authmode;
vap->iv_ic->ic_node_getsignal(ni, &si->isi_rssi, &si->isi_noise);
vap->iv_ic->ic_node_getmimoinfo(ni, &si->isi_mimo);
si->isi_capinfo = ni->ni_capinfo;
si->isi_erp = ni->ni_erp;
IEEE80211_ADDR_COPY(si->isi_macaddr, ni->ni_macaddr);
si->isi_nrates = ni->ni_rates.rs_nrates;
if (si->isi_nrates > 15)
si->isi_nrates = 15;
memcpy(si->isi_rates, ni->ni_rates.rs_rates, si->isi_nrates);
si->isi_txrate = ni->ni_txrate;
if (si->isi_txrate & IEEE80211_RATE_MCS) {
const struct ieee80211_mcs_rates *mcs =
&ieee80211_htrates[ni->ni_txrate &~ IEEE80211_RATE_MCS];
if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
if (ni->ni_flags & IEEE80211_NODE_SGI40)
si->isi_txmbps = mcs->ht40_rate_800ns;
else
si->isi_txmbps = mcs->ht40_rate_400ns;
} else {
if (ni->ni_flags & IEEE80211_NODE_SGI20)
si->isi_txmbps = mcs->ht20_rate_800ns;
else
si->isi_txmbps = mcs->ht20_rate_400ns;
}
} else
si->isi_txmbps = si->isi_txrate;
si->isi_associd = ni->ni_associd;
si->isi_txpower = ni->ni_txpower;
si->isi_vlan = ni->ni_vlan;
if (ni->ni_flags & IEEE80211_NODE_QOS) {
memcpy(si->isi_txseqs, ni->ni_txseqs, sizeof(ni->ni_txseqs));
memcpy(si->isi_rxseqs, ni->ni_rxseqs, sizeof(ni->ni_rxseqs));
} else {
si->isi_txseqs[0] = ni->ni_txseqs[IEEE80211_NONQOS_TID];
si->isi_rxseqs[0] = ni->ni_rxseqs[IEEE80211_NONQOS_TID];
}
/* NB: leave all cases in case we relax ni_associd == 0 check */
if (ieee80211_node_is_authorized(ni))
si->isi_inact = vap->iv_inact_run;
else if (ni->ni_associd != 0 ||
(vap->iv_opmode == IEEE80211_M_WDS &&
(vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY)))
si->isi_inact = vap->iv_inact_auth;
else
si->isi_inact = vap->iv_inact_init;
si->isi_inact = (si->isi_inact - ni->ni_inact) * IEEE80211_INACT_WAIT;
si->isi_localid = ni->ni_mllid;
si->isi_peerid = ni->ni_mlpid;
si->isi_peerstate = ni->ni_mlstate;
if (ielen) {
cp = ((uint8_t *)si) + si->isi_ie_off;
memcpy(cp, ni->ni_ies.data, ielen);
}
req->si = (struct ieee80211req_sta_info *)(((uint8_t *)si) + len);
req->space -= len;
}
static __noinline int
getstainfo_common(struct ieee80211vap *vap, struct ieee80211req *ireq,
struct ieee80211_node *ni, size_t off)
{
struct ieee80211com *ic = vap->iv_ic;
struct stainforeq req;
size_t space;
void *p;
int error;
error = 0;
req.space = 0;
req.vap = vap;
if (ni == NULL)
ieee80211_iterate_nodes(&ic->ic_sta, get_sta_space, &req);
else
get_sta_space(&req, ni);
if (req.space > ireq->i_len)
req.space = ireq->i_len;
if (req.space > 0) {
space = req.space;
/* XXX M_WAITOK after driver lock released */
p = IEEE80211_MALLOC(space, M_TEMP,
IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
if (p == NULL) {
error = ENOMEM;
goto bad;
}
req.si = p;
if (ni == NULL)
ieee80211_iterate_nodes(&ic->ic_sta, get_sta_info, &req);
else
get_sta_info(&req, ni);
ireq->i_len = space - req.space;
error = copyout(p, (uint8_t *) ireq->i_data+off, ireq->i_len);
IEEE80211_FREE(p, M_TEMP);
} else
ireq->i_len = 0;
bad:
if (ni != NULL)
ieee80211_free_node(ni);
return error;
}
static __noinline int
ieee80211_ioctl_getstainfo(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
uint8_t macaddr[IEEE80211_ADDR_LEN];
const size_t off = __offsetof(struct ieee80211req_sta_req, info);
struct ieee80211_node *ni;
int error;
if (ireq->i_len < sizeof(struct ieee80211req_sta_req))
return EFAULT;
error = copyin(ireq->i_data, macaddr, IEEE80211_ADDR_LEN);
if (error != 0)
return error;
if (IEEE80211_ADDR_EQ(macaddr, vap->iv_ifp->if_broadcastaddr)) {
ni = NULL;
} else {
ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, macaddr);
if (ni == NULL)
return ENOENT;
}
return getstainfo_common(vap, ireq, ni, off);
}
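/*
 * NB: for illustration only -- the buffer returned for IEEE80211_IOC_STA_INFO
 * is a packed sequence of variable-length ieee80211req_sta_info records,
 * each isi_len bytes long with any IEs appended at isi_ie_off (see
 * get_sta_info above).  A userland consumer would typically walk it
 * roughly as follows (sketch, not part of the kernel path):
 *
 *	const uint8_t *cp = buf;
 *	while (len >= sizeof(struct ieee80211req_sta_info)) {
 *		const struct ieee80211req_sta_info *si = (const void *)cp;
 *		// consume si; IEs start at cp + si->isi_ie_off
 *		cp += si->isi_len;
 *		len -= si->isi_len;
 *	}
 */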
static __noinline int
ieee80211_ioctl_getstatxpow(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211_node *ni;
struct ieee80211req_sta_txpow txpow;
int error;
if (ireq->i_len != sizeof(txpow))
return EINVAL;
error = copyin(ireq->i_data, &txpow, sizeof(txpow));
if (error != 0)
return error;
ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, txpow.it_macaddr);
if (ni == NULL)
return ENOENT;
txpow.it_txpow = ni->ni_txpower;
error = copyout(&txpow, ireq->i_data, sizeof(txpow));
ieee80211_free_node(ni);
return error;
}
static __noinline int
ieee80211_ioctl_getwmeparam(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_wme_state *wme = &ic->ic_wme;
struct wmeParams *wmep;
int ac;
if ((ic->ic_caps & IEEE80211_C_WME) == 0)
return EINVAL;
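/*
 * NB: for the WME sub-ioctls i_len is not a buffer length; it encodes
 * the access category (IEEE80211_WMEPARAM_VAL bits) and the BSS/channel
 * selector (IEEE80211_WMEPARAM_BSS), decoded immediately below.
 */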
ac = (ireq->i_len & IEEE80211_WMEPARAM_VAL);
if (ac >= WME_NUM_AC)
ac = WME_AC_BE;
if (ireq->i_len & IEEE80211_WMEPARAM_BSS)
wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[ac];
else
wmep = &wme->wme_wmeChanParams.cap_wmeParams[ac];
switch (ireq->i_type) {
case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */
ireq->i_val = wmep->wmep_logcwmin;
break;
case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */
ireq->i_val = wmep->wmep_logcwmax;
break;
case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */
ireq->i_val = wmep->wmep_aifsn;
break;
case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */
ireq->i_val = wmep->wmep_txopLimit;
break;
case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */
wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[ac];
ireq->i_val = wmep->wmep_acm;
break;
case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (!bss only)*/
wmep = &wme->wme_wmeChanParams.cap_wmeParams[ac];
ireq->i_val = !wmep->wmep_noackPolicy;
break;
}
return 0;
}
static __noinline int
ieee80211_ioctl_getmaccmd(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
const struct ieee80211_aclator *acl = vap->iv_acl;
return (acl == NULL ? EINVAL : acl->iac_getioctl(vap, ireq));
}
static __noinline int
ieee80211_ioctl_getcurchan(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_channel *c;
if (ireq->i_len != sizeof(struct ieee80211_channel))
return EINVAL;
/*
* vaps may have different operating channels when HT is
* in use. When in RUN state report the vap-specific channel.
* Otherwise return curchan.
*/
if (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP)
c = vap->iv_bss->ni_chan;
else
c = ic->ic_curchan;
return copyout(c, ireq->i_data, sizeof(*c));
}
static int
getappie(const struct ieee80211_appie *aie, struct ieee80211req *ireq)
{
if (aie == NULL)
return EINVAL;
/* NB: truncate, caller can check length */
if (ireq->i_len > aie->ie_len)
ireq->i_len = aie->ie_len;
return copyout(aie->ie_data, ireq->i_data, ireq->i_len);
}
static int
ieee80211_ioctl_getappie(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
uint8_t fc0;
fc0 = ireq->i_val & 0xff;
if ((fc0 & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
return EINVAL;
/* NB: could check iv_opmode and reject but hardly worth the effort */
switch (fc0 & IEEE80211_FC0_SUBTYPE_MASK) {
case IEEE80211_FC0_SUBTYPE_BEACON:
return getappie(vap->iv_appie_beacon, ireq);
case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
return getappie(vap->iv_appie_proberesp, ireq);
case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
return getappie(vap->iv_appie_assocresp, ireq);
case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
return getappie(vap->iv_appie_probereq, ireq);
case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
return getappie(vap->iv_appie_assocreq, ireq);
case IEEE80211_FC0_SUBTYPE_BEACON|IEEE80211_FC0_SUBTYPE_PROBE_RESP:
return getappie(vap->iv_appie_wpa, ireq);
}
return EINVAL;
}
static __noinline int
ieee80211_ioctl_getregdomain(struct ieee80211vap *vap,
const struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
if (ireq->i_len != sizeof(ic->ic_regdomain))
return EINVAL;
return copyout(&ic->ic_regdomain, ireq->i_data,
sizeof(ic->ic_regdomain));
}
static __noinline int
ieee80211_ioctl_getroam(struct ieee80211vap *vap,
const struct ieee80211req *ireq)
{
size_t len = ireq->i_len;
/* NB: accept short requests for backwards compat */
if (len > sizeof(vap->iv_roamparms))
len = sizeof(vap->iv_roamparms);
return copyout(vap->iv_roamparms, ireq->i_data, len);
}
static __noinline int
ieee80211_ioctl_gettxparams(struct ieee80211vap *vap,
const struct ieee80211req *ireq)
{
size_t len = ireq->i_len;
/* NB: accept short requests for backwards compat */
if (len > sizeof(vap->iv_txparms))
len = sizeof(vap->iv_txparms);
return copyout(vap->iv_txparms, ireq->i_data, len);
}
static __noinline int
ieee80211_ioctl_getdevcaps(struct ieee80211com *ic,
const struct ieee80211req *ireq)
{
struct ieee80211_devcaps_req *dc;
struct ieee80211req_chaninfo *ci;
int maxchans, error;
maxchans = 1 + ((ireq->i_len - sizeof(struct ieee80211_devcaps_req)) /
sizeof(struct ieee80211_channel));
/* NB: require 1 so we know ic_nchans is accessible */
if (maxchans < 1)
return EINVAL;
/* constrain max request size, 2K channels is ~24Kbytes */
if (maxchans > 2048)
maxchans = 2048;
dc = (struct ieee80211_devcaps_req *)
IEEE80211_MALLOC(IEEE80211_DEVCAPS_SIZE(maxchans), M_TEMP,
IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
if (dc == NULL)
return ENOMEM;
dc->dc_drivercaps = ic->ic_caps;
dc->dc_cryptocaps = ic->ic_cryptocaps;
dc->dc_htcaps = ic->ic_htcaps;
ci = &dc->dc_chaninfo;
ic->ic_getradiocaps(ic, maxchans, &ci->ic_nchans, ci->ic_chans);
KASSERT(ci->ic_nchans <= maxchans,
("nchans %d maxchans %d", ci->ic_nchans, maxchans));
ieee80211_sort_channels(ci->ic_chans, ci->ic_nchans);
error = copyout(dc, ireq->i_data, IEEE80211_DEVCAPS_SPACE(dc));
IEEE80211_FREE(dc, M_TEMP);
return error;
}
static __noinline int
ieee80211_ioctl_getstavlan(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211_node *ni;
struct ieee80211req_sta_vlan vlan;
int error;
if (ireq->i_len != sizeof(vlan))
return EINVAL;
error = copyin(ireq->i_data, &vlan, sizeof(vlan));
if (error != 0)
return error;
if (!IEEE80211_ADDR_EQ(vlan.sv_macaddr, zerobssid)) {
ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap,
vlan.sv_macaddr);
if (ni == NULL)
return ENOENT;
} else
ni = ieee80211_ref_node(vap->iv_bss);
vlan.sv_vlan = ni->ni_vlan;
error = copyout(&vlan, ireq->i_data, sizeof(vlan));
ieee80211_free_node(ni);
return error;
}
/*
* Dummy ioctl get handler so the linker set is defined.
*/
static int
dummy_ioctl_get(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
return ENOSYS;
}
IEEE80211_IOCTL_GET(dummy, dummy_ioctl_get);
static int
ieee80211_ioctl_getdefault(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
ieee80211_ioctl_getfunc * const *get;
int error;
SET_FOREACH(get, ieee80211_ioctl_getset) {
error = (*get)(vap, ireq);
if (error != ENOSYS)
return error;
}
return EINVAL;
}
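/*
 * NB: for illustration -- the linker set above lets other modules extend
 * the get path without touching ieee80211_ioctl_get80211(): a module
 * supplies a handler that returns ENOSYS for requests it does not
 * recognize and registers it the same way the dummy does, e.g. with a
 * hypothetical handler name:
 *
 *	IEEE80211_IOCTL_GET(mydrv, mydrv_ioctl_get);
 */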
/*
* When building the kernel with -O2 on the i386 architecture, gcc
* seems to want to inline this function into ieee80211_ioctl()
* (which is the only routine that calls it). When this happens,
* ieee80211_ioctl() ends up consuming an additional 2K of stack
* space. (Exactly why it needs so much is unclear.) The problem
* is that it's possible for ieee80211_ioctl() to invoke other
* routines (including driver init functions) which could then find
* themselves perilously close to exhausting the stack.
*
* To avoid this, we deliberately prevent gcc from inlining this
* routine. Another way to avoid this is to use less aggressive
* optimization when compiling this file (i.e. -O instead of -O2)
* but special-casing the compilation of this one module in the
* build system would be awkward.
*/
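/*
 * NB: __noinline is expected to map to the compiler's no-inline function
 * attribute (e.g. __attribute__((__noinline__)) on gcc/clang); see
 * sys/cdefs.h.
 */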
static __noinline int
ieee80211_ioctl_get80211(struct ieee80211vap *vap, u_long cmd,
struct ieee80211req *ireq)
{
#define MS(_v, _f) (((_v) & _f) >> _f##_S)
struct ieee80211com *ic = vap->iv_ic;
u_int kid, len;
uint8_t tmpkey[IEEE80211_KEYBUF_SIZE];
char tmpssid[IEEE80211_NWID_LEN];
int error = 0;
switch (ireq->i_type) {
case IEEE80211_IOC_SSID:
switch (vap->iv_state) {
case IEEE80211_S_INIT:
case IEEE80211_S_SCAN:
ireq->i_len = vap->iv_des_ssid[0].len;
memcpy(tmpssid, vap->iv_des_ssid[0].ssid, ireq->i_len);
break;
default:
ireq->i_len = vap->iv_bss->ni_esslen;
memcpy(tmpssid, vap->iv_bss->ni_essid, ireq->i_len);
break;
}
error = copyout(tmpssid, ireq->i_data, ireq->i_len);
break;
case IEEE80211_IOC_NUMSSIDS:
ireq->i_val = 1;
break;
case IEEE80211_IOC_WEP:
if ((vap->iv_flags & IEEE80211_F_PRIVACY) == 0)
ireq->i_val = IEEE80211_WEP_OFF;
else if (vap->iv_flags & IEEE80211_F_DROPUNENC)
ireq->i_val = IEEE80211_WEP_ON;
else
ireq->i_val = IEEE80211_WEP_MIXED;
break;
case IEEE80211_IOC_WEPKEY:
kid = (u_int) ireq->i_val;
if (kid >= IEEE80211_WEP_NKID)
return EINVAL;
len = (u_int) vap->iv_nw_keys[kid].wk_keylen;
/* NB: only root can read WEP keys */
if (priv_check(curthread, PRIV_NET80211_GETKEY) == 0) {
bcopy(vap->iv_nw_keys[kid].wk_key, tmpkey, len);
} else {
bzero(tmpkey, len);
}
ireq->i_len = len;
error = copyout(tmpkey, ireq->i_data, len);
break;
case IEEE80211_IOC_NUMWEPKEYS:
ireq->i_val = IEEE80211_WEP_NKID;
break;
case IEEE80211_IOC_WEPTXKEY:
ireq->i_val = vap->iv_def_txkey;
break;
case IEEE80211_IOC_AUTHMODE:
if (vap->iv_flags & IEEE80211_F_WPA)
ireq->i_val = IEEE80211_AUTH_WPA;
else
ireq->i_val = vap->iv_bss->ni_authmode;
break;
case IEEE80211_IOC_CHANNEL:
ireq->i_val = ieee80211_chan2ieee(ic, ic->ic_curchan);
break;
case IEEE80211_IOC_POWERSAVE:
if (vap->iv_flags & IEEE80211_F_PMGTON)
ireq->i_val = IEEE80211_POWERSAVE_ON;
else
ireq->i_val = IEEE80211_POWERSAVE_OFF;
break;
case IEEE80211_IOC_POWERSAVESLEEP:
ireq->i_val = ic->ic_lintval;
break;
case IEEE80211_IOC_RTSTHRESHOLD:
ireq->i_val = vap->iv_rtsthreshold;
break;
case IEEE80211_IOC_PROTMODE:
ireq->i_val = ic->ic_protmode;
break;
case IEEE80211_IOC_TXPOWER:
/*
* Tx power limit is the min of max regulatory
* power, any user-set limit, and the max the
* radio can do.
*/
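/*
 * NB: the values compared below are in 0.5 dBm units; ic_maxregpower
 * is kept in dBm (hence the doubling) while ic_txpowlimit and
 * ic_maxpower are already half-dBm quantities.
 */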
ireq->i_val = 2*ic->ic_curchan->ic_maxregpower;
if (ireq->i_val > ic->ic_txpowlimit)
ireq->i_val = ic->ic_txpowlimit;
if (ireq->i_val > ic->ic_curchan->ic_maxpower)
ireq->i_val = ic->ic_curchan->ic_maxpower;
break;
case IEEE80211_IOC_WPA:
switch (vap->iv_flags & IEEE80211_F_WPA) {
case IEEE80211_F_WPA1:
ireq->i_val = 1;
break;
case IEEE80211_F_WPA2:
ireq->i_val = 2;
break;
case IEEE80211_F_WPA1 | IEEE80211_F_WPA2:
ireq->i_val = 3;
break;
default:
ireq->i_val = 0;
break;
}
break;
case IEEE80211_IOC_CHANLIST:
error = ieee80211_ioctl_getchanlist(vap, ireq);
break;
case IEEE80211_IOC_ROAMING:
ireq->i_val = vap->iv_roaming;
break;
case IEEE80211_IOC_PRIVACY:
ireq->i_val = (vap->iv_flags & IEEE80211_F_PRIVACY) != 0;
break;
case IEEE80211_IOC_DROPUNENCRYPTED:
ireq->i_val = (vap->iv_flags & IEEE80211_F_DROPUNENC) != 0;
break;
case IEEE80211_IOC_COUNTERMEASURES:
ireq->i_val = (vap->iv_flags & IEEE80211_F_COUNTERM) != 0;
break;
case IEEE80211_IOC_WME:
ireq->i_val = (vap->iv_flags & IEEE80211_F_WME) != 0;
break;
case IEEE80211_IOC_HIDESSID:
ireq->i_val = (vap->iv_flags & IEEE80211_F_HIDESSID) != 0;
break;
case IEEE80211_IOC_APBRIDGE:
ireq->i_val = (vap->iv_flags & IEEE80211_F_NOBRIDGE) == 0;
break;
case IEEE80211_IOC_WPAKEY:
error = ieee80211_ioctl_getkey(vap, ireq);
break;
case IEEE80211_IOC_CHANINFO:
error = ieee80211_ioctl_getchaninfo(vap, ireq);
break;
case IEEE80211_IOC_BSSID:
if (ireq->i_len != IEEE80211_ADDR_LEN)
return EINVAL;
if (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP) {
error = copyout(vap->iv_opmode == IEEE80211_M_WDS ?
vap->iv_bss->ni_macaddr : vap->iv_bss->ni_bssid,
ireq->i_data, ireq->i_len);
} else
error = copyout(vap->iv_des_bssid, ireq->i_data,
ireq->i_len);
break;
case IEEE80211_IOC_WPAIE:
error = ieee80211_ioctl_getwpaie(vap, ireq, ireq->i_type);
break;
case IEEE80211_IOC_WPAIE2:
error = ieee80211_ioctl_getwpaie(vap, ireq, ireq->i_type);
break;
case IEEE80211_IOC_SCAN_RESULTS:
error = ieee80211_ioctl_getscanresults(vap, ireq);
break;
case IEEE80211_IOC_STA_STATS:
error = ieee80211_ioctl_getstastats(vap, ireq);
break;
case IEEE80211_IOC_TXPOWMAX:
ireq->i_val = vap->iv_bss->ni_txpower;
break;
case IEEE80211_IOC_STA_TXPOW:
error = ieee80211_ioctl_getstatxpow(vap, ireq);
break;
case IEEE80211_IOC_STA_INFO:
error = ieee80211_ioctl_getstainfo(vap, ireq);
break;
case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */
case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */
case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */
case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */
case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */
case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (bss only) */
error = ieee80211_ioctl_getwmeparam(vap, ireq);
break;
case IEEE80211_IOC_DTIM_PERIOD:
ireq->i_val = vap->iv_dtim_period;
break;
case IEEE80211_IOC_BEACON_INTERVAL:
/* NB: get from ic_bss for station mode */
ireq->i_val = vap->iv_bss->ni_intval;
break;
case IEEE80211_IOC_PUREG:
ireq->i_val = (vap->iv_flags & IEEE80211_F_PUREG) != 0;
break;
case IEEE80211_IOC_QUIET:
ireq->i_val = vap->iv_quiet;
break;
case IEEE80211_IOC_QUIET_COUNT:
ireq->i_val = vap->iv_quiet_count;
break;
case IEEE80211_IOC_QUIET_PERIOD:
ireq->i_val = vap->iv_quiet_period;
break;
case IEEE80211_IOC_QUIET_DUR:
ireq->i_val = vap->iv_quiet_duration;
break;
case IEEE80211_IOC_QUIET_OFFSET:
ireq->i_val = vap->iv_quiet_offset;
break;
case IEEE80211_IOC_BGSCAN:
ireq->i_val = (vap->iv_flags & IEEE80211_F_BGSCAN) != 0;
break;
case IEEE80211_IOC_BGSCAN_IDLE:
ireq->i_val = vap->iv_bgscanidle*hz/1000; /* ms */
break;
case IEEE80211_IOC_BGSCAN_INTERVAL:
ireq->i_val = vap->iv_bgscanintvl/hz; /* seconds */
break;
case IEEE80211_IOC_SCANVALID:
ireq->i_val = vap->iv_scanvalid/hz; /* seconds */
break;
case IEEE80211_IOC_FRAGTHRESHOLD:
ireq->i_val = vap->iv_fragthreshold;
break;
case IEEE80211_IOC_MACCMD:
error = ieee80211_ioctl_getmaccmd(vap, ireq);
break;
case IEEE80211_IOC_BURST:
ireq->i_val = (vap->iv_flags & IEEE80211_F_BURST) != 0;
break;
case IEEE80211_IOC_BMISSTHRESHOLD:
ireq->i_val = vap->iv_bmissthreshold;
break;
case IEEE80211_IOC_CURCHAN:
error = ieee80211_ioctl_getcurchan(vap, ireq);
break;
case IEEE80211_IOC_SHORTGI:
ireq->i_val = 0;
if (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20)
ireq->i_val |= IEEE80211_HTCAP_SHORTGI20;
if (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40)
ireq->i_val |= IEEE80211_HTCAP_SHORTGI40;
break;
case IEEE80211_IOC_AMPDU:
ireq->i_val = 0;
if (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_TX)
ireq->i_val |= 1;
if (vap->iv_flags_ht & IEEE80211_FHT_AMPDU_RX)
ireq->i_val |= 2;
break;
case IEEE80211_IOC_AMPDU_LIMIT:
if (vap->iv_opmode == IEEE80211_M_HOSTAP)
ireq->i_val = vap->iv_ampdu_rxmax;
else if (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP)
ireq->i_val = MS(vap->iv_bss->ni_htparam,
IEEE80211_HTCAP_MAXRXAMPDU);
else
ireq->i_val = vap->iv_ampdu_limit;
break;
case IEEE80211_IOC_AMPDU_DENSITY:
if (vap->iv_opmode == IEEE80211_M_STA &&
(vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP))
ireq->i_val = MS(vap->iv_bss->ni_htparam,
IEEE80211_HTCAP_MPDUDENSITY);
else
ireq->i_val = vap->iv_ampdu_density;
break;
case IEEE80211_IOC_AMSDU:
ireq->i_val = 0;
if (vap->iv_flags_ht & IEEE80211_FHT_AMSDU_TX)
ireq->i_val |= 1;
if (vap->iv_flags_ht & IEEE80211_FHT_AMSDU_RX)
ireq->i_val |= 2;
break;
case IEEE80211_IOC_AMSDU_LIMIT:
ireq->i_val = vap->iv_amsdu_limit; /* XXX truncation? */
break;
case IEEE80211_IOC_PUREN:
ireq->i_val = (vap->iv_flags_ht & IEEE80211_FHT_PUREN) != 0;
break;
case IEEE80211_IOC_DOTH:
ireq->i_val = (vap->iv_flags & IEEE80211_F_DOTH) != 0;
break;
case IEEE80211_IOC_REGDOMAIN:
error = ieee80211_ioctl_getregdomain(vap, ireq);
break;
case IEEE80211_IOC_ROAM:
error = ieee80211_ioctl_getroam(vap, ireq);
break;
case IEEE80211_IOC_TXPARAMS:
error = ieee80211_ioctl_gettxparams(vap, ireq);
break;
case IEEE80211_IOC_HTCOMPAT:
ireq->i_val = (vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) != 0;
break;
case IEEE80211_IOC_DWDS:
ireq->i_val = (vap->iv_flags & IEEE80211_F_DWDS) != 0;
break;
case IEEE80211_IOC_INACTIVITY:
ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_INACT) != 0;
break;
case IEEE80211_IOC_APPIE:
error = ieee80211_ioctl_getappie(vap, ireq);
break;
case IEEE80211_IOC_WPS:
ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_WPS) != 0;
break;
case IEEE80211_IOC_TSN:
ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_TSN) != 0;
break;
case IEEE80211_IOC_DFS:
ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_DFS) != 0;
break;
case IEEE80211_IOC_DOTD:
ireq->i_val = (vap->iv_flags_ext & IEEE80211_FEXT_DOTD) != 0;
break;
case IEEE80211_IOC_DEVCAPS:
error = ieee80211_ioctl_getdevcaps(ic, ireq);
break;
case IEEE80211_IOC_HTPROTMODE:
ireq->i_val = ic->ic_htprotmode;
break;
case IEEE80211_IOC_HTCONF:
if (vap->iv_flags_ht & IEEE80211_FHT_HT) {
ireq->i_val = 1;
if (vap->iv_flags_ht & IEEE80211_FHT_USEHT40)
ireq->i_val |= 2;
} else
ireq->i_val = 0;
break;
case IEEE80211_IOC_STA_VLAN:
error = ieee80211_ioctl_getstavlan(vap, ireq);
break;
case IEEE80211_IOC_SMPS:
if (vap->iv_opmode == IEEE80211_M_STA &&
(vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP)) {
if (vap->iv_bss->ni_flags & IEEE80211_NODE_MIMO_RTS)
ireq->i_val = IEEE80211_HTCAP_SMPS_DYNAMIC;
else if (vap->iv_bss->ni_flags & IEEE80211_NODE_MIMO_PS)
ireq->i_val = IEEE80211_HTCAP_SMPS_ENA;
else
ireq->i_val = IEEE80211_HTCAP_SMPS_OFF;
} else
ireq->i_val = vap->iv_htcaps & IEEE80211_HTCAP_SMPS;
break;
case IEEE80211_IOC_RIFS:
if (vap->iv_opmode == IEEE80211_M_STA &&
(vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP))
ireq->i_val =
(vap->iv_bss->ni_flags & IEEE80211_NODE_RIFS) != 0;
else
ireq->i_val =
(vap->iv_flags_ht & IEEE80211_FHT_RIFS) != 0;
break;
default:
error = ieee80211_ioctl_getdefault(vap, ireq);
break;
}
return error;
#undef MS
}
static __noinline int
ieee80211_ioctl_setkey(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211req_key ik;
struct ieee80211_node *ni;
struct ieee80211_key *wk;
uint16_t kid;
int error, i;
if (ireq->i_len != sizeof(ik))
return EINVAL;
error = copyin(ireq->i_data, &ik, sizeof(ik));
if (error)
return error;
/* NB: cipher support is verified by ieee80211_crypt_newkey */
/* NB: this also checks ik->ik_keylen > sizeof(wk->wk_key) */
if (ik.ik_keylen > sizeof(ik.ik_keydata))
return E2BIG;
kid = ik.ik_keyix;
if (kid == IEEE80211_KEYIX_NONE) {
/* XXX unicast keys currently must be tx/rx */
if (ik.ik_flags != (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))
return EINVAL;
if (vap->iv_opmode == IEEE80211_M_STA) {
ni = ieee80211_ref_node(vap->iv_bss);
if (!IEEE80211_ADDR_EQ(ik.ik_macaddr, ni->ni_bssid)) {
ieee80211_free_node(ni);
return EADDRNOTAVAIL;
}
} else {
ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap,
ik.ik_macaddr);
if (ni == NULL)
return ENOENT;
}
wk = &ni->ni_ucastkey;
} else {
if (kid >= IEEE80211_WEP_NKID)
return EINVAL;
wk = &vap->iv_nw_keys[kid];
/*
* Global slots start off w/o any assigned key index.
* Force one here for consistency with IEEE80211_IOC_WEPKEY.
*/
if (wk->wk_keyix == IEEE80211_KEYIX_NONE)
wk->wk_keyix = kid;
ni = NULL;
}
error = 0;
ieee80211_key_update_begin(vap);
if (ieee80211_crypto_newkey(vap, ik.ik_type, ik.ik_flags, wk)) {
wk->wk_keylen = ik.ik_keylen;
/* NB: MIC presence is implied by cipher type */
if (wk->wk_keylen > IEEE80211_KEYBUF_SIZE)
wk->wk_keylen = IEEE80211_KEYBUF_SIZE;
for (i = 0; i < IEEE80211_TID_SIZE; i++)
wk->wk_keyrsc[i] = ik.ik_keyrsc;
wk->wk_keytsc = 0; /* new key, reset */
memset(wk->wk_key, 0, sizeof(wk->wk_key));
memcpy(wk->wk_key, ik.ik_keydata, ik.ik_keylen);
IEEE80211_ADDR_COPY(wk->wk_macaddr,
ni != NULL ? ni->ni_macaddr : ik.ik_macaddr);
if (!ieee80211_crypto_setkey(vap, wk))
error = EIO;
else if ((ik.ik_flags & IEEE80211_KEY_DEFAULT))
vap->iv_def_txkey = kid;
} else
error = ENXIO;
ieee80211_key_update_end(vap);
if (ni != NULL)
ieee80211_free_node(ni);
return error;
}
static __noinline int
ieee80211_ioctl_delkey(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211req_del_key dk;
int kid, error;
if (ireq->i_len != sizeof(dk))
return EINVAL;
error = copyin(ireq->i_data, &dk, sizeof(dk));
if (error)
return error;
kid = dk.idk_keyix;
/* XXX uint8_t -> uint16_t */
if (dk.idk_keyix == (uint8_t) IEEE80211_KEYIX_NONE) {
struct ieee80211_node *ni;
if (vap->iv_opmode == IEEE80211_M_STA) {
ni = ieee80211_ref_node(vap->iv_bss);
if (!IEEE80211_ADDR_EQ(dk.idk_macaddr, ni->ni_bssid)) {
ieee80211_free_node(ni);
return EADDRNOTAVAIL;
}
} else {
ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap,
dk.idk_macaddr);
if (ni == NULL)
return ENOENT;
}
/* XXX error return */
ieee80211_node_delucastkey(ni);
ieee80211_free_node(ni);
} else {
if (kid >= IEEE80211_WEP_NKID)
return EINVAL;
/* XXX error return */
ieee80211_crypto_delkey(vap, &vap->iv_nw_keys[kid]);
}
return 0;
}
struct mlmeop {
struct ieee80211vap *vap;
int op;
int reason;
};
static void
mlmedebug(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN],
int op, int reason)
{
#ifdef IEEE80211_DEBUG
static const struct {
int mask;
const char *opstr;
} ops[] = {
{ 0, "op#0" },
{ IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE |
IEEE80211_MSG_ASSOC, "assoc" },
{ IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE |
IEEE80211_MSG_ASSOC, "disassoc" },
{ IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE |
IEEE80211_MSG_AUTH, "deauth" },
{ IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE |
IEEE80211_MSG_AUTH, "authorize" },
{ IEEE80211_MSG_IOCTL | IEEE80211_MSG_STATE |
IEEE80211_MSG_AUTH, "unauthorize" },
};
if (op == IEEE80211_MLME_AUTH) {
IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_IOCTL |
IEEE80211_MSG_STATE | IEEE80211_MSG_AUTH, mac,
"station authenticate %s via MLME (reason %d)",
reason == IEEE80211_STATUS_SUCCESS ? "ACCEPT" : "REJECT",
reason);
} else if (!(IEEE80211_MLME_ASSOC <= op && op <= IEEE80211_MLME_AUTH)) {
IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_ANY, mac,
"unknown MLME request %d (reason %d)", op, reason);
} else if (reason == IEEE80211_STATUS_SUCCESS) {
IEEE80211_NOTE_MAC(vap, ops[op].mask, mac,
"station %s via MLME", ops[op].opstr);
} else {
IEEE80211_NOTE_MAC(vap, ops[op].mask, mac,
"station %s via MLME (reason %d)", ops[op].opstr, reason);
}
#endif /* IEEE80211_DEBUG */
}
static void
domlme(void *arg, struct ieee80211_node *ni)
{
struct mlmeop *mop = arg;
struct ieee80211vap *vap = ni->ni_vap;
if (vap != mop->vap)
return;
/*
* NB: if ni_associd is zero then the node is already cleaned
* up and we don't need to do this (we're safely holding a
* reference but should otherwise not modify its state).
*/
if (ni->ni_associd == 0)
return;
mlmedebug(vap, ni->ni_macaddr, mop->op, mop->reason);
if (mop->op == IEEE80211_MLME_DEAUTH) {
IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
mop->reason);
} else {
IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DISASSOC,
mop->reason);
}
ieee80211_node_leave(ni);
}
static int
setmlme_dropsta(struct ieee80211vap *vap,
const uint8_t mac[IEEE80211_ADDR_LEN], struct mlmeop *mlmeop)
{
- struct ieee80211com *ic = vap->iv_ic;
- struct ieee80211_node_table *nt = &ic->ic_sta;
+ struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta;
struct ieee80211_node *ni;
int error = 0;
/* NB: the broadcast address means do 'em all */
- if (!IEEE80211_ADDR_EQ(mac, ic->ic_ifp->if_broadcastaddr)) {
+ if (!IEEE80211_ADDR_EQ(mac, vap->iv_ifp->if_broadcastaddr)) {
IEEE80211_NODE_LOCK(nt);
ni = ieee80211_find_node_locked(nt, mac);
IEEE80211_NODE_UNLOCK(nt);
/*
* Don't do the node update inside the node
* table lock. This unfortunately causes LORs
* with drivers and their TX paths.
*/
if (ni != NULL) {
domlme(mlmeop, ni);
ieee80211_free_node(ni);
} else
error = ENOENT;
} else {
ieee80211_iterate_nodes(nt, domlme, mlmeop);
}
return error;
}
static __noinline int
setmlme_common(struct ieee80211vap *vap, int op,
const uint8_t mac[IEEE80211_ADDR_LEN], int reason)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_node_table *nt = &ic->ic_sta;
struct ieee80211_node *ni;
struct mlmeop mlmeop;
int error;
error = 0;
switch (op) {
case IEEE80211_MLME_DISASSOC:
case IEEE80211_MLME_DEAUTH:
switch (vap->iv_opmode) {
case IEEE80211_M_STA:
mlmedebug(vap, vap->iv_bss->ni_macaddr, op, reason);
/* XXX not quite right */
ieee80211_new_state(vap, IEEE80211_S_INIT, reason);
break;
case IEEE80211_M_HOSTAP:
mlmeop.vap = vap;
mlmeop.op = op;
mlmeop.reason = reason;
error = setmlme_dropsta(vap, mac, &mlmeop);
break;
case IEEE80211_M_WDS:
/* XXX user app should send raw frame? */
if (op != IEEE80211_MLME_DEAUTH) {
error = EINVAL;
break;
}
#if 0
/* XXX accept any address, simplifies user code */
if (!IEEE80211_ADDR_EQ(mac, vap->iv_bss->ni_macaddr)) {
error = EINVAL;
break;
}
#endif
mlmedebug(vap, vap->iv_bss->ni_macaddr, op, reason);
ni = ieee80211_ref_node(vap->iv_bss);
IEEE80211_SEND_MGMT(ni,
IEEE80211_FC0_SUBTYPE_DEAUTH, reason);
ieee80211_free_node(ni);
break;
case IEEE80211_M_MBSS:
IEEE80211_NODE_LOCK(nt);
ni = ieee80211_find_node_locked(nt, mac);
/*
* Don't do the node update inside the node
* table lock. This unfortunately causes LORs
* with drivers and their TX paths.
*/
IEEE80211_NODE_UNLOCK(nt);
if (ni != NULL) {
ieee80211_node_leave(ni);
ieee80211_free_node(ni);
} else {
error = ENOENT;
}
break;
default:
error = EINVAL;
break;
}
break;
case IEEE80211_MLME_AUTHORIZE:
case IEEE80211_MLME_UNAUTHORIZE:
if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
vap->iv_opmode != IEEE80211_M_WDS) {
error = EINVAL;
break;
}
IEEE80211_NODE_LOCK(nt);
ni = ieee80211_find_vap_node_locked(nt, vap, mac);
/*
* Don't do the node update inside the node
* table lock. This unfortunately causes LORs
* with drivers and their TX paths.
*/
IEEE80211_NODE_UNLOCK(nt);
if (ni != NULL) {
mlmedebug(vap, mac, op, reason);
if (op == IEEE80211_MLME_AUTHORIZE)
ieee80211_node_authorize(ni);
else
ieee80211_node_unauthorize(ni);
ieee80211_free_node(ni);
} else
error = ENOENT;
break;
case IEEE80211_MLME_AUTH:
if (vap->iv_opmode != IEEE80211_M_HOSTAP) {
error = EINVAL;
break;
}
IEEE80211_NODE_LOCK(nt);
ni = ieee80211_find_vap_node_locked(nt, vap, mac);
/*
* Don't do the node update inside the node
* table lock. This unfortunately causes LORs
* with drivers and their TX paths.
*/
IEEE80211_NODE_UNLOCK(nt);
if (ni != NULL) {
mlmedebug(vap, mac, op, reason);
if (reason == IEEE80211_STATUS_SUCCESS) {
IEEE80211_SEND_MGMT(ni,
IEEE80211_FC0_SUBTYPE_AUTH, 2);
/*
* For shared key auth, just continue the
* exchange. Otherwise when 802.1x is not in
* use mark the port authorized at this point
* so traffic can flow.
*/
if (ni->ni_authmode != IEEE80211_AUTH_8021X &&
ni->ni_challenge == NULL)
ieee80211_node_authorize(ni);
} else {
vap->iv_stats.is_rx_acl++;
ieee80211_send_error(ni, ni->ni_macaddr,
IEEE80211_FC0_SUBTYPE_AUTH, 2|(reason<<16));
ieee80211_node_leave(ni);
}
ieee80211_free_node(ni);
} else
error = ENOENT;
break;
default:
error = EINVAL;
break;
}
return error;
}
struct scanlookup {
const uint8_t *mac;
int esslen;
const uint8_t *essid;
const struct ieee80211_scan_entry *se;
};
/*
* Match mac address and any ssid.
*/
static void
mlmelookup(void *arg, const struct ieee80211_scan_entry *se)
{
struct scanlookup *look = arg;
if (!IEEE80211_ADDR_EQ(look->mac, se->se_macaddr))
return;
if (look->esslen != 0) {
if (se->se_ssid[1] != look->esslen)
return;
if (memcmp(look->essid, se->se_ssid+2, look->esslen))
return;
}
look->se = se;
}
static __noinline int
setmlme_assoc_sta(struct ieee80211vap *vap,
const uint8_t mac[IEEE80211_ADDR_LEN], int ssid_len,
const uint8_t ssid[IEEE80211_NWID_LEN])
{
struct scanlookup lookup;
KASSERT(vap->iv_opmode == IEEE80211_M_STA,
("expected opmode STA not %s",
ieee80211_opmode_name[vap->iv_opmode]));
/* NB: this is racy if roaming is !manual */
lookup.se = NULL;
lookup.mac = mac;
lookup.esslen = ssid_len;
lookup.essid = ssid;
ieee80211_scan_iterate(vap, mlmelookup, &lookup);
if (lookup.se == NULL)
return ENOENT;
mlmedebug(vap, mac, IEEE80211_MLME_ASSOC, 0);
if (!ieee80211_sta_join(vap, lookup.se->se_chan, lookup.se))
return EIO; /* XXX unique but could be better */
return 0;
}
static __noinline int
setmlme_assoc_adhoc(struct ieee80211vap *vap,
const uint8_t mac[IEEE80211_ADDR_LEN], int ssid_len,
const uint8_t ssid[IEEE80211_NWID_LEN])
{
struct ieee80211_scan_req sr;
KASSERT(vap->iv_opmode == IEEE80211_M_IBSS ||
vap->iv_opmode == IEEE80211_M_AHDEMO,
("expected opmode IBSS or AHDEMO not %s",
ieee80211_opmode_name[vap->iv_opmode]));
if (ssid_len == 0)
return EINVAL;
/* NB: IEEE80211_IOC_SSID call missing for ap_scan=2. */
memset(vap->iv_des_ssid[0].ssid, 0, IEEE80211_NWID_LEN);
vap->iv_des_ssid[0].len = ssid_len;
memcpy(vap->iv_des_ssid[0].ssid, ssid, ssid_len);
vap->iv_des_nssid = 1;
memset(&sr, 0, sizeof(sr));
sr.sr_flags = IEEE80211_IOC_SCAN_ACTIVE | IEEE80211_IOC_SCAN_ONCE;
sr.sr_duration = IEEE80211_IOC_SCAN_FOREVER;
memcpy(sr.sr_ssid[0].ssid, ssid, ssid_len);
sr.sr_ssid[0].len = ssid_len;
sr.sr_nssid = 1;
return ieee80211_scanreq(vap, &sr);
}
static __noinline int
ieee80211_ioctl_setmlme(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211req_mlme mlme;
int error;
if (ireq->i_len != sizeof(mlme))
return EINVAL;
error = copyin(ireq->i_data, &mlme, sizeof(mlme));
if (error)
return error;
if (vap->iv_opmode == IEEE80211_M_STA &&
mlme.im_op == IEEE80211_MLME_ASSOC)
return setmlme_assoc_sta(vap, mlme.im_macaddr,
vap->iv_des_ssid[0].len, vap->iv_des_ssid[0].ssid);
else if ((vap->iv_opmode == IEEE80211_M_IBSS ||
vap->iv_opmode == IEEE80211_M_AHDEMO) &&
mlme.im_op == IEEE80211_MLME_ASSOC)
return setmlme_assoc_adhoc(vap, mlme.im_macaddr,
mlme.im_ssid_len, mlme.im_ssid);
else
return setmlme_common(vap, mlme.im_op,
mlme.im_macaddr, mlme.im_reason);
}
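/*
 * NB: for illustration only (constants assumed from net80211's public
 * ioctl headers) -- a management application such as hostapd drives this
 * path by filling a struct ieee80211req_mlme and issuing the MLME set
 * request, roughly:
 *
 *	struct ieee80211req_mlme mlme;
 *	memset(&mlme, 0, sizeof(mlme));
 *	mlme.im_op = IEEE80211_MLME_DEAUTH;
 *	mlme.im_reason = IEEE80211_REASON_AUTH_EXPIRE;
 *	memcpy(mlme.im_macaddr, sta_mac, IEEE80211_ADDR_LEN);
 *	// then SIOCS80211 with i_type = IEEE80211_IOC_MLME,
 *	// i_len = sizeof(mlme), i_data = &mlme
 */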
static __noinline int
ieee80211_ioctl_macmac(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
uint8_t mac[IEEE80211_ADDR_LEN];
const struct ieee80211_aclator *acl = vap->iv_acl;
int error;
if (ireq->i_len != sizeof(mac))
return EINVAL;
error = copyin(ireq->i_data, mac, ireq->i_len);
if (error)
return error;
if (acl == NULL) {
acl = ieee80211_aclator_get("mac");
if (acl == NULL || !acl->iac_attach(vap))
return EINVAL;
vap->iv_acl = acl;
}
if (ireq->i_type == IEEE80211_IOC_ADDMAC)
acl->iac_add(vap, mac);
else
acl->iac_remove(vap, mac);
return 0;
}
static __noinline int
ieee80211_ioctl_setmaccmd(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
const struct ieee80211_aclator *acl = vap->iv_acl;
switch (ireq->i_val) {
case IEEE80211_MACCMD_POLICY_OPEN:
case IEEE80211_MACCMD_POLICY_ALLOW:
case IEEE80211_MACCMD_POLICY_DENY:
case IEEE80211_MACCMD_POLICY_RADIUS:
if (acl == NULL) {
acl = ieee80211_aclator_get("mac");
if (acl == NULL || !acl->iac_attach(vap))
return EINVAL;
vap->iv_acl = acl;
}
acl->iac_setpolicy(vap, ireq->i_val);
break;
case IEEE80211_MACCMD_FLUSH:
if (acl != NULL)
acl->iac_flush(vap);
/* NB: silently ignore when not in use */
break;
case IEEE80211_MACCMD_DETACH:
if (acl != NULL) {
vap->iv_acl = NULL;
acl->iac_detach(vap);
}
break;
default:
if (acl == NULL)
return EINVAL;
else
return acl->iac_setioctl(vap, ireq);
}
return 0;
}
static __noinline int
ieee80211_ioctl_setchanlist(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
uint8_t *chanlist, *list;
int i, nchan, maxchan, error;
if (ireq->i_len > sizeof(ic->ic_chan_active))
ireq->i_len = sizeof(ic->ic_chan_active);
list = IEEE80211_MALLOC(ireq->i_len + IEEE80211_CHAN_BYTES, M_TEMP,
IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
if (list == NULL)
return ENOMEM;
error = copyin(ireq->i_data, list, ireq->i_len);
if (error) {
IEEE80211_FREE(list, M_TEMP);
return error;
}
nchan = 0;
chanlist = list + ireq->i_len; /* NB: zero'd already */
maxchan = ireq->i_len * NBBY;
for (i = 0; i < ic->ic_nchans; i++) {
const struct ieee80211_channel *c = &ic->ic_channels[i];
/*
* Calculate the intersection of the user list and the
* available channels so users can do things like specify
* 1-255 to get all available channels.
*/
if (c->ic_ieee < maxchan && isset(list, c->ic_ieee)) {
setbit(chanlist, c->ic_ieee);
nchan++;
}
}
if (nchan == 0) {
IEEE80211_FREE(list, M_TEMP);
return EINVAL;
}
if (ic->ic_bsschan != IEEE80211_CHAN_ANYC && /* XXX */
isclr(chanlist, ic->ic_bsschan->ic_ieee))
ic->ic_bsschan = IEEE80211_CHAN_ANYC;
memcpy(ic->ic_chan_active, chanlist, IEEE80211_CHAN_BYTES);
ieee80211_scan_flush(vap);
IEEE80211_FREE(list, M_TEMP);
return ENETRESET;
}
static __noinline int
ieee80211_ioctl_setstastats(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211_node *ni;
uint8_t macaddr[IEEE80211_ADDR_LEN];
int error;
/*
* NB: we could copyin ieee80211req_sta_stats so apps
* could make selective changes but that's overkill;
* just clear all stats for now.
*/
if (ireq->i_len < IEEE80211_ADDR_LEN)
return EINVAL;
error = copyin(ireq->i_data, macaddr, IEEE80211_ADDR_LEN);
if (error != 0)
return error;
ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, macaddr);
if (ni == NULL)
return ENOENT;
/* XXX require ni_vap == vap? */
memset(&ni->ni_stats, 0, sizeof(ni->ni_stats));
ieee80211_free_node(ni);
return 0;
}
static __noinline int
ieee80211_ioctl_setstatxpow(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211_node *ni;
struct ieee80211req_sta_txpow txpow;
int error;
if (ireq->i_len != sizeof(txpow))
return EINVAL;
error = copyin(ireq->i_data, &txpow, sizeof(txpow));
if (error != 0)
return error;
ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap, txpow.it_macaddr);
if (ni == NULL)
return ENOENT;
ni->ni_txpower = txpow.it_txpow;
ieee80211_free_node(ni);
return error;
}
static __noinline int
ieee80211_ioctl_setwmeparam(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_wme_state *wme = &ic->ic_wme;
struct wmeParams *wmep, *chanp;
int isbss, ac;
if ((ic->ic_caps & IEEE80211_C_WME) == 0)
return EOPNOTSUPP;
isbss = (ireq->i_len & IEEE80211_WMEPARAM_BSS);
ac = (ireq->i_len & IEEE80211_WMEPARAM_VAL);
if (ac >= WME_NUM_AC)
ac = WME_AC_BE;
if (isbss) {
chanp = &wme->wme_bssChanParams.cap_wmeParams[ac];
wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[ac];
} else {
chanp = &wme->wme_chanParams.cap_wmeParams[ac];
wmep = &wme->wme_wmeChanParams.cap_wmeParams[ac];
}
switch (ireq->i_type) {
case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */
if (isbss) {
wmep->wmep_logcwmin = ireq->i_val;
if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
chanp->wmep_logcwmin = ireq->i_val;
} else {
wmep->wmep_logcwmin = chanp->wmep_logcwmin =
ireq->i_val;
}
break;
case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */
if (isbss) {
wmep->wmep_logcwmax = ireq->i_val;
if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
chanp->wmep_logcwmax = ireq->i_val;
} else {
wmep->wmep_logcwmax = chanp->wmep_logcwmax =
ireq->i_val;
}
break;
case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */
if (isbss) {
wmep->wmep_aifsn = ireq->i_val;
if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
chanp->wmep_aifsn = ireq->i_val;
} else {
wmep->wmep_aifsn = chanp->wmep_aifsn = ireq->i_val;
}
break;
case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */
if (isbss) {
wmep->wmep_txopLimit = ireq->i_val;
if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
chanp->wmep_txopLimit = ireq->i_val;
} else {
wmep->wmep_txopLimit = chanp->wmep_txopLimit =
ireq->i_val;
}
break;
case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */
wmep->wmep_acm = ireq->i_val;
if ((wme->wme_flags & WME_F_AGGRMODE) == 0)
chanp->wmep_acm = ireq->i_val;
break;
case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (!bss only)*/
wmep->wmep_noackPolicy = chanp->wmep_noackPolicy =
(ireq->i_val) == 0;
break;
}
ieee80211_wme_updateparams(vap);
return 0;
}
static int
find11gchannel(struct ieee80211com *ic, int start, int freq)
{
const struct ieee80211_channel *c;
int i;
for (i = start+1; i < ic->ic_nchans; i++) {
c = &ic->ic_channels[i];
if (c->ic_freq == freq && IEEE80211_IS_CHAN_ANYG(c))
return 1;
}
/* NB: should not be needed but in case things are mis-sorted */
for (i = 0; i < start; i++) {
c = &ic->ic_channels[i];
if (c->ic_freq == freq && IEEE80211_IS_CHAN_ANYG(c))
return 1;
}
return 0;
}
static struct ieee80211_channel *
findchannel(struct ieee80211com *ic, int ieee, int mode)
{
static const u_int chanflags[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = 0,
[IEEE80211_MODE_11A] = IEEE80211_CHAN_A,
[IEEE80211_MODE_11B] = IEEE80211_CHAN_B,
[IEEE80211_MODE_11G] = IEEE80211_CHAN_G,
[IEEE80211_MODE_FH] = IEEE80211_CHAN_FHSS,
[IEEE80211_MODE_TURBO_A] = IEEE80211_CHAN_108A,
[IEEE80211_MODE_TURBO_G] = IEEE80211_CHAN_108G,
[IEEE80211_MODE_STURBO_A] = IEEE80211_CHAN_STURBO,
[IEEE80211_MODE_HALF] = IEEE80211_CHAN_HALF,
[IEEE80211_MODE_QUARTER] = IEEE80211_CHAN_QUARTER,
/* NB: handled specially below */
[IEEE80211_MODE_11NA] = IEEE80211_CHAN_A,
[IEEE80211_MODE_11NG] = IEEE80211_CHAN_G,
};
u_int modeflags;
int i;
modeflags = chanflags[mode];
for (i = 0; i < ic->ic_nchans; i++) {
struct ieee80211_channel *c = &ic->ic_channels[i];
if (c->ic_ieee != ieee)
continue;
if (mode == IEEE80211_MODE_AUTO) {
/* ignore turbo channels for autoselect */
if (IEEE80211_IS_CHAN_TURBO(c))
continue;
/*
* XXX special-case 11b/g channels so we
* always select the g channel if both
* are present.
* XXX prefer HT to non-HT?
*/
if (!IEEE80211_IS_CHAN_B(c) ||
!find11gchannel(ic, i, c->ic_freq))
return c;
} else {
/* must check HT specially */
if ((mode == IEEE80211_MODE_11NA ||
mode == IEEE80211_MODE_11NG) &&
!IEEE80211_IS_CHAN_HT(c))
continue;
if ((c->ic_flags & modeflags) == modeflags)
return c;
}
}
return NULL;
}
/*
* Check the specified channel against any desired mode (aka netband).
* This is only used (presently) when operating in hostap mode
* to enforce consistency.
*/
static int
check_mode_consistency(const struct ieee80211_channel *c, int mode)
{
KASSERT(c != IEEE80211_CHAN_ANYC, ("oops, no channel"));
switch (mode) {
case IEEE80211_MODE_11B:
return (IEEE80211_IS_CHAN_B(c));
case IEEE80211_MODE_11G:
return (IEEE80211_IS_CHAN_ANYG(c) && !IEEE80211_IS_CHAN_HT(c));
case IEEE80211_MODE_11A:
return (IEEE80211_IS_CHAN_A(c) && !IEEE80211_IS_CHAN_HT(c));
case IEEE80211_MODE_STURBO_A:
return (IEEE80211_IS_CHAN_STURBO(c));
case IEEE80211_MODE_11NA:
return (IEEE80211_IS_CHAN_HTA(c));
case IEEE80211_MODE_11NG:
return (IEEE80211_IS_CHAN_HTG(c));
}
return 1;
}
/*
* Common code to set the current channel. If the device
* is up and running this may result in an immediate channel
* change or a kick of the state machine.
*/
static int
setcurchan(struct ieee80211vap *vap, struct ieee80211_channel *c)
{
struct ieee80211com *ic = vap->iv_ic;
int error;
if (c != IEEE80211_CHAN_ANYC) {
if (IEEE80211_IS_CHAN_RADAR(c))
return EBUSY; /* XXX better code? */
if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
if (IEEE80211_IS_CHAN_NOHOSTAP(c))
return EINVAL;
if (!check_mode_consistency(c, vap->iv_des_mode))
return EINVAL;
} else if (vap->iv_opmode == IEEE80211_M_IBSS) {
if (IEEE80211_IS_CHAN_NOADHOC(c))
return EINVAL;
}
if ((vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP) &&
vap->iv_bss->ni_chan == c)
return 0; /* NB: nothing to do */
}
vap->iv_des_chan = c;
error = 0;
if (vap->iv_opmode == IEEE80211_M_MONITOR &&
vap->iv_des_chan != IEEE80211_CHAN_ANYC) {
/*
* Monitor mode can switch directly.
*/
if (IFNET_IS_UP_RUNNING(vap->iv_ifp)) {
/* XXX need state machine for other vaps to follow */
ieee80211_setcurchan(ic, vap->iv_des_chan);
vap->iv_bss->ni_chan = ic->ic_curchan;
} else
ic->ic_curchan = vap->iv_des_chan;
ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan);
} else {
/*
* Need to go through the state machine in case we
* need to reassociate or the like. The state machine
* will pick up the desired channel and avoid scanning.
*/
if (IS_UP_AUTO(vap))
ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
else if (vap->iv_des_chan != IEEE80211_CHAN_ANYC) {
/*
* When not up+running and a real channel has
* been specified fix the current channel so
* there is immediate feedback; e.g. via ifconfig.
*/
ic->ic_curchan = vap->iv_des_chan;
ic->ic_rt = ieee80211_get_ratetable(ic->ic_curchan);
}
}
return error;
}
/*
* Old api for setting the current channel; this is
* deprecated because channel numbers are ambiguous.
*/
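/*
 * NB: e.g. IEEE channel 6 may exist as an 11b, 11g and 11ng channel and
 * channel 36 as both 11a and 11na, so a bare channel number does not
 * identify a unique entry; the mode-based fix-ups below only partially
 * compensate for that.
 */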
static __noinline int
ieee80211_ioctl_setchannel(struct ieee80211vap *vap,
const struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_channel *c;
/* XXX 0xffff overflows 16-bit signed */
if (ireq->i_val == 0 ||
ireq->i_val == (int16_t) IEEE80211_CHAN_ANY) {
c = IEEE80211_CHAN_ANYC;
} else {
struct ieee80211_channel *c2;
c = findchannel(ic, ireq->i_val, vap->iv_des_mode);
if (c == NULL) {
c = findchannel(ic, ireq->i_val,
IEEE80211_MODE_AUTO);
if (c == NULL)
return EINVAL;
}
/*
* Fine tune channel selection based on desired mode:
* if 11b is requested, find the 11b version of any
* 11g channel returned,
* if static turbo, find the turbo version of any
* 11a channel returned,
* if 11na is requested, find the ht version of any
* 11a channel returned,
* if 11ng is requested, find the ht version of any
* 11g channel returned,
* otherwise we should be ok with what we've got.
*/
switch (vap->iv_des_mode) {
case IEEE80211_MODE_11B:
if (IEEE80211_IS_CHAN_ANYG(c)) {
c2 = findchannel(ic, ireq->i_val,
IEEE80211_MODE_11B);
/* NB: should not happen, =>'s 11g w/o 11b */
if (c2 != NULL)
c = c2;
}
break;
case IEEE80211_MODE_TURBO_A:
if (IEEE80211_IS_CHAN_A(c)) {
c2 = findchannel(ic, ireq->i_val,
IEEE80211_MODE_TURBO_A);
if (c2 != NULL)
c = c2;
}
break;
case IEEE80211_MODE_11NA:
if (IEEE80211_IS_CHAN_A(c)) {
c2 = findchannel(ic, ireq->i_val,
IEEE80211_MODE_11NA);
if (c2 != NULL)
c = c2;
}
break;
case IEEE80211_MODE_11NG:
if (IEEE80211_IS_CHAN_ANYG(c)) {
c2 = findchannel(ic, ireq->i_val,
IEEE80211_MODE_11NG);
if (c2 != NULL)
c = c2;
}
break;
default: /* NB: no static turboG */
break;
}
}
return setcurchan(vap, c);
}
/*
* New/current api for setting the current channel; a complete
* channel description is provided so there is no ambiguity in
* identifying the channel.
*/
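/*
 * NB: for illustration only (sketch assuming the usual SIOCS80211
 * plumbing) -- the caller supplies at least ic_freq and ic_flags so
 * ieee80211_find_channel() can resolve the exact entry, e.g.:
 *
 *	struct ieee80211_channel chan;
 *	memset(&chan, 0, sizeof(chan));
 *	chan.ic_freq = 5180;				// channel 36
 *	chan.ic_flags = IEEE80211_CHAN_A | IEEE80211_CHAN_HT20;
 *	// then SIOCS80211 with i_type = IEEE80211_IOC_CURCHAN,
 *	// i_len = sizeof(chan), i_data = &chan
 */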
static __noinline int
ieee80211_ioctl_setcurchan(struct ieee80211vap *vap,
const struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_channel chan, *c;
int error;
if (ireq->i_len != sizeof(chan))
return EINVAL;
error = copyin(ireq->i_data, &chan, sizeof(chan));
if (error != 0)
return error;
/* XXX 0xffff overflows 16-bit signed */
if (chan.ic_freq == 0 || chan.ic_freq == IEEE80211_CHAN_ANY) {
c = IEEE80211_CHAN_ANYC;
} else {
c = ieee80211_find_channel(ic, chan.ic_freq, chan.ic_flags);
if (c == NULL)
return EINVAL;
}
return setcurchan(vap, c);
}
static __noinline int
ieee80211_ioctl_setregdomain(struct ieee80211vap *vap,
const struct ieee80211req *ireq)
{
struct ieee80211_regdomain_req *reg;
int nchans, error;
nchans = 1 + ((ireq->i_len - sizeof(struct ieee80211_regdomain_req)) /
sizeof(struct ieee80211_channel));
if (!(1 <= nchans && nchans <= IEEE80211_CHAN_MAX)) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
"%s: bad # chans, i_len %d nchans %d\n", __func__,
ireq->i_len, nchans);
return EINVAL;
}
reg = (struct ieee80211_regdomain_req *)
IEEE80211_MALLOC(IEEE80211_REGDOMAIN_SIZE(nchans), M_TEMP,
IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
if (reg == NULL) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
"%s: no memory, nchans %d\n", __func__, nchans);
return ENOMEM;
}
error = copyin(ireq->i_data, reg, IEEE80211_REGDOMAIN_SIZE(nchans));
if (error == 0) {
/* NB: validate inline channel count against storage size */
if (reg->chaninfo.ic_nchans != nchans) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
"%s: chan cnt mismatch, %d != %d\n", __func__,
reg->chaninfo.ic_nchans, nchans);
error = EINVAL;
} else
error = ieee80211_setregdomain(vap, reg);
}
IEEE80211_FREE(reg, M_TEMP);
return (error == 0 ? ENETRESET : error);
}
static int
ieee80211_ioctl_setroam(struct ieee80211vap *vap,
const struct ieee80211req *ireq)
{
if (ireq->i_len != sizeof(vap->iv_roamparms))
return EINVAL;
/* XXX validate params */
/* XXX? ENETRESET to push to device? */
return copyin(ireq->i_data, vap->iv_roamparms,
sizeof(vap->iv_roamparms));
}
static int
checkrate(const struct ieee80211_rateset *rs, int rate)
{
int i;
if (rate == IEEE80211_FIXED_RATE_NONE)
return 1;
for (i = 0; i < rs->rs_nrates; i++)
if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rate)
return 1;
return 0;
}
static int
checkmcs(int mcs)
{
if (mcs == IEEE80211_FIXED_RATE_NONE)
return 1;
if ((mcs & IEEE80211_RATE_MCS) == 0) /* MCS always have 0x80 set */
return 0;
return (mcs & 0x7f) <= 15; /* XXX could search ht rate set */
}
static __noinline int
ieee80211_ioctl_settxparams(struct ieee80211vap *vap,
const struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_txparams_req parms; /* XXX stack use? */
struct ieee80211_txparam *src, *dst;
const struct ieee80211_rateset *rs;
int error, mode, changed, is11n, nmodes;
/* NB: accept short requests for backwards compat */
if (ireq->i_len > sizeof(parms))
return EINVAL;
error = copyin(ireq->i_data, &parms, ireq->i_len);
if (error != 0)
return error;
nmodes = ireq->i_len / sizeof(struct ieee80211_txparam);
changed = 0;
/* validate parameters and check if anything changed */
for (mode = IEEE80211_MODE_11A; mode < nmodes; mode++) {
if (isclr(ic->ic_modecaps, mode))
continue;
src = &parms.params[mode];
dst = &vap->iv_txparms[mode];
rs = &ic->ic_sup_rates[mode]; /* NB: 11n maps to legacy */
is11n = (mode == IEEE80211_MODE_11NA ||
mode == IEEE80211_MODE_11NG);
if (src->ucastrate != dst->ucastrate) {
if (!checkrate(rs, src->ucastrate) &&
(!is11n || !checkmcs(src->ucastrate)))
return EINVAL;
changed++;
}
if (src->mcastrate != dst->mcastrate) {
if (!checkrate(rs, src->mcastrate) &&
(!is11n || !checkmcs(src->mcastrate)))
return EINVAL;
changed++;
}
if (src->mgmtrate != dst->mgmtrate) {
if (!checkrate(rs, src->mgmtrate) &&
(!is11n || !checkmcs(src->mgmtrate)))
return EINVAL;
changed++;
}
if (src->maxretry != dst->maxretry) /* NB: no bounds */
changed++;
}
if (changed) {
/*
* Copy new parameters in place and notify the
* driver so it can push state to the device.
*/
for (mode = IEEE80211_MODE_11A; mode < nmodes; mode++) {
if (isset(ic->ic_modecaps, mode))
vap->iv_txparms[mode] = parms.params[mode];
}
/* XXX could be more intelligent,
e.g. don't reset if setting not being used */
return ENETRESET;
}
return 0;
}
/*
* Application Information Element support.
*/
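/*
 * NB: the i_data blob handed to these routines is expected to be one or
 * more raw 802.11 information elements laid out back to back, each as
 * [element id][length][data bytes]; the WPA/RSN splitting further below
 * relies on exactly that framing.
 */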
static int
setappie(struct ieee80211_appie **aie, const struct ieee80211req *ireq)
{
struct ieee80211_appie *app = *aie;
struct ieee80211_appie *napp;
int error;
if (ireq->i_len == 0) { /* delete any existing ie */
if (app != NULL) {
*aie = NULL; /* XXX racy */
IEEE80211_FREE(app, M_80211_NODE_IE);
}
return 0;
}
if (!(2 <= ireq->i_len && ireq->i_len <= IEEE80211_MAX_APPIE))
return EINVAL;
/*
* Allocate a new appie structure and copy in the user data.
* When done swap in the new structure. Note that we do not
* guard against users holding a ref to the old structure;
* this must be handled outside this code.
*
* XXX bad bad bad
*/
napp = (struct ieee80211_appie *) IEEE80211_MALLOC(
sizeof(struct ieee80211_appie) + ireq->i_len, M_80211_NODE_IE,
IEEE80211_M_NOWAIT);
if (napp == NULL)
return ENOMEM;
/* XXX holding ic lock */
error = copyin(ireq->i_data, napp->ie_data, ireq->i_len);
if (error) {
IEEE80211_FREE(napp, M_80211_NODE_IE);
return error;
}
napp->ie_len = ireq->i_len;
*aie = napp;
if (app != NULL)
IEEE80211_FREE(app, M_80211_NODE_IE);
return 0;
}
static void
setwparsnie(struct ieee80211vap *vap, uint8_t *ie, int space)
{
/* validate data is present as best we can */
if (space == 0 || 2+ie[1] > space)
return;
if (ie[0] == IEEE80211_ELEMID_VENDOR)
vap->iv_wpa_ie = ie;
else if (ie[0] == IEEE80211_ELEMID_RSN)
vap->iv_rsn_ie = ie;
}
static __noinline int
ieee80211_ioctl_setappie_locked(struct ieee80211vap *vap,
const struct ieee80211req *ireq, int fc0)
{
int error;
IEEE80211_LOCK_ASSERT(vap->iv_ic);
switch (fc0 & IEEE80211_FC0_SUBTYPE_MASK) {
case IEEE80211_FC0_SUBTYPE_BEACON:
if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
vap->iv_opmode != IEEE80211_M_IBSS) {
error = EINVAL;
break;
}
error = setappie(&vap->iv_appie_beacon, ireq);
if (error == 0)
ieee80211_beacon_notify(vap, IEEE80211_BEACON_APPIE);
break;
case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
error = setappie(&vap->iv_appie_proberesp, ireq);
break;
case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
if (vap->iv_opmode == IEEE80211_M_HOSTAP)
error = setappie(&vap->iv_appie_assocresp, ireq);
else
error = EINVAL;
break;
case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
error = setappie(&vap->iv_appie_probereq, ireq);
break;
case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
if (vap->iv_opmode == IEEE80211_M_STA)
error = setappie(&vap->iv_appie_assocreq, ireq);
else
error = EINVAL;
break;
case (IEEE80211_APPIE_WPA & IEEE80211_FC0_SUBTYPE_MASK):
error = setappie(&vap->iv_appie_wpa, ireq);
if (error == 0) {
/*
* Must split single blob of data into separate
* WPA and RSN ie's because they go in different
* locations in the mgt frames.
* XXX use IEEE80211_IOC_WPA2 so user code does split
*/
vap->iv_wpa_ie = NULL;
vap->iv_rsn_ie = NULL;
if (vap->iv_appie_wpa != NULL) {
struct ieee80211_appie *appie =
vap->iv_appie_wpa;
uint8_t *data = appie->ie_data;
/* XXX ie length validate is painful, cheat */
setwparsnie(vap, data, appie->ie_len);
setwparsnie(vap, data + 2 + data[1],
appie->ie_len - (2 + data[1]));
}
if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_IBSS) {
/*
* Must rebuild beacon frame as the update
* mechanism doesn't handle WPA/RSN ie's.
* Could extend it but it doesn't normally
* change; this is just to deal with hostapd
* plumbing the ie after the interface is up.
*/
error = ENETRESET;
}
}
break;
default:
error = EINVAL;
break;
}
return error;
}
static __noinline int
ieee80211_ioctl_setappie(struct ieee80211vap *vap,
const struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
int error;
uint8_t fc0;
fc0 = ireq->i_val & 0xff;
if ((fc0 & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
return EINVAL;
/* NB: could check iv_opmode and reject but hardly worth the effort */
IEEE80211_LOCK(ic);
error = ieee80211_ioctl_setappie_locked(vap, ireq, fc0);
IEEE80211_UNLOCK(ic);
return error;
}
static __noinline int
ieee80211_ioctl_chanswitch(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_chanswitch_req csr;
struct ieee80211_channel *c;
int error;
if (ireq->i_len != sizeof(csr))
return EINVAL;
error = copyin(ireq->i_data, &csr, sizeof(csr));
if (error != 0)
return error;
/* XXX adhoc mode not supported */
if (vap->iv_opmode != IEEE80211_M_HOSTAP ||
(vap->iv_flags & IEEE80211_F_DOTH) == 0)
return EOPNOTSUPP;
c = ieee80211_find_channel(ic,
csr.csa_chan.ic_freq, csr.csa_chan.ic_flags);
if (c == NULL)
return ENOENT;
IEEE80211_LOCK(ic);
if ((ic->ic_flags & IEEE80211_F_CSAPENDING) == 0)
ieee80211_csa_startswitch(ic, c, csr.csa_mode, csr.csa_count);
else if (csr.csa_count == 0)
ieee80211_csa_cancelswitch(ic);
else
error = EBUSY;
IEEE80211_UNLOCK(ic);
return error;
}
static int
ieee80211_scanreq(struct ieee80211vap *vap, struct ieee80211_scan_req *sr)
{
#define IEEE80211_IOC_SCAN_FLAGS \
(IEEE80211_IOC_SCAN_NOPICK | IEEE80211_IOC_SCAN_ACTIVE | \
IEEE80211_IOC_SCAN_PICK1ST | IEEE80211_IOC_SCAN_BGSCAN | \
IEEE80211_IOC_SCAN_ONCE | IEEE80211_IOC_SCAN_NOBCAST | \
IEEE80211_IOC_SCAN_NOJOIN | IEEE80211_IOC_SCAN_FLUSH | \
IEEE80211_IOC_SCAN_CHECK)
struct ieee80211com *ic = vap->iv_ic;
int error, i;
/* convert duration */
if (sr->sr_duration == IEEE80211_IOC_SCAN_FOREVER)
sr->sr_duration = IEEE80211_SCAN_FOREVER;
else {
if (sr->sr_duration < IEEE80211_IOC_SCAN_DURATION_MIN ||
sr->sr_duration > IEEE80211_IOC_SCAN_DURATION_MAX)
return EINVAL;
sr->sr_duration = msecs_to_ticks(sr->sr_duration);
if (sr->sr_duration < 1)
sr->sr_duration = 1;
}
/* convert min/max channel dwell */
if (sr->sr_mindwell != 0) {
sr->sr_mindwell = msecs_to_ticks(sr->sr_mindwell);
if (sr->sr_mindwell < 1)
sr->sr_mindwell = 1;
}
if (sr->sr_maxdwell != 0) {
sr->sr_maxdwell = msecs_to_ticks(sr->sr_maxdwell);
if (sr->sr_maxdwell < 1)
sr->sr_maxdwell = 1;
}
/* NB: silently reduce ssid count to what is supported */
if (sr->sr_nssid > IEEE80211_SCAN_MAX_SSID)
sr->sr_nssid = IEEE80211_SCAN_MAX_SSID;
for (i = 0; i < sr->sr_nssid; i++)
if (sr->sr_ssid[i].len > IEEE80211_NWID_LEN)
return EINVAL;
/* cleanse flags just in case, could reject if invalid flags */
sr->sr_flags &= IEEE80211_IOC_SCAN_FLAGS;
/*
* Add an implicit NOPICK if the vap is not marked UP. This
* allows applications to scan without joining a bss (or picking
* a channel and setting up a bss) and without forcing manual
* roaming mode--you just need to mark the parent device UP.
*/
if ((vap->iv_ifp->if_flags & IFF_UP) == 0)
sr->sr_flags |= IEEE80211_IOC_SCAN_NOPICK;
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
"%s: flags 0x%x%s duration 0x%x mindwell %u maxdwell %u nssid %d\n",
__func__, sr->sr_flags,
(vap->iv_ifp->if_flags & IFF_UP) == 0 ? " (!IFF_UP)" : "",
sr->sr_duration, sr->sr_mindwell, sr->sr_maxdwell, sr->sr_nssid);
/*
* If we are in INIT state then the driver has never had a chance
* to setup hardware state to do a scan; we must use the state
* machine to get us up to the SCAN state but once we reach SCAN
* state we then want to use the supplied params. Stash the
* parameters in the vap and mark IEEE80211_FEXT_SCANREQ; the
* state machines will recognize this and use the stashed params
* to issue the scan request.
*
* Otherwise just invoke the scan machinery directly.
*/
IEEE80211_LOCK(ic);
if (vap->iv_state == IEEE80211_S_INIT) {
/* NB: clobbers previous settings */
vap->iv_scanreq_flags = sr->sr_flags;
vap->iv_scanreq_duration = sr->sr_duration;
vap->iv_scanreq_nssid = sr->sr_nssid;
for (i = 0; i < sr->sr_nssid; i++) {
vap->iv_scanreq_ssid[i].len = sr->sr_ssid[i].len;
memcpy(vap->iv_scanreq_ssid[i].ssid,
sr->sr_ssid[i].ssid, sr->sr_ssid[i].len);
}
vap->iv_flags_ext |= IEEE80211_FEXT_SCANREQ;
IEEE80211_UNLOCK(ic);
ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
} else {
vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANREQ;
IEEE80211_UNLOCK(ic);
if (sr->sr_flags & IEEE80211_IOC_SCAN_CHECK) {
error = ieee80211_check_scan(vap, sr->sr_flags,
sr->sr_duration, sr->sr_mindwell, sr->sr_maxdwell,
sr->sr_nssid,
/* NB: cheat, we assume structures are compatible */
(const struct ieee80211_scan_ssid *) &sr->sr_ssid[0]);
} else {
error = ieee80211_start_scan(vap, sr->sr_flags,
sr->sr_duration, sr->sr_mindwell, sr->sr_maxdwell,
sr->sr_nssid,
/* NB: cheat, we assume structures are compatible */
(const struct ieee80211_scan_ssid *) &sr->sr_ssid[0]);
}
if (error == 0)
return EINPROGRESS;
}
return 0;
#undef IEEE80211_IOC_SCAN_FLAGS
}
static __noinline int
ieee80211_ioctl_scanreq(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
- struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_scan_req sr; /* XXX off stack? */
int error;
- /* NB: parent must be running */
- if ((ic->ic_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return ENXIO;
-
if (ireq->i_len != sizeof(sr))
return EINVAL;
error = copyin(ireq->i_data, &sr, sizeof(sr));
if (error != 0)
return error;
return ieee80211_scanreq(vap, &sr);
}
static __noinline int
ieee80211_ioctl_setstavlan(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
struct ieee80211_node *ni;
struct ieee80211req_sta_vlan vlan;
int error;
if (ireq->i_len != sizeof(vlan))
return EINVAL;
error = copyin(ireq->i_data, &vlan, sizeof(vlan));
if (error != 0)
return error;
if (!IEEE80211_ADDR_EQ(vlan.sv_macaddr, zerobssid)) {
ni = ieee80211_find_vap_node(&vap->iv_ic->ic_sta, vap,
vlan.sv_macaddr);
if (ni == NULL)
return ENOENT;
} else
ni = ieee80211_ref_node(vap->iv_bss);
ni->ni_vlan = vlan.sv_vlan;
ieee80211_free_node(ni);
return error;
}
static int
isvap11g(const struct ieee80211vap *vap)
{
const struct ieee80211_node *bss = vap->iv_bss;
return bss->ni_chan != IEEE80211_CHAN_ANYC &&
IEEE80211_IS_CHAN_ANYG(bss->ni_chan);
}
static int
isvapht(const struct ieee80211vap *vap)
{
const struct ieee80211_node *bss = vap->iv_bss;
return bss->ni_chan != IEEE80211_CHAN_ANYC &&
IEEE80211_IS_CHAN_HT(bss->ni_chan);
}
/*
* Dummy ioctl set handler so the linker set is defined.
*/
static int
dummy_ioctl_set(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
return ENOSYS;
}
IEEE80211_IOCTL_SET(dummy, dummy_ioctl_set);
static int
ieee80211_ioctl_setdefault(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
ieee80211_ioctl_setfunc * const *set;
int error;
SET_FOREACH(set, ieee80211_ioctl_setset) {
error = (*set)(vap, ireq);
if (error != ENOSYS)
return error;
}
return EINVAL;
}
static __noinline int
ieee80211_ioctl_set80211(struct ieee80211vap *vap, u_long cmd, struct ieee80211req *ireq)
{
struct ieee80211com *ic = vap->iv_ic;
int error;
const struct ieee80211_authenticator *auth;
uint8_t tmpkey[IEEE80211_KEYBUF_SIZE];
char tmpssid[IEEE80211_NWID_LEN];
uint8_t tmpbssid[IEEE80211_ADDR_LEN];
struct ieee80211_key *k;
u_int kid;
uint32_t flags;
error = 0;
switch (ireq->i_type) {
case IEEE80211_IOC_SSID:
if (ireq->i_val != 0 ||
ireq->i_len > IEEE80211_NWID_LEN)
return EINVAL;
error = copyin(ireq->i_data, tmpssid, ireq->i_len);
if (error)
break;
memset(vap->iv_des_ssid[0].ssid, 0, IEEE80211_NWID_LEN);
vap->iv_des_ssid[0].len = ireq->i_len;
memcpy(vap->iv_des_ssid[0].ssid, tmpssid, ireq->i_len);
vap->iv_des_nssid = (ireq->i_len > 0);
error = ENETRESET;
break;
case IEEE80211_IOC_WEP:
switch (ireq->i_val) {
case IEEE80211_WEP_OFF:
vap->iv_flags &= ~IEEE80211_F_PRIVACY;
vap->iv_flags &= ~IEEE80211_F_DROPUNENC;
break;
case IEEE80211_WEP_ON:
vap->iv_flags |= IEEE80211_F_PRIVACY;
vap->iv_flags |= IEEE80211_F_DROPUNENC;
break;
case IEEE80211_WEP_MIXED:
vap->iv_flags |= IEEE80211_F_PRIVACY;
vap->iv_flags &= ~IEEE80211_F_DROPUNENC;
break;
}
error = ENETRESET;
break;
case IEEE80211_IOC_WEPKEY:
kid = (u_int) ireq->i_val;
if (kid >= IEEE80211_WEP_NKID)
return EINVAL;
k = &vap->iv_nw_keys[kid];
if (ireq->i_len == 0) {
/* zero-len =>'s delete any existing key */
(void) ieee80211_crypto_delkey(vap, k);
break;
}
if (ireq->i_len > sizeof(tmpkey))
return EINVAL;
memset(tmpkey, 0, sizeof(tmpkey));
error = copyin(ireq->i_data, tmpkey, ireq->i_len);
if (error)
break;
ieee80211_key_update_begin(vap);
k->wk_keyix = kid; /* NB: force fixed key id */
if (ieee80211_crypto_newkey(vap, IEEE80211_CIPHER_WEP,
IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV, k)) {
k->wk_keylen = ireq->i_len;
memcpy(k->wk_key, tmpkey, sizeof(tmpkey));
IEEE80211_ADDR_COPY(k->wk_macaddr, vap->iv_myaddr);
if (!ieee80211_crypto_setkey(vap, k))
error = EINVAL;
} else
error = EINVAL;
ieee80211_key_update_end(vap);
break;
case IEEE80211_IOC_WEPTXKEY:
kid = (u_int) ireq->i_val;
if (kid >= IEEE80211_WEP_NKID &&
(uint16_t) kid != IEEE80211_KEYIX_NONE)
return EINVAL;
vap->iv_def_txkey = kid;
break;
case IEEE80211_IOC_AUTHMODE:
switch (ireq->i_val) {
case IEEE80211_AUTH_WPA:
case IEEE80211_AUTH_8021X: /* 802.1x */
case IEEE80211_AUTH_OPEN: /* open */
case IEEE80211_AUTH_SHARED: /* shared-key */
case IEEE80211_AUTH_AUTO: /* auto */
auth = ieee80211_authenticator_get(ireq->i_val);
if (auth == NULL)
return EINVAL;
break;
default:
return EINVAL;
}
switch (ireq->i_val) {
case IEEE80211_AUTH_WPA: /* WPA w/ 802.1x */
vap->iv_flags |= IEEE80211_F_PRIVACY;
ireq->i_val = IEEE80211_AUTH_8021X;
break;
case IEEE80211_AUTH_OPEN: /* open */
vap->iv_flags &= ~(IEEE80211_F_WPA|IEEE80211_F_PRIVACY);
break;
case IEEE80211_AUTH_SHARED: /* shared-key */
case IEEE80211_AUTH_8021X: /* 802.1x */
vap->iv_flags &= ~IEEE80211_F_WPA;
/* both require a key so mark the PRIVACY capability */
vap->iv_flags |= IEEE80211_F_PRIVACY;
break;
case IEEE80211_AUTH_AUTO: /* auto */
vap->iv_flags &= ~IEEE80211_F_WPA;
/* XXX PRIVACY handling? */
/* XXX what's the right way to do this? */
break;
}
/* NB: authenticator attach/detach happens on state change */
vap->iv_bss->ni_authmode = ireq->i_val;
/* XXX mixed/mode/usage? */
vap->iv_auth = auth;
error = ENETRESET;
break;
case IEEE80211_IOC_CHANNEL:
error = ieee80211_ioctl_setchannel(vap, ireq);
break;
case IEEE80211_IOC_POWERSAVE:
switch (ireq->i_val) {
case IEEE80211_POWERSAVE_OFF:
if (vap->iv_flags & IEEE80211_F_PMGTON) {
ieee80211_syncflag(vap, -IEEE80211_F_PMGTON);
error = ERESTART;
}
break;
case IEEE80211_POWERSAVE_ON:
if ((vap->iv_caps & IEEE80211_C_PMGT) == 0)
error = EOPNOTSUPP;
else if ((vap->iv_flags & IEEE80211_F_PMGTON) == 0) {
ieee80211_syncflag(vap, IEEE80211_F_PMGTON);
error = ERESTART;
}
break;
default:
error = EINVAL;
break;
}
break;
case IEEE80211_IOC_POWERSAVESLEEP:
if (ireq->i_val < 0)
return EINVAL;
ic->ic_lintval = ireq->i_val;
error = ERESTART;
break;
case IEEE80211_IOC_RTSTHRESHOLD:
if (!(IEEE80211_RTS_MIN <= ireq->i_val &&
ireq->i_val <= IEEE80211_RTS_MAX))
return EINVAL;
vap->iv_rtsthreshold = ireq->i_val;
error = ERESTART;
break;
case IEEE80211_IOC_PROTMODE:
if (ireq->i_val > IEEE80211_PROT_RTSCTS)
return EINVAL;
ic->ic_protmode = (enum ieee80211_protmode)ireq->i_val;
/* NB: if not operating in 11g this can wait */
if (ic->ic_bsschan != IEEE80211_CHAN_ANYC &&
IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan))
error = ERESTART;
break;
case IEEE80211_IOC_TXPOWER:
if ((ic->ic_caps & IEEE80211_C_TXPMGT) == 0)
return EOPNOTSUPP;
if (!(IEEE80211_TXPOWER_MIN <= ireq->i_val &&
ireq->i_val <= IEEE80211_TXPOWER_MAX))
return EINVAL;
ic->ic_txpowlimit = ireq->i_val;
error = ERESTART;
break;
case IEEE80211_IOC_ROAMING:
if (!(IEEE80211_ROAMING_DEVICE <= ireq->i_val &&
ireq->i_val <= IEEE80211_ROAMING_MANUAL))
return EINVAL;
vap->iv_roaming = (enum ieee80211_roamingmode)ireq->i_val;
/* XXXX reset? */
break;
case IEEE80211_IOC_PRIVACY:
if (ireq->i_val) {
/* XXX check for key state? */
vap->iv_flags |= IEEE80211_F_PRIVACY;
} else
vap->iv_flags &= ~IEEE80211_F_PRIVACY;
/* XXX ERESTART? */
break;
case IEEE80211_IOC_DROPUNENCRYPTED:
if (ireq->i_val)
vap->iv_flags |= IEEE80211_F_DROPUNENC;
else
vap->iv_flags &= ~IEEE80211_F_DROPUNENC;
/* XXX ERESTART? */
break;
case IEEE80211_IOC_WPAKEY:
error = ieee80211_ioctl_setkey(vap, ireq);
break;
case IEEE80211_IOC_DELKEY:
error = ieee80211_ioctl_delkey(vap, ireq);
break;
case IEEE80211_IOC_MLME:
error = ieee80211_ioctl_setmlme(vap, ireq);
break;
case IEEE80211_IOC_COUNTERMEASURES:
if (ireq->i_val) {
if ((vap->iv_flags & IEEE80211_F_WPA) == 0)
return EOPNOTSUPP;
vap->iv_flags |= IEEE80211_F_COUNTERM;
} else
vap->iv_flags &= ~IEEE80211_F_COUNTERM;
/* XXX ERESTART? */
break;
case IEEE80211_IOC_WPA:
if (ireq->i_val > 3)
return EINVAL;
/* XXX verify ciphers available */
flags = vap->iv_flags & ~IEEE80211_F_WPA;
switch (ireq->i_val) {
case 1:
if (!(vap->iv_caps & IEEE80211_C_WPA1))
return EOPNOTSUPP;
flags |= IEEE80211_F_WPA1;
break;
case 2:
if (!(vap->iv_caps & IEEE80211_C_WPA2))
return EOPNOTSUPP;
flags |= IEEE80211_F_WPA2;
break;
case 3:
if ((vap->iv_caps & IEEE80211_C_WPA) != IEEE80211_C_WPA)
return EOPNOTSUPP;
flags |= IEEE80211_F_WPA1 | IEEE80211_F_WPA2;
break;
default: /* Can't set any -> error */
return EOPNOTSUPP;
}
vap->iv_flags = flags;
error = ERESTART; /* NB: can change beacon frame */
break;
case IEEE80211_IOC_WME:
if (ireq->i_val) {
if ((vap->iv_caps & IEEE80211_C_WME) == 0)
return EOPNOTSUPP;
ieee80211_syncflag(vap, IEEE80211_F_WME);
} else
ieee80211_syncflag(vap, -IEEE80211_F_WME);
error = ERESTART; /* NB: can change beacon frame */
break;
case IEEE80211_IOC_HIDESSID:
if (ireq->i_val)
vap->iv_flags |= IEEE80211_F_HIDESSID;
else
vap->iv_flags &= ~IEEE80211_F_HIDESSID;
error = ERESTART; /* XXX ENETRESET? */
break;
case IEEE80211_IOC_APBRIDGE:
if (ireq->i_val == 0)
vap->iv_flags |= IEEE80211_F_NOBRIDGE;
else
vap->iv_flags &= ~IEEE80211_F_NOBRIDGE;
break;
case IEEE80211_IOC_BSSID:
if (ireq->i_len != sizeof(tmpbssid))
return EINVAL;
error = copyin(ireq->i_data, tmpbssid, ireq->i_len);
if (error)
break;
IEEE80211_ADDR_COPY(vap->iv_des_bssid, tmpbssid);
if (IEEE80211_ADDR_EQ(vap->iv_des_bssid, zerobssid))
vap->iv_flags &= ~IEEE80211_F_DESBSSID;
else
vap->iv_flags |= IEEE80211_F_DESBSSID;
error = ENETRESET;
break;
case IEEE80211_IOC_CHANLIST:
error = ieee80211_ioctl_setchanlist(vap, ireq);
break;
#define OLD_IEEE80211_IOC_SCAN_REQ 23
#ifdef OLD_IEEE80211_IOC_SCAN_REQ
case OLD_IEEE80211_IOC_SCAN_REQ:
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
"%s: active scan request\n", __func__);
/*
* If we are in INIT state then the driver has never
* had a chance to set up hardware state to do a scan;
* use the state machine to get us up to the SCAN state.
* Otherwise just invoke the scan machinery to start
* a one-time scan.
*/
if (vap->iv_state == IEEE80211_S_INIT)
ieee80211_new_state(vap, IEEE80211_S_SCAN, 0);
else
(void) ieee80211_start_scan(vap,
IEEE80211_SCAN_ACTIVE |
IEEE80211_SCAN_NOPICK |
IEEE80211_SCAN_ONCE,
IEEE80211_SCAN_FOREVER, 0, 0,
/* XXX use ioctl params */
vap->iv_des_nssid, vap->iv_des_ssid);
break;
#endif /* OLD_IEEE80211_IOC_SCAN_REQ */
case IEEE80211_IOC_SCAN_REQ:
error = ieee80211_ioctl_scanreq(vap, ireq);
break;
case IEEE80211_IOC_SCAN_CANCEL:
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
"%s: cancel scan\n", __func__);
ieee80211_cancel_scan(vap);
break;
case IEEE80211_IOC_HTCONF:
if (ireq->i_val & 1)
ieee80211_syncflag_ht(vap, IEEE80211_FHT_HT);
else
ieee80211_syncflag_ht(vap, -IEEE80211_FHT_HT);
if (ireq->i_val & 2)
ieee80211_syncflag_ht(vap, IEEE80211_FHT_USEHT40);
else
ieee80211_syncflag_ht(vap, -IEEE80211_FHT_USEHT40);
error = ENETRESET;
break;
case IEEE80211_IOC_ADDMAC:
case IEEE80211_IOC_DELMAC:
error = ieee80211_ioctl_macmac(vap, ireq);
break;
case IEEE80211_IOC_MACCMD:
error = ieee80211_ioctl_setmaccmd(vap, ireq);
break;
case IEEE80211_IOC_STA_STATS:
error = ieee80211_ioctl_setstastats(vap, ireq);
break;
case IEEE80211_IOC_STA_TXPOW:
error = ieee80211_ioctl_setstatxpow(vap, ireq);
break;
case IEEE80211_IOC_WME_CWMIN: /* WME: CWmin */
case IEEE80211_IOC_WME_CWMAX: /* WME: CWmax */
case IEEE80211_IOC_WME_AIFS: /* WME: AIFS */
case IEEE80211_IOC_WME_TXOPLIMIT: /* WME: txops limit */
case IEEE80211_IOC_WME_ACM: /* WME: ACM (bss only) */
case IEEE80211_IOC_WME_ACKPOLICY: /* WME: ACK policy (bss only) */
error = ieee80211_ioctl_setwmeparam(vap, ireq);
break;
case IEEE80211_IOC_DTIM_PERIOD:
if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
vap->iv_opmode != IEEE80211_M_MBSS &&
vap->iv_opmode != IEEE80211_M_IBSS)
return EINVAL;
if (IEEE80211_DTIM_MIN <= ireq->i_val &&
ireq->i_val <= IEEE80211_DTIM_MAX) {
vap->iv_dtim_period = ireq->i_val;
error = ENETRESET; /* requires restart */
} else
error = EINVAL;
break;
case IEEE80211_IOC_BEACON_INTERVAL:
if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
vap->iv_opmode != IEEE80211_M_MBSS &&
vap->iv_opmode != IEEE80211_M_IBSS)
return EINVAL;
if (IEEE80211_BINTVAL_MIN <= ireq->i_val &&
ireq->i_val <= IEEE80211_BINTVAL_MAX) {
ic->ic_bintval = ireq->i_val;
error = ENETRESET; /* requires restart */
} else
error = EINVAL;
break;
case IEEE80211_IOC_PUREG:
if (ireq->i_val)
vap->iv_flags |= IEEE80211_F_PUREG;
else
vap->iv_flags &= ~IEEE80211_F_PUREG;
/* NB: reset only if we're operating on an 11g channel */
if (isvap11g(vap))
error = ENETRESET;
break;
case IEEE80211_IOC_QUIET:
vap->iv_quiet = ireq->i_val;
break;
case IEEE80211_IOC_QUIET_COUNT:
vap->iv_quiet_count = ireq->i_val;
break;
case IEEE80211_IOC_QUIET_PERIOD:
vap->iv_quiet_period = ireq->i_val;
break;
case IEEE80211_IOC_QUIET_OFFSET:
vap->iv_quiet_offset = ireq->i_val;
break;
case IEEE80211_IOC_QUIET_DUR:
if (ireq->i_val < vap->iv_bss->ni_intval)
vap->iv_quiet_duration = ireq->i_val;
else
error = EINVAL;
break;
case IEEE80211_IOC_BGSCAN:
if (ireq->i_val) {
if ((vap->iv_caps & IEEE80211_C_BGSCAN) == 0)
return EOPNOTSUPP;
vap->iv_flags |= IEEE80211_F_BGSCAN;
} else
vap->iv_flags &= ~IEEE80211_F_BGSCAN;
break;
case IEEE80211_IOC_BGSCAN_IDLE:
if (ireq->i_val >= IEEE80211_BGSCAN_IDLE_MIN)
vap->iv_bgscanidle = ireq->i_val*hz/1000;
else
error = EINVAL;
break;
case IEEE80211_IOC_BGSCAN_INTERVAL:
if (ireq->i_val >= IEEE80211_BGSCAN_INTVAL_MIN)
vap->iv_bgscanintvl = ireq->i_val*hz;
else
error = EINVAL;
break;
case IEEE80211_IOC_SCANVALID:
if (ireq->i_val >= IEEE80211_SCAN_VALID_MIN)
vap->iv_scanvalid = ireq->i_val*hz;
else
error = EINVAL;
break;
case IEEE80211_IOC_FRAGTHRESHOLD:
if ((vap->iv_caps & IEEE80211_C_TXFRAG) == 0 &&
ireq->i_val != IEEE80211_FRAG_MAX)
return EOPNOTSUPP;
if (!(IEEE80211_FRAG_MIN <= ireq->i_val &&
ireq->i_val <= IEEE80211_FRAG_MAX))
return EINVAL;
vap->iv_fragthreshold = ireq->i_val;
error = ERESTART;
break;
case IEEE80211_IOC_BURST:
if (ireq->i_val) {
if ((vap->iv_caps & IEEE80211_C_BURST) == 0)
return EOPNOTSUPP;
ieee80211_syncflag(vap, IEEE80211_F_BURST);
} else
ieee80211_syncflag(vap, -IEEE80211_F_BURST);
error = ERESTART;
break;
case IEEE80211_IOC_BMISSTHRESHOLD:
if (!(IEEE80211_HWBMISS_MIN <= ireq->i_val &&
ireq->i_val <= IEEE80211_HWBMISS_MAX))
return EINVAL;
vap->iv_bmissthreshold = ireq->i_val;
error = ERESTART;
break;
case IEEE80211_IOC_CURCHAN:
error = ieee80211_ioctl_setcurchan(vap, ireq);
break;
case IEEE80211_IOC_SHORTGI:
if (ireq->i_val) {
#define IEEE80211_HTCAP_SHORTGI \
(IEEE80211_HTCAP_SHORTGI20 | IEEE80211_HTCAP_SHORTGI40)
if (((ireq->i_val ^ vap->iv_htcaps) & IEEE80211_HTCAP_SHORTGI) != 0)
return EINVAL;
if (ireq->i_val & IEEE80211_HTCAP_SHORTGI20)
vap->iv_flags_ht |= IEEE80211_FHT_SHORTGI20;
if (ireq->i_val & IEEE80211_HTCAP_SHORTGI40)
vap->iv_flags_ht |= IEEE80211_FHT_SHORTGI40;
#undef IEEE80211_HTCAP_SHORTGI
} else
vap->iv_flags_ht &=
~(IEEE80211_FHT_SHORTGI20 | IEEE80211_FHT_SHORTGI40);
error = ERESTART;
break;
case IEEE80211_IOC_AMPDU:
if (ireq->i_val && (vap->iv_htcaps & IEEE80211_HTC_AMPDU) == 0)
return EINVAL;
if (ireq->i_val & 1)
vap->iv_flags_ht |= IEEE80211_FHT_AMPDU_TX;
else
vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_TX;
if (ireq->i_val & 2)
vap->iv_flags_ht |= IEEE80211_FHT_AMPDU_RX;
else
vap->iv_flags_ht &= ~IEEE80211_FHT_AMPDU_RX;
/* NB: reset only if we're operating on an 11n channel */
if (isvapht(vap))
error = ERESTART;
break;
case IEEE80211_IOC_AMPDU_LIMIT:
if (!(IEEE80211_HTCAP_MAXRXAMPDU_8K <= ireq->i_val &&
ireq->i_val <= IEEE80211_HTCAP_MAXRXAMPDU_64K))
return EINVAL;
if (vap->iv_opmode == IEEE80211_M_HOSTAP)
vap->iv_ampdu_rxmax = ireq->i_val;
else
vap->iv_ampdu_limit = ireq->i_val;
error = ERESTART;
break;
case IEEE80211_IOC_AMPDU_DENSITY:
if (!(IEEE80211_HTCAP_MPDUDENSITY_NA <= ireq->i_val &&
ireq->i_val <= IEEE80211_HTCAP_MPDUDENSITY_16))
return EINVAL;
vap->iv_ampdu_density = ireq->i_val;
error = ERESTART;
break;
case IEEE80211_IOC_AMSDU:
if (ireq->i_val && (vap->iv_htcaps & IEEE80211_HTC_AMSDU) == 0)
return EINVAL;
if (ireq->i_val & 1)
vap->iv_flags_ht |= IEEE80211_FHT_AMSDU_TX;
else
vap->iv_flags_ht &= ~IEEE80211_FHT_AMSDU_TX;
if (ireq->i_val & 2)
vap->iv_flags_ht |= IEEE80211_FHT_AMSDU_RX;
else
vap->iv_flags_ht &= ~IEEE80211_FHT_AMSDU_RX;
/* NB: reset only if we're operating on an 11n channel */
if (isvapht(vap))
error = ERESTART;
break;
case IEEE80211_IOC_AMSDU_LIMIT:
/* XXX validate */
vap->iv_amsdu_limit = ireq->i_val; /* XXX truncation? */
break;
case IEEE80211_IOC_PUREN:
if (ireq->i_val) {
if ((vap->iv_flags_ht & IEEE80211_FHT_HT) == 0)
return EINVAL;
vap->iv_flags_ht |= IEEE80211_FHT_PUREN;
} else
vap->iv_flags_ht &= ~IEEE80211_FHT_PUREN;
/* NB: reset only if we're operating on an 11n channel */
if (isvapht(vap))
error = ERESTART;
break;
case IEEE80211_IOC_DOTH:
if (ireq->i_val) {
#if 0
/* XXX no capability */
if ((vap->iv_caps & IEEE80211_C_DOTH) == 0)
return EOPNOTSUPP;
#endif
vap->iv_flags |= IEEE80211_F_DOTH;
} else
vap->iv_flags &= ~IEEE80211_F_DOTH;
error = ENETRESET;
break;
case IEEE80211_IOC_REGDOMAIN:
error = ieee80211_ioctl_setregdomain(vap, ireq);
break;
case IEEE80211_IOC_ROAM:
error = ieee80211_ioctl_setroam(vap, ireq);
break;
case IEEE80211_IOC_TXPARAMS:
error = ieee80211_ioctl_settxparams(vap, ireq);
break;
case IEEE80211_IOC_HTCOMPAT:
if (ireq->i_val) {
if ((vap->iv_flags_ht & IEEE80211_FHT_HT) == 0)
return EOPNOTSUPP;
vap->iv_flags_ht |= IEEE80211_FHT_HTCOMPAT;
} else
vap->iv_flags_ht &= ~IEEE80211_FHT_HTCOMPAT;
/* NB: reset only if we're operating on an 11n channel */
if (isvapht(vap))
error = ERESTART;
break;
case IEEE80211_IOC_DWDS:
if (ireq->i_val) {
/* NB: DWDS only makes sense for WDS-capable devices */
if ((ic->ic_caps & IEEE80211_C_WDS) == 0)
return EOPNOTSUPP;
/* NB: DWDS is used only with ap+sta vaps */
if (vap->iv_opmode != IEEE80211_M_HOSTAP &&
vap->iv_opmode != IEEE80211_M_STA)
return EINVAL;
vap->iv_flags |= IEEE80211_F_DWDS;
if (vap->iv_opmode == IEEE80211_M_STA)
vap->iv_flags_ext |= IEEE80211_FEXT_4ADDR;
} else {
vap->iv_flags &= ~IEEE80211_F_DWDS;
if (vap->iv_opmode == IEEE80211_M_STA)
vap->iv_flags_ext &= ~IEEE80211_FEXT_4ADDR;
}
break;
case IEEE80211_IOC_INACTIVITY:
if (ireq->i_val)
vap->iv_flags_ext |= IEEE80211_FEXT_INACT;
else
vap->iv_flags_ext &= ~IEEE80211_FEXT_INACT;
break;
case IEEE80211_IOC_APPIE:
error = ieee80211_ioctl_setappie(vap, ireq);
break;
case IEEE80211_IOC_WPS:
if (ireq->i_val) {
if ((vap->iv_caps & IEEE80211_C_WPA) == 0)
return EOPNOTSUPP;
vap->iv_flags_ext |= IEEE80211_FEXT_WPS;
} else
vap->iv_flags_ext &= ~IEEE80211_FEXT_WPS;
break;
case IEEE80211_IOC_TSN:
if (ireq->i_val) {
if ((vap->iv_caps & IEEE80211_C_WPA) == 0)
return EOPNOTSUPP;
vap->iv_flags_ext |= IEEE80211_FEXT_TSN;
} else
vap->iv_flags_ext &= ~IEEE80211_FEXT_TSN;
break;
case IEEE80211_IOC_CHANSWITCH:
error = ieee80211_ioctl_chanswitch(vap, ireq);
break;
case IEEE80211_IOC_DFS:
if (ireq->i_val) {
if ((vap->iv_caps & IEEE80211_C_DFS) == 0)
return EOPNOTSUPP;
/* NB: DFS requires 11h support */
if ((vap->iv_flags & IEEE80211_F_DOTH) == 0)
return EINVAL;
vap->iv_flags_ext |= IEEE80211_FEXT_DFS;
} else
vap->iv_flags_ext &= ~IEEE80211_FEXT_DFS;
break;
case IEEE80211_IOC_DOTD:
if (ireq->i_val)
vap->iv_flags_ext |= IEEE80211_FEXT_DOTD;
else
vap->iv_flags_ext &= ~IEEE80211_FEXT_DOTD;
if (vap->iv_opmode == IEEE80211_M_STA)
error = ENETRESET;
break;
case IEEE80211_IOC_HTPROTMODE:
if (ireq->i_val > IEEE80211_PROT_RTSCTS)
return EINVAL;
ic->ic_htprotmode = ireq->i_val ?
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_NONE;
/* NB: if not operating in 11n this can wait */
if (isvapht(vap))
error = ERESTART;
break;
case IEEE80211_IOC_STA_VLAN:
error = ieee80211_ioctl_setstavlan(vap, ireq);
break;
case IEEE80211_IOC_SMPS:
if ((ireq->i_val &~ IEEE80211_HTCAP_SMPS) != 0 ||
ireq->i_val == 0x0008) /* value of 2 is reserved */
return EINVAL;
if (ireq->i_val != IEEE80211_HTCAP_SMPS_OFF &&
(vap->iv_htcaps & IEEE80211_HTC_SMPS) == 0)
return EOPNOTSUPP;
vap->iv_htcaps = (vap->iv_htcaps &~ IEEE80211_HTCAP_SMPS) |
ireq->i_val;
/* NB: if not operating in 11n this can wait */
if (isvapht(vap))
error = ERESTART;
break;
case IEEE80211_IOC_RIFS:
if (ireq->i_val != 0) {
if ((vap->iv_htcaps & IEEE80211_HTC_RIFS) == 0)
return EOPNOTSUPP;
vap->iv_flags_ht |= IEEE80211_FHT_RIFS;
} else
vap->iv_flags_ht &= ~IEEE80211_FHT_RIFS;
/* NB: if not operating in 11n this can wait */
if (isvapht(vap))
error = ERESTART;
break;
default:
error = ieee80211_ioctl_setdefault(vap, ireq);
break;
}
/*
* The convention is that ENETRESET means an operation
* requires a complete re-initialization of the device (e.g.
* changing something that affects the association state).
* ERESTART means the request may be handled with only a
* reload of the hardware state. We hand ERESTART requests
* to the iv_reset callback so the driver can decide. If
* a device does not fill in iv_reset then it defaults to one
* that returns ENETRESET. Otherwise a driver may return
* ENETRESET (in which case a full reset will be done) or
* 0 to mean there's no need to do anything (e.g. when the
* change has no effect on the driver/device).
*/
if (error == ERESTART)
error = IFNET_IS_UP_RUNNING(vap->iv_ifp) ?
vap->iv_reset(vap, ireq->i_type) : 0;
if (error == ENETRESET) {
/* XXX need to re-think AUTO handling */
if (IS_UP_AUTO(vap))
ieee80211_init(vap);
error = 0;
}
return error;
}
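/*
 * Editor's illustration (not from this change): a minimal sketch of a
 * driver iv_reset callback following the ERESTART convention described
 * above -- return 0 when the change needs no device work, reload hardware
 * state and return 0 when a light-weight update suffices, or return
 * ENETRESET to force a full re-initialization.  The "example" driver and
 * its helper routine are hypothetical.
 */
#if 0
static int
example_reset(struct ieee80211vap *vap, u_long cmd)
{
	switch (cmd) {
	case IEEE80211_IOC_RTSTHRESHOLD:
		/* push the new threshold to hardware; no full reset needed */
		example_update_rtsthreshold(vap);	/* hypothetical helper */
		return 0;
	default:
		return ENETRESET;	/* anything else: ask for a full re-init */
	}
}
#endif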
-/*
- * Rebuild the parent's multicast address list after an add/del
- * of a multicast address for a vap. We have no way to tell
- * what happened above to optimize the work so we purge the entire
- * list and rebuild from scratch. This is way expensive.
- * Note also the half-baked workaround for if_addmulti calling
- * back to the parent device; there's no way to insert mcast
- * entries quietly and/or cheaply.
- */
-static void
-ieee80211_ioctl_updatemulti(struct ieee80211com *ic)
-{
- struct ifnet *parent = ic->ic_ifp;
- struct ieee80211vap *vap;
- void *ioctl;
-
- IEEE80211_LOCK(ic);
- if_delallmulti(parent);
- ioctl = parent->if_ioctl; /* XXX WAR if_allmulti */
- parent->if_ioctl = NULL;
- TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
- struct ifnet *ifp = vap->iv_ifp;
- struct ifmultiaddr *ifma;
-
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- (void) if_addmulti(parent, ifma->ifma_addr, NULL);
- }
- }
- parent->if_ioctl = ioctl;
- ieee80211_runtask(ic, &ic->ic_mcast_task);
- IEEE80211_UNLOCK(ic);
-}
-
int
ieee80211_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct ieee80211vap *vap = ifp->if_softc;
struct ieee80211com *ic = vap->iv_ic;
int error = 0;
struct ifreq *ifr;
struct ifaddr *ifa; /* XXX */
switch (cmd) {
case SIOCSIFFLAGS:
IEEE80211_LOCK(ic);
- ieee80211_syncifflag_locked(ic, IFF_PROMISC);
- ieee80211_syncifflag_locked(ic, IFF_ALLMULTI);
+ if ((ifp->if_flags ^ vap->iv_ifflags) & IFF_PROMISC)
+ ieee80211_promisc(vap, ifp->if_flags & IFF_PROMISC);
+ if ((ifp->if_flags ^ vap->iv_ifflags) & IFF_ALLMULTI)
+ ieee80211_allmulti(vap, ifp->if_flags & IFF_ALLMULTI);
+ vap->iv_ifflags = ifp->if_flags;
if (ifp->if_flags & IFF_UP) {
/*
* Bring ourself up unless we're already operational.
* If we're the first vap and the parent is not up
* then it will automatically be brought up as a
* side-effect of bringing ourself up.
*/
if (vap->iv_state == IEEE80211_S_INIT)
ieee80211_start_locked(vap);
} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
/*
* Stop ourself. If we are the last vap to be
* marked down the parent will also be taken down.
*/
ieee80211_stop_locked(vap);
}
IEEE80211_UNLOCK(ic);
/* Wait for parent ioctl handler if it was queued */
ieee80211_waitfor_parent(ic);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
- ieee80211_ioctl_updatemulti(ic);
+ ieee80211_runtask(ic, &ic->ic_mcast_task);
break;
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
ifr = (struct ifreq *)data;
error = ifmedia_ioctl(ifp, ifr, &vap->iv_media, cmd);
break;
case SIOCG80211:
error = ieee80211_ioctl_get80211(vap, cmd,
(struct ieee80211req *) data);
break;
case SIOCS80211:
error = priv_check(curthread, PRIV_NET80211_MANAGE);
if (error == 0)
error = ieee80211_ioctl_set80211(vap, cmd,
(struct ieee80211req *) data);
break;
case SIOCG80211STATS:
ifr = (struct ifreq *)data;
copyout(&vap->iv_stats, ifr->ifr_data, sizeof (vap->iv_stats));
break;
case SIOCSIFMTU:
ifr = (struct ifreq *)data;
if (!(IEEE80211_MTU_MIN <= ifr->ifr_mtu &&
ifr->ifr_mtu <= IEEE80211_MTU_MAX))
error = EINVAL;
else
ifp->if_mtu = ifr->ifr_mtu;
break;
case SIOCSIFADDR:
/*
* XXX Handle this directly so we can suppress if_init calls.
* XXX This should be done in ether_ioctl but for the moment
* XXX there are too many other parts of the system that
* XXX set IFF_UP and so suppress if_init being called when
* XXX it should be.
*/
ifa = (struct ifaddr *) data;
switch (ifa->ifa_addr->sa_family) {
#ifdef INET
case AF_INET:
if ((ifp->if_flags & IFF_UP) == 0) {
ifp->if_flags |= IFF_UP;
ifp->if_init(ifp->if_softc);
}
arp_ifinit(ifp, ifa);
break;
#endif
default:
if ((ifp->if_flags & IFF_UP) == 0) {
ifp->if_flags |= IFF_UP;
ifp->if_init(ifp->if_softc);
}
break;
}
break;
- /* Pass NDIS ioctls up to the driver */
- case SIOCGDRVSPEC:
- case SIOCSDRVSPEC:
- case SIOCGPRIVATE_0: {
- struct ifnet *parent = vap->iv_ic->ic_ifp;
- error = parent->if_ioctl(parent, cmd, data);
- break;
- }
default:
+ /*
+ * Pass unknown ioctls first to the driver, and if it
+ * returns ENOTTY, then to the generic Ethernet handler.
+ */
+ if (ic->ic_ioctl != NULL &&
+ (error = ic->ic_ioctl(ic, cmd, data)) != ENOTTY)
+ break;
error = ether_ioctl(ifp, cmd, data);
break;
}
- return error;
+ return (error);
}
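/*
 * Editor's illustration (not from this change): with the new default case
 * above, a driver fields its own ioctls (e.g. the SIOCGDRVSPEC-style
 * requests that used to be forwarded to the parent ifnet) by filling in
 * ic->ic_ioctl and returning ENOTTY for anything it does not handle, which
 * lets the request fall through to ether_ioctl().  The driver name and
 * helper are hypothetical; the callback signature is assumed from the call
 * in the default case above.
 */
#if 0
static int
example_ic_ioctl(struct ieee80211com *ic, u_long cmd, void *data)
{
	switch (cmd) {
	case SIOCGDRVSPEC:			/* driver-private get */
		return example_getdrvspec(ic, data);	/* hypothetical */
	default:
		return ENOTTY;		/* let net80211 try ether_ioctl() */
	}
}
/* in the driver's attach routine:  ic->ic_ioctl = example_ic_ioctl; */
#endif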
Index: head/sys/net80211/ieee80211_output.c
===================================================================
--- head/sys/net80211/ieee80211_output.c (revision 287196)
+++ head/sys/net80211/ieee80211_output.c (revision 287197)
@@ -1,3465 +1,3463 @@
/*-
* Copyright (c) 2001 Atsushi Onoe
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/socket.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net80211/ieee80211_wds.h>
#include <net80211/ieee80211_mesh.h>
#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#endif
#ifdef INET
#include <netinet/if_ether.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#endif
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <security/mac/mac_framework.h>
#define ETHER_HEADER_COPY(dst, src) \
memcpy(dst, src, sizeof(struct ether_header))
/* unaligned little-endian access */
#define LE_WRITE_2(p, v) do { \
((uint8_t *)(p))[0] = (v) & 0xff; \
((uint8_t *)(p))[1] = ((v) >> 8) & 0xff; \
} while (0)
#define LE_WRITE_4(p, v) do { \
((uint8_t *)(p))[0] = (v) & 0xff; \
((uint8_t *)(p))[1] = ((v) >> 8) & 0xff; \
((uint8_t *)(p))[2] = ((v) >> 16) & 0xff; \
((uint8_t *)(p))[3] = ((v) >> 24) & 0xff; \
} while (0)
static int ieee80211_fragment(struct ieee80211vap *, struct mbuf *,
u_int hdrsize, u_int ciphdrsize, u_int mtu);
static void ieee80211_tx_mgt_cb(struct ieee80211_node *, void *, int);
#ifdef IEEE80211_DEBUG
/*
* Decide if an outbound management frame should be
* printed when debugging is enabled. This filters some
* of the less interesting frames that come frequently
* (e.g. beacons).
*/
static __inline int
doprint(struct ieee80211vap *vap, int subtype)
{
switch (subtype) {
case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
return (vap->iv_opmode == IEEE80211_M_IBSS);
}
return 1;
}
#endif
/*
* Transmit a frame to the given destination on the given VAP.
*
* It's up to the caller to figure out the details of who this
* is going to and resolving the node.
*
* This routine takes care of queuing it for power save,
* A-MPDU state stuff, fast-frames state stuff, encapsulation
* if required, then passing it up to the driver layer.
*
* This routine (for now) consumes the mbuf and frees the node
* reference; it ideally will return a TX status which reflects
* whether the mbuf was consumed or not, so the caller can
* free the mbuf (if appropriate) and the node reference (again,
* if appropriate.)
*/
int
ieee80211_vap_pkt_send_dest(struct ieee80211vap *vap, struct mbuf *m,
struct ieee80211_node *ni)
{
struct ieee80211com *ic = vap->iv_ic;
struct ifnet *ifp = vap->iv_ifp;
int error, len, mcast;
if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
(m->m_flags & M_PWR_SAV) == 0) {
/*
* Station in power save mode; pass the frame
* to the 802.11 layer and continue. We'll get
* the frame back when the time is right.
* XXX lose WDS vap linkage?
*/
if (ieee80211_pwrsave(ni, m) != 0)
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
/*
* We queued it fine, so tell the upper layer
* that we consumed it.
*/
return (0);
}
/* calculate priority so drivers can find the tx queue */
if (ieee80211_classify(ni, m)) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT,
ni->ni_macaddr, NULL,
"%s", "classification failure");
vap->iv_stats.is_tx_classify++;
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
m_freem(m);
ieee80211_free_node(ni);
/* XXX better status? */
return (0);
}
/*
* Stash the node pointer. Note that we do this after
* any call to ieee80211_dwds_mcast because that code
* uses any existing value for rcvif to identify the
* interface it (might have been) received on.
*/
m->m_pkthdr.rcvif = (void *)ni;
mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1: 0;
len = m->m_pkthdr.len;
BPF_MTAP(ifp, m); /* 802.3 tx */
/*
* Check if A-MPDU tx aggregation is set up or if we
* should try to enable it. The sta must be associated
* with HT and A-MPDU enabled for use. When the policy
* routine decides we should enable A-MPDU we issue an
* ADDBA request and wait for a reply. The frame being
* encapsulated will go out w/o using A-MPDU, or possibly
* it might be collected by the driver and held/retransmit.
* The default ic_ampdu_enable routine handles staggering
* ADDBA requests in case the receiver NAK's us or we are
* otherwise unable to establish a BA stream.
*/
if ((ni->ni_flags & IEEE80211_NODE_AMPDU_TX) &&
(vap->iv_flags_ht & IEEE80211_FHT_AMPDU_TX) &&
(m->m_flags & M_EAPOL) == 0) {
int tid = WME_AC_TO_TID(M_WME_GETAC(m));
struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid];
ieee80211_txampdu_count_packet(tap);
if (IEEE80211_AMPDU_RUNNING(tap)) {
/*
* Operational, mark frame for aggregation.
*
* XXX do tx aggregation here
*/
m->m_flags |= M_AMPDU_MPDU;
} else if (!IEEE80211_AMPDU_REQUESTED(tap) &&
ic->ic_ampdu_enable(ni, tap)) {
/*
* Not negotiated yet, request service.
*/
ieee80211_ampdu_request(ni, tap);
/* XXX hold frame for reply? */
}
}
#ifdef IEEE80211_SUPPORT_SUPERG
else if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF)) {
m = ieee80211_ff_check(ni, m);
if (m == NULL) {
/* NB: any ni ref held on stageq */
return (0);
}
}
#endif /* IEEE80211_SUPPORT_SUPERG */
/*
* Grab the TX lock - serialise the TX process from this
* point (where TX state is being checked/modified)
* through to driver queue.
*/
IEEE80211_TX_LOCK(ic);
if (__predict_true((vap->iv_caps & IEEE80211_C_8023ENCAP) == 0)) {
/*
* Encapsulate the packet in prep for transmission.
*/
m = ieee80211_encap(vap, ni, m);
if (m == NULL) {
/* NB: stat+msg handled in ieee80211_encap */
IEEE80211_TX_UNLOCK(ic);
ieee80211_free_node(ni);
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return (ENOBUFS);
}
}
error = ieee80211_parent_xmitpkt(ic, m);
/*
* Unlock at this point - no need to hold it across
* ieee80211_free_node() (ie, the comlock)
*/
IEEE80211_TX_UNLOCK(ic);
if (error != 0) {
/* NB: IFQ_HANDOFF reclaims mbuf */
ieee80211_free_node(ni);
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- } else {
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast);
- if_inc_counter(ifp, IFCOUNTER_OBYTES, len);
}
ic->ic_lastdata = ticks;
return (0);
}
/*
* Send the given mbuf through the given vap.
*
* This consumes the mbuf regardless of whether the transmit
* was successful or not.
*
* This does none of the initial checks that ieee80211_start()
* does (eg CAC timeout, interface wakeup) - the caller must
* do this first.
*/
static int
ieee80211_start_pkt(struct ieee80211vap *vap, struct mbuf *m)
{
#define IS_DWDS(vap) \
(vap->iv_opmode == IEEE80211_M_WDS && \
(vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) == 0)
struct ieee80211com *ic = vap->iv_ic;
struct ifnet *ifp = vap->iv_ifp;
struct ieee80211_node *ni;
struct ether_header *eh;
/*
* Cancel any background scan.
*/
if (ic->ic_flags & IEEE80211_F_SCAN)
ieee80211_cancel_anyscan(vap);
/*
* Find the node for the destination so we can do
* things like power save and fast frames aggregation.
*
* NB: past this point various code assumes the first
* mbuf has the 802.3 header present (and contiguous).
*/
ni = NULL;
if (m->m_len < sizeof(struct ether_header) &&
(m = m_pullup(m, sizeof(struct ether_header))) == NULL) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
"discard frame, %s\n", "m_pullup failed");
vap->iv_stats.is_tx_nobuf++; /* XXX */
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return (ENOBUFS);
}
eh = mtod(m, struct ether_header *);
if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
if (IS_DWDS(vap)) {
/*
* Only unicast frames from the above go out
* DWDS vaps; multicast frames are handled by
* dispatching the frame as it comes through
* the AP vap (see below).
*/
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_WDS,
eh->ether_dhost, "mcast", "%s", "on DWDS");
vap->iv_stats.is_dwds_mcast++;
m_freem(m);
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
/* XXX better status? */
return (ENOBUFS);
}
if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
/*
* Spam DWDS vap's w/ multicast traffic.
*/
/* XXX only if dwds in use? */
ieee80211_dwds_mcast(vap, m);
}
}
#ifdef IEEE80211_SUPPORT_MESH
if (vap->iv_opmode != IEEE80211_M_MBSS) {
#endif
ni = ieee80211_find_txnode(vap, eh->ether_dhost);
if (ni == NULL) {
/* NB: ieee80211_find_txnode does stat+msg */
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
m_freem(m);
/* XXX better status? */
return (ENOBUFS);
}
if (ni->ni_associd == 0 &&
(ni->ni_flags & IEEE80211_NODE_ASSOCID)) {
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_OUTPUT,
eh->ether_dhost, NULL,
"sta not associated (type 0x%04x)",
htons(eh->ether_type));
vap->iv_stats.is_tx_notassoc++;
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
m_freem(m);
ieee80211_free_node(ni);
/* XXX better status? */
return (ENOBUFS);
}
#ifdef IEEE80211_SUPPORT_MESH
} else {
if (!IEEE80211_ADDR_EQ(eh->ether_shost, vap->iv_myaddr)) {
/*
* Proxy station only if configured.
*/
if (!ieee80211_mesh_isproxyena(vap)) {
IEEE80211_DISCARD_MAC(vap,
IEEE80211_MSG_OUTPUT |
IEEE80211_MSG_MESH,
eh->ether_dhost, NULL,
"%s", "proxy not enabled");
vap->iv_stats.is_mesh_notproxy++;
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
m_freem(m);
/* XXX better status? */
return (ENOBUFS);
}
IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
"forward frame from DS SA(%6D), DA(%6D)\n",
eh->ether_shost, ":",
eh->ether_dhost, ":");
ieee80211_mesh_proxy_check(vap, eh->ether_shost);
}
ni = ieee80211_mesh_discover(vap, eh->ether_dhost, m);
if (ni == NULL) {
/*
* NB: ieee80211_mesh_discover holds/disposes
* frame (e.g. queueing on path discovery).
*/
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
/* XXX better status? */
return (ENOBUFS);
}
}
#endif
/*
* We've resolved the sender, so attempt to transmit it.
*/
if (vap->iv_state == IEEE80211_S_SLEEP) {
/*
* In power save; queue frame and then wakeup device
* for transmit.
*/
ic->ic_lastdata = ticks;
if (ieee80211_pwrsave(ni, m) != 0)
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
ieee80211_free_node(ni);
ieee80211_new_state(vap, IEEE80211_S_RUN, 0);
return (0);
}
if (ieee80211_vap_pkt_send_dest(vap, m, ni) != 0)
return (ENOBUFS);
return (0);
#undef IS_DWDS
}
/*
* Start method for vap's. All packets from the stack come
* through here. We handle common processing of the packets
* before dispatching them to the underlying device.
*
* if_transmit() requires that the mbuf be consumed by this call
* regardless of the return condition.
*/
int
ieee80211_vap_transmit(struct ifnet *ifp, struct mbuf *m)
{
struct ieee80211vap *vap = ifp->if_softc;
struct ieee80211com *ic = vap->iv_ic;
- struct ifnet *parent = ic->ic_ifp;
- /* NB: parent must be up and running */
- if (!IFNET_IS_UP_RUNNING(parent)) {
- IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
- "%s: ignore queue, parent %s not up+running\n",
- __func__, parent->if_xname);
- m_freem(m);
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- return (ENETDOWN);
- }
-
/*
* No data frames go out unless we're running.
* Note in particular this covers CAC and CSA
* states (though maybe we should check muting
* for CSA).
*/
if (vap->iv_state != IEEE80211_S_RUN &&
vap->iv_state != IEEE80211_S_SLEEP) {
IEEE80211_LOCK(ic);
/* re-check under the com lock to avoid races */
if (vap->iv_state != IEEE80211_S_RUN &&
vap->iv_state != IEEE80211_S_SLEEP) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
"%s: ignore queue, in %s state\n",
__func__, ieee80211_state_name[vap->iv_state]);
vap->iv_stats.is_tx_badstate++;
IEEE80211_UNLOCK(ic);
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
m_freem(m);
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return (ENETDOWN);
}
IEEE80211_UNLOCK(ic);
}
/*
* Sanitize mbuf flags for net80211 use. We cannot
* clear M_PWR_SAV or M_MORE_DATA because these may
* be set for frames that are re-submitted from the
* power save queue.
*
* NB: This must be done before ieee80211_classify as
* it marks EAPOL in frames with M_EAPOL.
*/
m->m_flags &= ~(M_80211_TX - M_PWR_SAV - M_MORE_DATA);
/*
* Bump to the packet transmission path.
* The mbuf will be consumed here.
*/
return (ieee80211_start_pkt(vap, m));
}
void
ieee80211_vap_qflush(struct ifnet *ifp)
{
/* Empty for now */
}
/*
* 802.11 raw output routine.
*
* XXX TODO: this (and other send routines) should correctly
* XXX keep the pwr mgmt bit set if it decides to call into the
* XXX driver to send a frame whilst the state is SLEEP.
*
* Otherwise the peer may decide that we're awake and flood us
* with traffic we are still too asleep to receive!
*/
int
ieee80211_raw_output(struct ieee80211vap *vap, struct ieee80211_node *ni,
struct mbuf *m, const struct ieee80211_bpf_params *params)
{
struct ieee80211com *ic = vap->iv_ic;
+ int error;
/*
* Set node - the caller has taken a reference, so ensure
* that the mbuf has the same node value that
* it would if it were going via the normal path.
*/
m->m_pkthdr.rcvif = (void *)ni;
/*
* Attempt to add bpf transmit parameters.
*
* For now it's ok to fail; the raw_xmit api still takes
* them as an option.
*
* Later on when ic_raw_xmit() has params removed,
* they'll have to be added - so fail the transmit if
* they can't be.
*/
if (params)
(void) ieee80211_add_xmit_params(m, params);
- return (ic->ic_raw_xmit(ni, m, params));
+ error = ic->ic_raw_xmit(ni, m, params);
+ if (error)
+ if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, 1);
+ return (error);
}
/*
* 802.11 output routine. This is (currently) used only to
* connect bpf write calls to the 802.11 layer for injecting
* raw 802.11 frames.
*/
int
ieee80211_output(struct ifnet *ifp, struct mbuf *m,
const struct sockaddr *dst, struct route *ro)
{
#define senderr(e) do { error = (e); goto bad;} while (0)
struct ieee80211_node *ni = NULL;
struct ieee80211vap *vap;
struct ieee80211_frame *wh;
struct ieee80211com *ic = NULL;
int error;
int ret;
if (ifp->if_drv_flags & IFF_DRV_OACTIVE) {
/*
* Short-circuit requests if the vap is marked OACTIVE
* as this can happen because a packet came down through
* ieee80211_start before the vap entered RUN state in
* which case it's ok to just drop the frame. This
* should not be necessary but callers of if_output don't
* check OACTIVE.
*/
senderr(ENETDOWN);
}
vap = ifp->if_softc;
ic = vap->iv_ic;
/*
* Hand to the 802.3 code if not tagged as
* a raw 802.11 frame.
*/
if (dst->sa_family != AF_IEEE80211)
return vap->iv_output(ifp, m, dst, ro);
#ifdef MAC
error = mac_ifnet_check_transmit(ifp, m);
if (error)
senderr(error);
#endif
if (ifp->if_flags & IFF_MONITOR)
senderr(ENETDOWN);
if (!IFNET_IS_UP_RUNNING(ifp))
senderr(ENETDOWN);
if (vap->iv_state == IEEE80211_S_CAC) {
IEEE80211_DPRINTF(vap,
IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH,
"block %s frame in CAC state\n", "raw data");
vap->iv_stats.is_tx_badstate++;
senderr(EIO); /* XXX */
} else if (vap->iv_state == IEEE80211_S_SCAN)
senderr(EIO);
/* XXX bypass bridge, pfil, carp, etc. */
if (m->m_pkthdr.len < sizeof(struct ieee80211_frame_ack))
senderr(EIO); /* XXX */
wh = mtod(m, struct ieee80211_frame *);
if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
IEEE80211_FC0_VERSION_0)
senderr(EIO); /* XXX */
/* locate destination node */
switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
case IEEE80211_FC1_DIR_NODS:
case IEEE80211_FC1_DIR_FROMDS:
ni = ieee80211_find_txnode(vap, wh->i_addr1);
break;
case IEEE80211_FC1_DIR_TODS:
case IEEE80211_FC1_DIR_DSTODS:
if (m->m_pkthdr.len < sizeof(struct ieee80211_frame))
senderr(EIO); /* XXX */
ni = ieee80211_find_txnode(vap, wh->i_addr3);
break;
default:
senderr(EIO); /* XXX */
}
if (ni == NULL) {
/*
* Permit packets w/ bpf params through regardless
* (see below about sa_len).
*/
if (dst->sa_len == 0)
senderr(EHOSTUNREACH);
ni = ieee80211_ref_node(vap->iv_bss);
}
/*
* Sanitize mbuf for net80211 flags leaked from above.
*
* NB: This must be done before ieee80211_classify as
* it marks EAPOL in frames with M_EAPOL.
*/
m->m_flags &= ~M_80211_TX;
/* calculate priority so drivers can find the tx queue */
/* XXX assumes an 802.3 frame */
if (ieee80211_classify(ni, m))
senderr(EIO); /* XXX */
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
IEEE80211_NODE_STAT(ni, tx_data);
if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
IEEE80211_NODE_STAT(ni, tx_mcast);
m->m_flags |= M_MCAST;
} else
IEEE80211_NODE_STAT(ni, tx_ucast);
/* NB: ieee80211_encap does not include 802.11 header */
IEEE80211_NODE_STAT_ADD(ni, tx_bytes, m->m_pkthdr.len);
IEEE80211_TX_LOCK(ic);
/*
* NB: DLT_IEEE802_11_RADIO identifies the parameters are
* present by setting the sa_len field of the sockaddr (yes,
* this is a hack).
* NB: we assume sa_data is suitably aligned to cast.
*/
ret = ieee80211_raw_output(vap, ni, m,
(const struct ieee80211_bpf_params *)(dst->sa_len ?
dst->sa_data : NULL));
IEEE80211_TX_UNLOCK(ic);
return (ret);
bad:
if (m != NULL)
m_freem(m);
if (ni != NULL)
ieee80211_free_node(ni);
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
return error;
#undef senderr
}
/*
* Set the direction field and address fields of an outgoing
* frame. Note this should be called early on in constructing
* a frame as it sets i_fc[1]; other bits can then be or'd in.
*/
void
ieee80211_send_setup(
struct ieee80211_node *ni,
struct mbuf *m,
int type, int tid,
const uint8_t sa[IEEE80211_ADDR_LEN],
const uint8_t da[IEEE80211_ADDR_LEN],
const uint8_t bssid[IEEE80211_ADDR_LEN])
{
#define WH4(wh) ((struct ieee80211_frame_addr4 *)wh)
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211_tx_ampdu *tap;
struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
ieee80211_seq seqno;
IEEE80211_TX_LOCK_ASSERT(ni->ni_ic);
wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | type;
if ((type & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA) {
switch (vap->iv_opmode) {
case IEEE80211_M_STA:
wh->i_fc[1] = IEEE80211_FC1_DIR_TODS;
IEEE80211_ADDR_COPY(wh->i_addr1, bssid);
IEEE80211_ADDR_COPY(wh->i_addr2, sa);
IEEE80211_ADDR_COPY(wh->i_addr3, da);
break;
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
IEEE80211_ADDR_COPY(wh->i_addr1, da);
IEEE80211_ADDR_COPY(wh->i_addr2, sa);
IEEE80211_ADDR_COPY(wh->i_addr3, bssid);
break;
case IEEE80211_M_HOSTAP:
wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
IEEE80211_ADDR_COPY(wh->i_addr1, da);
IEEE80211_ADDR_COPY(wh->i_addr2, bssid);
IEEE80211_ADDR_COPY(wh->i_addr3, sa);
break;
case IEEE80211_M_WDS:
wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS;
IEEE80211_ADDR_COPY(wh->i_addr1, da);
IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
IEEE80211_ADDR_COPY(wh->i_addr3, da);
IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, sa);
break;
case IEEE80211_M_MBSS:
#ifdef IEEE80211_SUPPORT_MESH
if (IEEE80211_IS_MULTICAST(da)) {
wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
/* XXX next hop */
IEEE80211_ADDR_COPY(wh->i_addr1, da);
IEEE80211_ADDR_COPY(wh->i_addr2,
vap->iv_myaddr);
} else {
wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS;
IEEE80211_ADDR_COPY(wh->i_addr1, da);
IEEE80211_ADDR_COPY(wh->i_addr2,
vap->iv_myaddr);
IEEE80211_ADDR_COPY(wh->i_addr3, da);
IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, sa);
}
#endif
break;
case IEEE80211_M_MONITOR: /* NB: to quiet compiler */
break;
}
} else {
wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
IEEE80211_ADDR_COPY(wh->i_addr1, da);
IEEE80211_ADDR_COPY(wh->i_addr2, sa);
#ifdef IEEE80211_SUPPORT_MESH
if (vap->iv_opmode == IEEE80211_M_MBSS)
IEEE80211_ADDR_COPY(wh->i_addr3, sa);
else
#endif
IEEE80211_ADDR_COPY(wh->i_addr3, bssid);
}
*(uint16_t *)&wh->i_dur[0] = 0;
tap = &ni->ni_tx_ampdu[tid];
if (tid != IEEE80211_NONQOS_TID && IEEE80211_AMPDU_RUNNING(tap))
m->m_flags |= M_AMPDU_MPDU;
else {
if (IEEE80211_HAS_SEQ(type & IEEE80211_FC0_TYPE_MASK,
type & IEEE80211_FC0_SUBTYPE_MASK))
seqno = ni->ni_txseqs[tid]++;
else
seqno = 0;
*(uint16_t *)&wh->i_seq[0] =
htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
M_SEQNO_SET(m, seqno);
}
if (IEEE80211_IS_MULTICAST(wh->i_addr1))
m->m_flags |= M_MCAST;
#undef WH4
}
/*
* Send a management frame to the specified node. The node pointer
* must have a reference as the pointer will be passed to the driver
* and potentially held for a long time. If the frame is successfully
* dispatched to the driver, then it is responsible for freeing the
* reference (and potentially freeing up any associated storage);
* otherwise deal with reclaiming any reference (on error).
*/
int
ieee80211_mgmt_output(struct ieee80211_node *ni, struct mbuf *m, int type,
struct ieee80211_bpf_params *params)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct ieee80211_frame *wh;
int ret;
KASSERT(ni != NULL, ("null node"));
if (vap->iv_state == IEEE80211_S_CAC) {
IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH,
ni, "block %s frame in CAC state",
ieee80211_mgt_subtype_name[
(type & IEEE80211_FC0_SUBTYPE_MASK) >>
IEEE80211_FC0_SUBTYPE_SHIFT]);
vap->iv_stats.is_tx_badstate++;
ieee80211_free_node(ni);
m_freem(m);
return EIO; /* XXX */
}
M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT);
if (m == NULL) {
ieee80211_free_node(ni);
return ENOMEM;
}
IEEE80211_TX_LOCK(ic);
wh = mtod(m, struct ieee80211_frame *);
ieee80211_send_setup(ni, m,
IEEE80211_FC0_TYPE_MGT | type, IEEE80211_NONQOS_TID,
vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid);
if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_AUTH, wh->i_addr1,
"encrypting frame (%s)", __func__);
wh->i_fc[1] |= IEEE80211_FC1_PROTECTED;
}
m->m_flags |= M_ENCAP; /* mark encapsulated */
KASSERT(type != IEEE80211_FC0_SUBTYPE_PROBE_RESP, ("probe response?"));
M_WME_SETAC(m, params->ibp_pri);
#ifdef IEEE80211_DEBUG
/* avoid printing too many frames */
if ((ieee80211_msg_debug(vap) && doprint(vap, type)) ||
ieee80211_msg_dumppkts(vap)) {
printf("[%s] send %s on channel %u\n",
ether_sprintf(wh->i_addr1),
ieee80211_mgt_subtype_name[
(type & IEEE80211_FC0_SUBTYPE_MASK) >>
IEEE80211_FC0_SUBTYPE_SHIFT],
ieee80211_chan2ieee(ic, ic->ic_curchan));
}
#endif
IEEE80211_NODE_STAT(ni, tx_mgmt);
ret = ieee80211_raw_output(vap, ni, m, params);
IEEE80211_TX_UNLOCK(ic);
return (ret);
}
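/*
 * Editor's illustration (not from this change): a sketch of the typical
 * caller pattern for ieee80211_mgmt_output() -- the caller supplies its own
 * node reference (consumed by the call, success or failure) and a non-NULL
 * bpf params block carrying at least the WME priority.  The function name,
 * payload handling, and the choice of WME_AC_VO are hypothetical.
 */
#if 0
static int
example_send_action(struct ieee80211_node *ni, const void *payload,
    int payload_len)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_bpf_params params;
	struct mbuf *m;
	uint8_t *frm;

	m = ieee80211_getmgtframe(&frm,
	    ic->ic_headroom + sizeof(struct ieee80211_frame), payload_len);
	if (m == NULL)
		return ENOMEM;
	memcpy(frm, payload, payload_len);
	m->m_pkthdr.len = m->m_len = payload_len;

	memset(&params, 0, sizeof(params));
	params.ibp_pri = WME_AC_VO;	/* arbitrary priority for the sketch */
	/* node reference taken here is owned by ieee80211_mgmt_output */
	return ieee80211_mgmt_output(ieee80211_ref_node(ni), m,
	    IEEE80211_FC0_SUBTYPE_ACTION, &params);
}
#endif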
/*
* Send a null data frame to the specified node. If the station
* is setup for QoS then a QoS Null Data frame is constructed.
* If this is a WDS station then a 4-address frame is constructed.
*
* NB: the caller is assumed to have set up a node reference
* for use; this is necessary to deal with a race condition
* when probing for inactive stations. Like ieee80211_mgmt_output
* we must clean up any node reference on error; however we
* can safely just unref it as we know it will never be the
* last reference to the node.
*/
int
ieee80211_send_nulldata(struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct mbuf *m;
struct ieee80211_frame *wh;
int hdrlen;
uint8_t *frm;
int ret;
if (vap->iv_state == IEEE80211_S_CAC) {
IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT | IEEE80211_MSG_DOTH,
ni, "block %s frame in CAC state", "null data");
ieee80211_unref_node(&ni);
vap->iv_stats.is_tx_badstate++;
return EIO; /* XXX */
}
if (ni->ni_flags & (IEEE80211_NODE_QOS|IEEE80211_NODE_HT))
hdrlen = sizeof(struct ieee80211_qosframe);
else
hdrlen = sizeof(struct ieee80211_frame);
/* NB: only WDS vap's get 4-address frames */
if (vap->iv_opmode == IEEE80211_M_WDS)
hdrlen += IEEE80211_ADDR_LEN;
if (ic->ic_flags & IEEE80211_F_DATAPAD)
hdrlen = roundup(hdrlen, sizeof(uint32_t));
m = ieee80211_getmgtframe(&frm, ic->ic_headroom + hdrlen, 0);
if (m == NULL) {
/* XXX debug msg */
ieee80211_unref_node(&ni);
vap->iv_stats.is_tx_nobuf++;
return ENOMEM;
}
KASSERT(M_LEADINGSPACE(m) >= hdrlen,
("leading space %zd", M_LEADINGSPACE(m)));
M_PREPEND(m, hdrlen, M_NOWAIT);
if (m == NULL) {
/* NB: cannot happen */
ieee80211_free_node(ni);
return ENOMEM;
}
IEEE80211_TX_LOCK(ic);
wh = mtod(m, struct ieee80211_frame *); /* NB: a little lie */
if (ni->ni_flags & IEEE80211_NODE_QOS) {
const int tid = WME_AC_TO_TID(WME_AC_BE);
uint8_t *qos;
ieee80211_send_setup(ni, m,
IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS_NULL,
tid, vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid);
if (vap->iv_opmode == IEEE80211_M_WDS)
qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos;
else
qos = ((struct ieee80211_qosframe *) wh)->i_qos;
qos[0] = tid & IEEE80211_QOS_TID;
if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[WME_AC_BE].wmep_noackPolicy)
qos[0] |= IEEE80211_QOS_ACKPOLICY_NOACK;
qos[1] = 0;
} else {
ieee80211_send_setup(ni, m,
IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_NODATA,
IEEE80211_NONQOS_TID,
vap->iv_myaddr, ni->ni_macaddr, ni->ni_bssid);
}
if (vap->iv_opmode != IEEE80211_M_WDS) {
/* NB: power management bit is never sent by an AP */
if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
vap->iv_opmode != IEEE80211_M_HOSTAP)
wh->i_fc[1] |= IEEE80211_FC1_PWR_MGT;
}
m->m_len = m->m_pkthdr.len = hdrlen;
m->m_flags |= M_ENCAP; /* mark encapsulated */
M_WME_SETAC(m, WME_AC_BE);
IEEE80211_NODE_STAT(ni, tx_data);
IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS, ni,
"send %snull data frame on channel %u, pwr mgt %s",
ni->ni_flags & IEEE80211_NODE_QOS ? "QoS " : "",
ieee80211_chan2ieee(ic, ic->ic_curchan),
wh->i_fc[1] & IEEE80211_FC1_PWR_MGT ? "ena" : "dis");
ret = ieee80211_raw_output(vap, ni, m, NULL);
IEEE80211_TX_UNLOCK(ic);
return (ret);
}
/*
* Assign priority to a frame based on any vlan tag assigned
* to the station and/or any Diffserv setting in an IP header.
* Finally, if an ACM policy is setup (in station mode) it's
* applied.
*/
int
ieee80211_classify(struct ieee80211_node *ni, struct mbuf *m)
{
const struct ether_header *eh = mtod(m, struct ether_header *);
int v_wme_ac, d_wme_ac, ac;
/*
* Always promote PAE/EAPOL frames to high priority.
*/
if (eh->ether_type == htons(ETHERTYPE_PAE)) {
/* NB: mark so others don't need to check header */
m->m_flags |= M_EAPOL;
ac = WME_AC_VO;
goto done;
}
/*
* Non-qos traffic goes to BE.
*/
if ((ni->ni_flags & IEEE80211_NODE_QOS) == 0) {
ac = WME_AC_BE;
goto done;
}
/*
* If node has a vlan tag then all traffic
* to it must have a matching tag.
*/
v_wme_ac = 0;
if (ni->ni_vlan != 0) {
if ((m->m_flags & M_VLANTAG) == 0) {
IEEE80211_NODE_STAT(ni, tx_novlantag);
return 1;
}
if (EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) !=
EVL_VLANOFTAG(ni->ni_vlan)) {
IEEE80211_NODE_STAT(ni, tx_vlanmismatch);
return 1;
}
/* map vlan priority to AC */
v_wme_ac = TID_TO_WME_AC(EVL_PRIOFTAG(ni->ni_vlan));
}
/* XXX m_copydata may be too slow for fast path */
#ifdef INET
if (eh->ether_type == htons(ETHERTYPE_IP)) {
uint8_t tos;
/*
* IP frame, map the DSCP bits from the TOS field.
*/
/* NB: ip header may not be in first mbuf */
m_copydata(m, sizeof(struct ether_header) +
offsetof(struct ip, ip_tos), sizeof(tos), &tos);
tos >>= 5; /* NB: drop ECN + low 3 bits of DSCP, keep precedence */
d_wme_ac = TID_TO_WME_AC(tos);
} else {
#endif /* INET */
#ifdef INET6
if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
uint32_t flow;
uint8_t tos;
/*
* IPv6 frame, map the DSCP bits from the traffic class field.
*/
m_copydata(m, sizeof(struct ether_header) +
offsetof(struct ip6_hdr, ip6_flow), sizeof(flow),
(caddr_t) &flow);
tos = (uint8_t)(ntohl(flow) >> 20);
tos >>= 5; /* NB: drop ECN + low 3 bits of DSCP, keep precedence */
d_wme_ac = TID_TO_WME_AC(tos);
} else {
#endif /* INET6 */
d_wme_ac = WME_AC_BE;
#ifdef INET6
}
#endif
#ifdef INET
}
#endif
/*
* Use highest priority AC.
*/
if (v_wme_ac > d_wme_ac)
ac = v_wme_ac;
else
ac = d_wme_ac;
/*
* Apply ACM policy.
*/
if (ni->ni_vap->iv_opmode == IEEE80211_M_STA) {
static const int acmap[4] = {
WME_AC_BK, /* WME_AC_BE */
WME_AC_BK, /* WME_AC_BK */
WME_AC_BE, /* WME_AC_VI */
WME_AC_VI, /* WME_AC_VO */
};
struct ieee80211com *ic = ni->ni_ic;
while (ac != WME_AC_BK &&
ic->ic_wme.wme_wmeBssChanParams.cap_wmeParams[ac].wmep_acm)
ac = acmap[ac];
}
done:
M_WME_SETAC(m, ac);
return 0;
}
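/*
 * Editor's note, a worked example of the mapping above (assuming the
 * standard TID_TO_WME_AC() table): an IPv4 packet marked DSCP EF has
 * TOS 0xb8, so tos >> 5 == 5 (IP precedence), i.e. UP/TID 5, which maps to
 * WME_AC_VI; CS7 (TOS 0xe0) yields 7 -> WME_AC_VO, CS1 (0x20) yields
 * 1 -> WME_AC_BK, and an unmarked packet (0x00) yields 0 -> WME_AC_BE.
 */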
/*
* Ensure there is sufficient contiguous space to encapsulate the
* 802.11 data frame. If room isn't already there, arrange for it.
* Drivers and cipher modules assume we have done the necessary work
* and fail rudely if they don't find the space they need.
*/
struct mbuf *
ieee80211_mbuf_adjust(struct ieee80211vap *vap, int hdrsize,
struct ieee80211_key *key, struct mbuf *m)
{
#define TO_BE_RECLAIMED (sizeof(struct ether_header) - sizeof(struct llc))
int needed_space = vap->iv_ic->ic_headroom + hdrsize;
if (key != NULL) {
/* XXX belongs in crypto code? */
needed_space += key->wk_cipher->ic_header;
/* XXX frags */
/*
* When crypto is being done in the host we must ensure
* the data are writable for the cipher routines; clone
* a writable mbuf chain.
* XXX handle SWMIC specially
*/
if (key->wk_flags & (IEEE80211_KEY_SWENCRYPT|IEEE80211_KEY_SWENMIC)) {
m = m_unshare(m, M_NOWAIT);
if (m == NULL) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
"%s: cannot get writable mbuf\n", __func__);
vap->iv_stats.is_tx_nobuf++; /* XXX new stat */
return NULL;
}
}
}
/*
* We know we are called just before stripping an Ethernet
* header and prepending an LLC header. This means we know
* there will be
* sizeof(struct ether_header) - sizeof(struct llc)
* bytes recovered, on top of which we need additional space for
* the 802.11 header and any crypto header.
*/
/* XXX check trailing space and copy instead? */
if (M_LEADINGSPACE(m) < needed_space - TO_BE_RECLAIMED) {
struct mbuf *n = m_gethdr(M_NOWAIT, m->m_type);
if (n == NULL) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_OUTPUT,
"%s: cannot expand storage\n", __func__);
vap->iv_stats.is_tx_nobuf++;
m_freem(m);
return NULL;
}
KASSERT(needed_space <= MHLEN,
("not enough room, need %u got %d\n", needed_space, MHLEN));
/*
* Setup new mbuf to have leading space to prepend the
* 802.11 header and any crypto header bits that are
* required (the latter are added when the driver calls
* back to ieee80211_crypto_encap to do crypto encapsulation).
*/
/* NB: must be first 'cuz it clobbers m_data */
m_move_pkthdr(n, m);
n->m_len = 0; /* NB: m_gethdr does not set */
n->m_data += needed_space;
/*
* Pull up Ethernet header to create the expected layout.
* We could use m_pullup but that's overkill (i.e. we don't
* need the actual data) and it cannot fail so do it inline
* for speed.
*/
/* NB: struct ether_header is known to be contiguous */
n->m_len += sizeof(struct ether_header);
m->m_len -= sizeof(struct ether_header);
m->m_data += sizeof(struct ether_header);
/*
* Replace the head of the chain.
*/
n->m_next = m;
m = n;
}
return m;
#undef TO_BE_RECLAIMED
}
/*
* Return the transmit key to use in sending a unicast frame.
* If a unicast key is set we use that. When no unicast key is set
* we fall back to the default transmit key.
*/
static __inline struct ieee80211_key *
ieee80211_crypto_getucastkey(struct ieee80211vap *vap,
struct ieee80211_node *ni)
{
if (IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)) {
if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE ||
IEEE80211_KEY_UNDEFINED(&vap->iv_nw_keys[vap->iv_def_txkey]))
return NULL;
return &vap->iv_nw_keys[vap->iv_def_txkey];
} else {
return &ni->ni_ucastkey;
}
}
/*
* Return the transmit key to use in sending a multicast frame.
* Multicast traffic always uses the group key which is installed as
* the default tx key.
*/
static __inline struct ieee80211_key *
ieee80211_crypto_getmcastkey(struct ieee80211vap *vap,
struct ieee80211_node *ni)
{
if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE ||
IEEE80211_KEY_UNDEFINED(&vap->iv_nw_keys[vap->iv_def_txkey]))
return NULL;
return &vap->iv_nw_keys[vap->iv_def_txkey];
}
/*
* Encapsulate an outbound data frame. The mbuf chain is updated.
* If an error is encountered NULL is returned. The caller is required
* to provide a node reference and pullup the ethernet header in the
* first mbuf.
*
* NB: Packet is assumed to be processed by ieee80211_classify which
* marked EAPOL frames w/ M_EAPOL.
*/
struct mbuf *
ieee80211_encap(struct ieee80211vap *vap, struct ieee80211_node *ni,
struct mbuf *m)
{
#define WH4(wh) ((struct ieee80211_frame_addr4 *)(wh))
#define MC01(mc) ((struct ieee80211_meshcntl_ae01 *)mc)
struct ieee80211com *ic = ni->ni_ic;
#ifdef IEEE80211_SUPPORT_MESH
struct ieee80211_mesh_state *ms = vap->iv_mesh;
struct ieee80211_meshcntl_ae10 *mc;
struct ieee80211_mesh_route *rt = NULL;
int dir = -1;
#endif
struct ether_header eh;
struct ieee80211_frame *wh;
struct ieee80211_key *key;
struct llc *llc;
int hdrsize, hdrspace, datalen, addqos, txfrag, is4addr;
ieee80211_seq seqno;
int meshhdrsize, meshae;
uint8_t *qos;
IEEE80211_TX_LOCK_ASSERT(ic);
/*
* Copy existing Ethernet header to a safe place. The
* rest of the code assumes it's ok to strip it when
* reorganizing state for the final encapsulation.
*/
KASSERT(m->m_len >= sizeof(eh), ("no ethernet header!"));
ETHER_HEADER_COPY(&eh, mtod(m, caddr_t));
/*
* Ensure space for additional headers. First identify
* transmit key to use in calculating any buffer adjustments
* required. This is also used below to do privacy
* encapsulation work. Then calculate the 802.11 header
* size and any padding required by the driver.
*
* Note key may be NULL if we fall back to the default
* transmit key and that is not set. In that case the
* buffer may not be expanded as needed by the cipher
* routines, but they will/should discard it.
*/
if (vap->iv_flags & IEEE80211_F_PRIVACY) {
if (vap->iv_opmode == IEEE80211_M_STA ||
!IEEE80211_IS_MULTICAST(eh.ether_dhost) ||
(vap->iv_opmode == IEEE80211_M_WDS &&
(vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY)))
key = ieee80211_crypto_getucastkey(vap, ni);
else
key = ieee80211_crypto_getmcastkey(vap, ni);
if (key == NULL && (m->m_flags & M_EAPOL) == 0) {
IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO,
eh.ether_dhost,
"no default transmit key (%s) deftxkey %u",
__func__, vap->iv_def_txkey);
vap->iv_stats.is_tx_nodefkey++;
goto bad;
}
} else
key = NULL;
/*
* XXX Some ap's don't handle QoS-encapsulated EAPOL
* frames so suppress use. This may be an issue if other
* ap's require all data frames to be QoS-encapsulated
* once negotiated in which case we'll need to make this
* configurable.
* NB: mesh data frames are QoS.
*/
addqos = ((ni->ni_flags & (IEEE80211_NODE_QOS|IEEE80211_NODE_HT)) ||
(vap->iv_opmode == IEEE80211_M_MBSS)) &&
(m->m_flags & M_EAPOL) == 0;
if (addqos)
hdrsize = sizeof(struct ieee80211_qosframe);
else
hdrsize = sizeof(struct ieee80211_frame);
#ifdef IEEE80211_SUPPORT_MESH
if (vap->iv_opmode == IEEE80211_M_MBSS) {
/*
* Mesh data frames are encapsulated according to the
* rules of Section 11B.8.5 (p.139 of D3.0 spec).
* o Group Addressed data (aka multicast) originating
* at the local sta are sent w/ 3-address format and
* address extension mode 00
* o Individually Addressed data (aka unicast) originating
* at the local sta are sent w/ 4-address format and
* address extension mode 00
* o Group Addressed data forwarded from a non-mesh sta are
* sent w/ 3-address format and address extension mode 01
* o Individually Addressed data from another sta are sent
* w/ 4-address format and address extension mode 10
*/
is4addr = 0; /* NB: don't use, disable */
if (!IEEE80211_IS_MULTICAST(eh.ether_dhost)) {
rt = ieee80211_mesh_rt_find(vap, eh.ether_dhost);
KASSERT(rt != NULL, ("route is NULL"));
dir = IEEE80211_FC1_DIR_DSTODS;
hdrsize += IEEE80211_ADDR_LEN;
if (rt->rt_flags & IEEE80211_MESHRT_FLAGS_PROXY) {
if (IEEE80211_ADDR_EQ(rt->rt_mesh_gate,
vap->iv_myaddr)) {
IEEE80211_NOTE_MAC(vap,
IEEE80211_MSG_MESH,
eh.ether_dhost,
"%s", "trying to send to ourself");
goto bad;
}
meshae = IEEE80211_MESH_AE_10;
meshhdrsize =
sizeof(struct ieee80211_meshcntl_ae10);
} else {
meshae = IEEE80211_MESH_AE_00;
meshhdrsize =
sizeof(struct ieee80211_meshcntl);
}
} else {
dir = IEEE80211_FC1_DIR_FROMDS;
if (!IEEE80211_ADDR_EQ(eh.ether_shost, vap->iv_myaddr)) {
/* proxy group */
meshae = IEEE80211_MESH_AE_01;
meshhdrsize =
sizeof(struct ieee80211_meshcntl_ae01);
} else {
/* group */
meshae = IEEE80211_MESH_AE_00;
meshhdrsize = sizeof(struct ieee80211_meshcntl);
}
}
} else {
#endif
/*
* 4-address frames need to be generated for:
* o packets sent through a WDS vap (IEEE80211_M_WDS)
* o packets sent through a vap marked for relaying
* (e.g. a station operating with dynamic WDS)
*/
is4addr = vap->iv_opmode == IEEE80211_M_WDS ||
((vap->iv_flags_ext & IEEE80211_FEXT_4ADDR) &&
!IEEE80211_ADDR_EQ(eh.ether_shost, vap->iv_myaddr));
if (is4addr)
hdrsize += IEEE80211_ADDR_LEN;
meshhdrsize = meshae = 0;
#ifdef IEEE80211_SUPPORT_MESH
}
#endif
/*
* Honor driver DATAPAD requirement.
*/
if (ic->ic_flags & IEEE80211_F_DATAPAD)
hdrspace = roundup(hdrsize, sizeof(uint32_t));
else
hdrspace = hdrsize;
if (__predict_true((m->m_flags & M_FF) == 0)) {
/*
* Normal frame.
*/
m = ieee80211_mbuf_adjust(vap, hdrspace + meshhdrsize, key, m);
if (m == NULL) {
/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
goto bad;
}
/* NB: this could be optimized 'cuz of ieee80211_mbuf_adjust */
m_adj(m, sizeof(struct ether_header) - sizeof(struct llc));
llc = mtod(m, struct llc *);
llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
llc->llc_control = LLC_UI;
llc->llc_snap.org_code[0] = 0;
llc->llc_snap.org_code[1] = 0;
llc->llc_snap.org_code[2] = 0;
llc->llc_snap.ether_type = eh.ether_type;
} else {
#ifdef IEEE80211_SUPPORT_SUPERG
/*
* Aggregated frame.
*/
m = ieee80211_ff_encap(vap, m, hdrspace + meshhdrsize, key);
if (m == NULL)
#endif
goto bad;
}
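/*
 * The non-fast-frame path above yields the RFC 1042 SNAP encapsulation:
 * DSAP/SSAP 0xaa, control 0x03 (UI), OUI 00:00:00, followed by the
 * original Ethernet type, so the upper-layer protocol is preserved
 * across the 802.11 re-encapsulation.
 */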
datalen = m->m_pkthdr.len; /* NB: w/o 802.11 header */
M_PREPEND(m, hdrspace + meshhdrsize, M_NOWAIT);
if (m == NULL) {
vap->iv_stats.is_tx_nobuf++;
goto bad;
}
wh = mtod(m, struct ieee80211_frame *);
wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA;
*(uint16_t *)wh->i_dur = 0;
qos = NULL; /* NB: quiet compiler */
if (is4addr) {
wh->i_fc[1] = IEEE80211_FC1_DIR_DSTODS;
IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_macaddr);
IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost);
IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, eh.ether_shost);
} else switch (vap->iv_opmode) {
case IEEE80211_M_STA:
wh->i_fc[1] = IEEE80211_FC1_DIR_TODS;
IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_bssid);
IEEE80211_ADDR_COPY(wh->i_addr2, eh.ether_shost);
IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_dhost);
break;
case IEEE80211_M_IBSS:
case IEEE80211_M_AHDEMO:
wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost);
IEEE80211_ADDR_COPY(wh->i_addr2, eh.ether_shost);
/*
* NB: always use the bssid from iv_bss as the
* neighbor's may be stale after an ibss merge
*/
IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_bss->ni_bssid);
break;
case IEEE80211_M_HOSTAP:
wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost);
IEEE80211_ADDR_COPY(wh->i_addr2, ni->ni_bssid);
IEEE80211_ADDR_COPY(wh->i_addr3, eh.ether_shost);
break;
#ifdef IEEE80211_SUPPORT_MESH
case IEEE80211_M_MBSS:
/* NB: offset by hdrspace to deal with DATAPAD */
mc = (struct ieee80211_meshcntl_ae10 *)
(mtod(m, uint8_t *) + hdrspace);
wh->i_fc[1] = dir;
switch (meshae) {
case IEEE80211_MESH_AE_00: /* no proxy */
mc->mc_flags = 0;
if (dir == IEEE80211_FC1_DIR_DSTODS) { /* ucast */
IEEE80211_ADDR_COPY(wh->i_addr1,
ni->ni_macaddr);
IEEE80211_ADDR_COPY(wh->i_addr2,
vap->iv_myaddr);
IEEE80211_ADDR_COPY(wh->i_addr3,
eh.ether_dhost);
IEEE80211_ADDR_COPY(WH4(wh)->i_addr4,
eh.ether_shost);
qos =((struct ieee80211_qosframe_addr4 *)
wh)->i_qos;
} else if (dir == IEEE80211_FC1_DIR_FROMDS) {
/* mcast */
IEEE80211_ADDR_COPY(wh->i_addr1,
eh.ether_dhost);
IEEE80211_ADDR_COPY(wh->i_addr2,
vap->iv_myaddr);
IEEE80211_ADDR_COPY(wh->i_addr3,
eh.ether_shost);
qos = ((struct ieee80211_qosframe *)
wh)->i_qos;
}
break;
case IEEE80211_MESH_AE_01: /* mcast, proxy */
wh->i_fc[1] = IEEE80211_FC1_DIR_FROMDS;
IEEE80211_ADDR_COPY(wh->i_addr1, eh.ether_dhost);
IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_myaddr);
mc->mc_flags = 1;
IEEE80211_ADDR_COPY(MC01(mc)->mc_addr4,
eh.ether_shost);
qos = ((struct ieee80211_qosframe *) wh)->i_qos;
break;
case IEEE80211_MESH_AE_10: /* ucast, proxy */
KASSERT(rt != NULL, ("route is NULL"));
IEEE80211_ADDR_COPY(wh->i_addr1, rt->rt_nexthop);
IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
IEEE80211_ADDR_COPY(wh->i_addr3, rt->rt_mesh_gate);
IEEE80211_ADDR_COPY(WH4(wh)->i_addr4, vap->iv_myaddr);
mc->mc_flags = IEEE80211_MESH_AE_10;
IEEE80211_ADDR_COPY(mc->mc_addr5, eh.ether_dhost);
IEEE80211_ADDR_COPY(mc->mc_addr6, eh.ether_shost);
qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos;
break;
default:
KASSERT(0, ("meshae %d", meshae));
break;
}
mc->mc_ttl = ms->ms_ttl;
ms->ms_seq++;
LE_WRITE_4(mc->mc_seq, ms->ms_seq);
break;
#endif
case IEEE80211_M_WDS: /* NB: is4addr should always be true */
default:
goto bad;
}
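/*
 * Address usage for the cases handled above:
 *   4-address/WDS (DSTODS):  addr1=RA (peer), addr2=TA (us),
 *                            addr3=DA, addr4=SA
 *   STA (TODS):              addr1=BSSID, addr2=SA, addr3=DA
 *   IBSS/AHDEMO (NODS):      addr1=DA, addr2=SA, addr3=BSSID
 *   HOSTAP (FROMDS):         addr1=DA, addr2=BSSID, addr3=SA
 */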
if (m->m_flags & M_MORE_DATA)
wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
if (addqos) {
int ac, tid;
if (is4addr) {
qos = ((struct ieee80211_qosframe_addr4 *) wh)->i_qos;
/* NB: mesh case handled earlier */
} else if (vap->iv_opmode != IEEE80211_M_MBSS)
qos = ((struct ieee80211_qosframe *) wh)->i_qos;
ac = M_WME_GETAC(m);
/* map from access class/queue to 11e header priority value */
tid = WME_AC_TO_TID(ac);
qos[0] = tid & IEEE80211_QOS_TID;
if (ic->ic_wme.wme_wmeChanParams.cap_wmeParams[ac].wmep_noackPolicy)
qos[0] |= IEEE80211_QOS_ACKPOLICY_NOACK;
#ifdef IEEE80211_SUPPORT_MESH
if (vap->iv_opmode == IEEE80211_M_MBSS)
qos[1] = IEEE80211_QOS_MC;
else
#endif
qos[1] = 0;
wh->i_fc[0] |= IEEE80211_FC0_SUBTYPE_QOS;
if ((m->m_flags & M_AMPDU_MPDU) == 0) {
/*
* NB: don't assign a sequence # to potential
* aggregates; we expect this happens at the
* point the frame comes off any aggregation q
* as otherwise we may introduce holes in the
* BA sequence space and/or make window accounting
* more difficult.
*
* XXX may want to control this with a driver
* capability; this may also change when we pull
* aggregation up into net80211
*/
seqno = ni->ni_txseqs[tid]++;
*(uint16_t *)wh->i_seq =
htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
M_SEQNO_SET(m, seqno);
}
} else {
seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]++;
*(uint16_t *)wh->i_seq =
htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
M_SEQNO_SET(m, seqno);
}
/* check if xmit fragmentation is required */
txfrag = (m->m_pkthdr.len > vap->iv_fragthreshold &&
!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
(vap->iv_caps & IEEE80211_C_TXFRAG) &&
(m->m_flags & (M_FF | M_AMPDU_MPDU)) == 0);
if (key != NULL) {
/*
* IEEE 802.1X: send EAPOL frames always in the clear.
* WPA/WPA2: encrypt EAPOL keys when pairwise keys are set.
*/
if ((m->m_flags & M_EAPOL) == 0 ||
((vap->iv_flags & IEEE80211_F_WPA) &&
(vap->iv_opmode == IEEE80211_M_STA ?
!IEEE80211_KEY_UNDEFINED(key) :
!IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)))) {
wh->i_fc[1] |= IEEE80211_FC1_PROTECTED;
if (!ieee80211_crypto_enmic(vap, key, m, txfrag)) {
IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_OUTPUT,
eh.ether_dhost,
"%s", "enmic failed, discard frame");
vap->iv_stats.is_crypto_enmicfail++;
goto bad;
}
}
}
if (txfrag && !ieee80211_fragment(vap, m, hdrsize,
key != NULL ? key->wk_cipher->ic_header : 0, vap->iv_fragthreshold))
goto bad;
m->m_flags |= M_ENCAP; /* mark encapsulated */
IEEE80211_NODE_STAT(ni, tx_data);
if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
IEEE80211_NODE_STAT(ni, tx_mcast);
m->m_flags |= M_MCAST;
} else
IEEE80211_NODE_STAT(ni, tx_ucast);
IEEE80211_NODE_STAT_ADD(ni, tx_bytes, datalen);
return m;
bad:
if (m != NULL)
m_freem(m);
return NULL;
#undef WH4
#undef MC01
}
/*
* Fragment the frame according to the specified mtu.
* The size of the 802.11 header (w/o padding) is provided
* so we don't need to recalculate it. We create a new
* mbuf for each fragment and chain it through m_nextpkt;
* we might be able to optimize this by reusing the original
* packet's mbufs but that is significantly more complicated.
*/
static int
ieee80211_fragment(struct ieee80211vap *vap, struct mbuf *m0,
u_int hdrsize, u_int ciphdrsize, u_int mtu)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_frame *wh, *whf;
struct mbuf *m, *prev, *next;
u_int totalhdrsize, fragno, fragsize, off, remainder, payload;
u_int hdrspace;
KASSERT(m0->m_nextpkt == NULL, ("mbuf already chained?"));
KASSERT(m0->m_pkthdr.len > mtu,
("pktlen %u mtu %u", m0->m_pkthdr.len, mtu));
/*
* Honor driver DATAPAD requirement.
*/
if (ic->ic_flags & IEEE80211_F_DATAPAD)
hdrspace = roundup(hdrsize, sizeof(uint32_t));
else
hdrspace = hdrsize;
wh = mtod(m0, struct ieee80211_frame *);
/* NB: mark the first frag; it will be propagated below */
wh->i_fc[1] |= IEEE80211_FC1_MORE_FRAG;
totalhdrsize = hdrspace + ciphdrsize;
fragno = 1;
off = mtu - ciphdrsize;
remainder = m0->m_pkthdr.len - off;
prev = m0;
do {
fragsize = totalhdrsize + remainder;
if (fragsize > mtu)
fragsize = mtu;
/* XXX fragsize can be >2048! */
KASSERT(fragsize < MCLBYTES,
("fragment size %u too big!", fragsize));
if (fragsize > MHLEN)
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
else
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL)
goto bad;
/* leave room to prepend any cipher header */
m_align(m, fragsize - ciphdrsize);
/*
* Form the header in the fragment. Note that since
* we mark the first fragment with the MORE_FRAG bit
* it automatically is propagated to each fragment; we
* need only clear it on the last fragment (done below).
* NB: frags 1+ don't have the Mesh Control field present.
*/
whf = mtod(m, struct ieee80211_frame *);
memcpy(whf, wh, hdrsize);
#ifdef IEEE80211_SUPPORT_MESH
if (vap->iv_opmode == IEEE80211_M_MBSS) {
if (IEEE80211_IS_DSTODS(wh))
((struct ieee80211_qosframe_addr4 *)
whf)->i_qos[1] &= ~IEEE80211_QOS_MC;
else
((struct ieee80211_qosframe *)
whf)->i_qos[1] &= ~IEEE80211_QOS_MC;
}
#endif
*(uint16_t *)&whf->i_seq[0] |= htole16(
(fragno & IEEE80211_SEQ_FRAG_MASK) <<
IEEE80211_SEQ_FRAG_SHIFT);
fragno++;
payload = fragsize - totalhdrsize;
/* NB: destination is known to be contiguous */
m_copydata(m0, off, payload, mtod(m, uint8_t *) + hdrspace);
m->m_len = hdrspace + payload;
m->m_pkthdr.len = hdrspace + payload;
m->m_flags |= M_FRAG;
/* chain up the fragment */
prev->m_nextpkt = m;
prev = m;
/* deduct fragment just formed */
remainder -= payload;
off += payload;
} while (remainder != 0);
/* set the last fragment */
m->m_flags |= M_LASTFRAG;
whf->i_fc[1] &= ~IEEE80211_FC1_MORE_FRAG;
/* strip first mbuf now that everything has been copied */
m_adj(m0, -(m0->m_pkthdr.len - (mtu - ciphdrsize)));
m0->m_flags |= M_FIRSTFRAG | M_FRAG;
vap->iv_stats.is_tx_fragframes++;
vap->iv_stats.is_tx_frags += fragno-1;
return 1;
bad:
/* reclaim fragments but leave original frame for caller to free */
for (m = m0->m_nextpkt; m != NULL; m = next) {
next = m->m_nextpkt;
m->m_nextpkt = NULL; /* XXX paranoid */
m_freem(m);
}
m0->m_nextpkt = NULL;
return 0;
}
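/*
 * Illustrative sketch, not part of the original source: the loop above
 * keeps mtu - ciphdrsize bytes (802.11 header included) in the original
 * mbuf and moves the rest into follow-on fragments that each carry at
 * most mtu - (hdrspace + ciphdrsize) bytes of payload.  The hypothetical
 * helper below computes the resulting fragment count for a frame whose
 * length exceeds the fragmentation threshold.
 */
static __inline u_int
example_fragment_count(u_int pktlen, u_int hdrspace, u_int ciphdrsize,
    u_int mtu)
{
	u_int first = mtu - ciphdrsize;		/* stays in the original mbuf */
	u_int remainder = pktlen - first;	/* spills into new fragments */
	u_int per_frag = mtu - (hdrspace + ciphdrsize);

	return (1 + howmany(remainder, per_frag));
}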
/*
* Add a supported rates element id to a frame.
*/
uint8_t *
ieee80211_add_rates(uint8_t *frm, const struct ieee80211_rateset *rs)
{
int nrates;
*frm++ = IEEE80211_ELEMID_RATES;
nrates = rs->rs_nrates;
if (nrates > IEEE80211_RATE_SIZE)
nrates = IEEE80211_RATE_SIZE;
*frm++ = nrates;
memcpy(frm, rs->rs_rates, nrates);
return frm + nrates;
}
/*
* Add an extended supported rates element id to a frame.
*/
uint8_t *
ieee80211_add_xrates(uint8_t *frm, const struct ieee80211_rateset *rs)
{
/*
* Add an extended supported rates element if operating in 11g mode.
*/
if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
int nrates = rs->rs_nrates - IEEE80211_RATE_SIZE;
*frm++ = IEEE80211_ELEMID_XRATES;
*frm++ = nrates;
memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates);
frm += nrates;
}
return frm;
}
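/*
 * Usage sketch (not from the original source): callers emit both elements
 * back to back and the split happens automatically; e.g. a 12-rate 11g
 * rate set becomes 8 rates in the Supported Rates element and the
 * remaining 4 in Extended Supported Rates (IEEE80211_RATE_SIZE, nominally
 * 8, is the cutoff):
 *
 *	frm = ieee80211_add_rates(frm, rs);
 *	frm = ieee80211_add_xrates(frm, rs);
 */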
/*
* Add an ssid element to a frame.
*/
uint8_t *
ieee80211_add_ssid(uint8_t *frm, const uint8_t *ssid, u_int len)
{
*frm++ = IEEE80211_ELEMID_SSID;
*frm++ = len;
memcpy(frm, ssid, len);
return frm + len;
}
/*
* Add an erp element to a frame.
*/
static uint8_t *
ieee80211_add_erp(uint8_t *frm, struct ieee80211com *ic)
{
uint8_t erp;
*frm++ = IEEE80211_ELEMID_ERP;
*frm++ = 1;
erp = 0;
if (ic->ic_nonerpsta != 0)
erp |= IEEE80211_ERP_NON_ERP_PRESENT;
if (ic->ic_flags & IEEE80211_F_USEPROT)
erp |= IEEE80211_ERP_USE_PROTECTION;
if (ic->ic_flags & IEEE80211_F_USEBARKER)
erp |= IEEE80211_ERP_LONG_PREAMBLE;
*frm++ = erp;
return frm;
}
/*
* Add a CFParams element to a frame.
*/
static uint8_t *
ieee80211_add_cfparms(uint8_t *frm, struct ieee80211com *ic)
{
#define ADDSHORT(frm, v) do { \
LE_WRITE_2(frm, v); \
frm += 2; \
} while (0)
*frm++ = IEEE80211_ELEMID_CFPARMS;
*frm++ = 6;
*frm++ = 0; /* CFP count */
*frm++ = 2; /* CFP period */
ADDSHORT(frm, 0); /* CFP MaxDuration (TU) */
ADDSHORT(frm, 0); /* CFP CurRemaining (TU) */
return frm;
#undef ADDSHORT
}
static __inline uint8_t *
add_appie(uint8_t *frm, const struct ieee80211_appie *ie)
{
memcpy(frm, ie->ie_data, ie->ie_len);
return frm + ie->ie_len;
}
static __inline uint8_t *
add_ie(uint8_t *frm, const uint8_t *ie)
{
memcpy(frm, ie, 2 + ie[1]);
return frm + 2 + ie[1];
}
#define WME_OUI_BYTES 0x00, 0x50, 0xf2
/*
* Add a WME information element to a frame.
*/
static uint8_t *
ieee80211_add_wme_info(uint8_t *frm, struct ieee80211_wme_state *wme)
{
static const struct ieee80211_wme_info info = {
.wme_id = IEEE80211_ELEMID_VENDOR,
.wme_len = sizeof(struct ieee80211_wme_info) - 2,
.wme_oui = { WME_OUI_BYTES },
.wme_type = WME_OUI_TYPE,
.wme_subtype = WME_INFO_OUI_SUBTYPE,
.wme_version = WME_VERSION,
.wme_info = 0,
};
memcpy(frm, &info, sizeof(info));
return frm + sizeof(info);
}
/*
* Add a WME parameters element to a frame.
*/
static uint8_t *
ieee80211_add_wme_param(uint8_t *frm, struct ieee80211_wme_state *wme)
{
#define SM(_v, _f) (((_v) << _f##_S) & _f)
#define ADDSHORT(frm, v) do { \
LE_WRITE_2(frm, v); \
frm += 2; \
} while (0)
/* NB: this works 'cuz a param has an info at the front */
static const struct ieee80211_wme_info param = {
.wme_id = IEEE80211_ELEMID_VENDOR,
.wme_len = sizeof(struct ieee80211_wme_param) - 2,
.wme_oui = { WME_OUI_BYTES },
.wme_type = WME_OUI_TYPE,
.wme_subtype = WME_PARAM_OUI_SUBTYPE,
.wme_version = WME_VERSION,
};
int i;
memcpy(frm, &param, sizeof(param));
frm += __offsetof(struct ieee80211_wme_info, wme_info);
*frm++ = wme->wme_bssChanParams.cap_info; /* AC info */
*frm++ = 0; /* reserved field */
for (i = 0; i < WME_NUM_AC; i++) {
const struct wmeParams *ac =
&wme->wme_bssChanParams.cap_wmeParams[i];
*frm++ = SM(i, WME_PARAM_ACI)
| SM(ac->wmep_acm, WME_PARAM_ACM)
| SM(ac->wmep_aifsn, WME_PARAM_AIFSN)
;
*frm++ = SM(ac->wmep_logcwmax, WME_PARAM_LOGCWMAX)
| SM(ac->wmep_logcwmin, WME_PARAM_LOGCWMIN)
;
ADDSHORT(frm, ac->wmep_txopLimit);
}
return frm;
#undef SM
#undef ADDSHORT
}
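/*
 * The SM() macro above shifts a field value into position and masks it:
 * SM(v, F) == ((v << F_S) & F).  The first per-AC byte written in the
 * loop therefore combines the ACI index, the ACM (admission control)
 * bit and the AIFSN into a single octet, as the WME parameter record
 * layout expects.
 */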
#undef WME_OUI_BYTES
/*
* Add an 11h Power Constraint element to a frame.
*/
static uint8_t *
ieee80211_add_powerconstraint(uint8_t *frm, struct ieee80211vap *vap)
{
const struct ieee80211_channel *c = vap->iv_bss->ni_chan;
/* XXX per-vap tx power limit? */
int8_t limit = vap->iv_ic->ic_txpowlimit / 2;
frm[0] = IEEE80211_ELEMID_PWRCNSTR;
frm[1] = 1;
frm[2] = c->ic_maxregpower > limit ? c->ic_maxregpower - limit : 0;
return frm + 3;
}
/*
* Add an 11h Power Capability element to a frame.
*/
static uint8_t *
ieee80211_add_powercapability(uint8_t *frm, const struct ieee80211_channel *c)
{
frm[0] = IEEE80211_ELEMID_PWRCAP;
frm[1] = 2;
frm[2] = c->ic_minpower;
frm[3] = c->ic_maxpower;
return frm + 4;
}
/*
* Add an 11h Supported Channels element to a frame.
*/
static uint8_t *
ieee80211_add_supportedchannels(uint8_t *frm, struct ieee80211com *ic)
{
static const int ielen = 26;
frm[0] = IEEE80211_ELEMID_SUPPCHAN;
frm[1] = ielen;
/* XXX not correct */
memcpy(frm+2, ic->ic_chan_avail, ielen);
return frm + 2 + ielen;
}
/*
* Add an 11h Quiet time element to a frame.
*/
static uint8_t *
ieee80211_add_quiet(uint8_t *frm, struct ieee80211vap *vap)
{
struct ieee80211_quiet_ie *quiet = (struct ieee80211_quiet_ie *) frm;
quiet->quiet_ie = IEEE80211_ELEMID_QUIET;
quiet->len = 6;
if (vap->iv_quiet_count_value == 1)
vap->iv_quiet_count_value = vap->iv_quiet_count;
else if (vap->iv_quiet_count_value > 1)
vap->iv_quiet_count_value--;
if (vap->iv_quiet_count_value == 0) {
/* value 0 is reserved as per the 802.11h standard */
vap->iv_quiet_count_value = 1;
}
quiet->tbttcount = vap->iv_quiet_count_value;
quiet->period = vap->iv_quiet_period;
quiet->duration = htole16(vap->iv_quiet_duration);
quiet->offset = htole16(vap->iv_quiet_offset);
return frm + sizeof(*quiet);
}
/*
* Add an 11h Channel Switch Announcement element to a frame.
* Note that we use the per-vap CSA count to adjust the global
* counter so we can use this routine to form probe response
* frames and get the current count.
*/
static uint8_t *
ieee80211_add_csa(uint8_t *frm, struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_csa_ie *csa = (struct ieee80211_csa_ie *) frm;
csa->csa_ie = IEEE80211_ELEMID_CSA;
csa->csa_len = 3;
csa->csa_mode = 1; /* XXX force quiet on channel */
csa->csa_newchan = ieee80211_chan2ieee(ic, ic->ic_csa_newchan);
csa->csa_count = ic->ic_csa_count - vap->iv_csa_count;
return frm + sizeof(*csa);
}
/*
* Add an 11h country information element to a frame.
*/
static uint8_t *
ieee80211_add_countryie(uint8_t *frm, struct ieee80211com *ic)
{
if (ic->ic_countryie == NULL ||
ic->ic_countryie_chan != ic->ic_bsschan) {
/*
* Handle lazy construction of ie. This is done on
* first use and after a channel change that requires
* re-calculation.
*/
if (ic->ic_countryie != NULL)
IEEE80211_FREE(ic->ic_countryie, M_80211_NODE_IE);
ic->ic_countryie = ieee80211_alloc_countryie(ic);
if (ic->ic_countryie == NULL)
return frm;
ic->ic_countryie_chan = ic->ic_bsschan;
}
return add_appie(frm, ic->ic_countryie);
}
uint8_t *
ieee80211_add_wpa(uint8_t *frm, const struct ieee80211vap *vap)
{
if (vap->iv_flags & IEEE80211_F_WPA1 && vap->iv_wpa_ie != NULL)
return (add_ie(frm, vap->iv_wpa_ie));
else {
/* XXX else complain? */
return (frm);
}
}
uint8_t *
ieee80211_add_rsn(uint8_t *frm, const struct ieee80211vap *vap)
{
if (vap->iv_flags & IEEE80211_F_WPA2 && vap->iv_rsn_ie != NULL)
return (add_ie(frm, vap->iv_rsn_ie));
else {
/* XXX else complain? */
return (frm);
}
}
uint8_t *
ieee80211_add_qos(uint8_t *frm, const struct ieee80211_node *ni)
{
if (ni->ni_flags & IEEE80211_NODE_QOS) {
*frm++ = IEEE80211_ELEMID_QOS;
*frm++ = 1;
*frm++ = 0;
}
return (frm);
}
/*
* Send a probe request frame with the specified ssid
* and any optional information element data.
*/
int
ieee80211_send_probereq(struct ieee80211_node *ni,
const uint8_t sa[IEEE80211_ADDR_LEN],
const uint8_t da[IEEE80211_ADDR_LEN],
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t *ssid, size_t ssidlen)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
const struct ieee80211_txparam *tp;
struct ieee80211_bpf_params params;
struct ieee80211_frame *wh;
const struct ieee80211_rateset *rs;
struct mbuf *m;
uint8_t *frm;
int ret;
if (vap->iv_state == IEEE80211_S_CAC) {
IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, ni,
"block %s frame in CAC state", "probe request");
vap->iv_stats.is_tx_badstate++;
return EIO; /* XXX */
}
/*
* Hold a reference on the node so it doesn't go away until after
* the xmit is complete all the way in the driver. On error we
* will remove our reference.
*/
IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
"ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n",
__func__, __LINE__,
ni, ether_sprintf(ni->ni_macaddr),
ieee80211_node_refcnt(ni)+1);
ieee80211_ref_node(ni);
/*
* prreq frame format
* [tlv] ssid
* [tlv] supported rates
* [tlv] RSN (optional)
* [tlv] extended supported rates
* [tlv] WPA (optional)
* [tlv] user-specified ie's
*/
m = ieee80211_getmgtframe(&frm,
ic->ic_headroom + sizeof(struct ieee80211_frame),
2 + IEEE80211_NWID_LEN
+ 2 + IEEE80211_RATE_SIZE
+ sizeof(struct ieee80211_ie_wpa)
+ 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
+ sizeof(struct ieee80211_ie_wpa)
+ (vap->iv_appie_probereq != NULL ?
vap->iv_appie_probereq->ie_len : 0)
);
if (m == NULL) {
vap->iv_stats.is_tx_nobuf++;
ieee80211_free_node(ni);
return ENOMEM;
}
frm = ieee80211_add_ssid(frm, ssid, ssidlen);
rs = ieee80211_get_suprates(ic, ic->ic_curchan);
frm = ieee80211_add_rates(frm, rs);
frm = ieee80211_add_rsn(frm, vap);
frm = ieee80211_add_xrates(frm, rs);
frm = ieee80211_add_wpa(frm, vap);
if (vap->iv_appie_probereq != NULL)
frm = add_appie(frm, vap->iv_appie_probereq);
m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
KASSERT(M_LEADINGSPACE(m) >= sizeof(struct ieee80211_frame),
("leading space %zd", M_LEADINGSPACE(m)));
M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT);
if (m == NULL) {
/* NB: cannot happen */
ieee80211_free_node(ni);
return ENOMEM;
}
IEEE80211_TX_LOCK(ic);
wh = mtod(m, struct ieee80211_frame *);
ieee80211_send_setup(ni, m,
IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ,
IEEE80211_NONQOS_TID, sa, da, bssid);
/* XXX power management? */
m->m_flags |= M_ENCAP; /* mark encapsulated */
M_WME_SETAC(m, WME_AC_BE);
IEEE80211_NODE_STAT(ni, tx_probereq);
IEEE80211_NODE_STAT(ni, tx_mgmt);
IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS,
"send probe req on channel %u bssid %s ssid \"%.*s\"\n",
ieee80211_chan2ieee(ic, ic->ic_curchan), ether_sprintf(bssid),
ssidlen, ssid);
memset(&params, 0, sizeof(params));
params.ibp_pri = M_WME_GETAC(m);
tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)];
params.ibp_rate0 = tp->mgmtrate;
if (IEEE80211_IS_MULTICAST(da)) {
params.ibp_flags |= IEEE80211_BPF_NOACK;
params.ibp_try0 = 1;
} else
params.ibp_try0 = tp->maxretry;
params.ibp_power = ni->ni_txpower;
ret = ieee80211_raw_output(vap, ni, m, &params);
IEEE80211_TX_UNLOCK(ic);
return (ret);
}
/*
* Calculate capability information for mgt frames.
*/
uint16_t
ieee80211_getcapinfo(struct ieee80211vap *vap, struct ieee80211_channel *chan)
{
struct ieee80211com *ic = vap->iv_ic;
uint16_t capinfo;
KASSERT(vap->iv_opmode != IEEE80211_M_STA, ("station mode"));
if (vap->iv_opmode == IEEE80211_M_HOSTAP)
capinfo = IEEE80211_CAPINFO_ESS;
else if (vap->iv_opmode == IEEE80211_M_IBSS)
capinfo = IEEE80211_CAPINFO_IBSS;
else
capinfo = 0;
if (vap->iv_flags & IEEE80211_F_PRIVACY)
capinfo |= IEEE80211_CAPINFO_PRIVACY;
if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
IEEE80211_IS_CHAN_2GHZ(chan))
capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
if (ic->ic_flags & IEEE80211_F_SHSLOT)
capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
if (IEEE80211_IS_CHAN_5GHZ(chan) && (vap->iv_flags & IEEE80211_F_DOTH))
capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT;
return capinfo;
}
/*
* Send a management frame. The node is for the destination (or ic_bss
* when in station mode). Nodes other than ic_bss have their reference
* count bumped to reflect our use for an indeterminate time.
*/
int
ieee80211_send_mgmt(struct ieee80211_node *ni, int type, int arg)
{
#define HTFLAGS (IEEE80211_NODE_HT | IEEE80211_NODE_HTCOMPAT)
#define senderr(_x, _v) do { vap->iv_stats._v++; ret = _x; goto bad; } while (0)
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct ieee80211_node *bss = vap->iv_bss;
struct ieee80211_bpf_params params;
struct mbuf *m;
uint8_t *frm;
uint16_t capinfo;
int has_challenge, is_shared_key, ret, status;
KASSERT(ni != NULL, ("null node"));
/*
* Hold a reference on the node so it doesn't go away until after
* the xmit is complete all the way in the driver. On error we
* will remove our reference.
*/
IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
"ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n",
__func__, __LINE__,
ni, ether_sprintf(ni->ni_macaddr),
ieee80211_node_refcnt(ni)+1);
ieee80211_ref_node(ni);
memset(&params, 0, sizeof(params));
switch (type) {
case IEEE80211_FC0_SUBTYPE_AUTH:
status = arg >> 16;
arg &= 0xffff;
has_challenge = ((arg == IEEE80211_AUTH_SHARED_CHALLENGE ||
arg == IEEE80211_AUTH_SHARED_RESPONSE) &&
ni->ni_challenge != NULL);
/*
* Deduce whether we're doing open authentication or
* shared key authentication. We do the latter if
* we're in the middle of a shared key authentication
* handshake or if we're initiating an authentication
* request and configured to use shared key.
*/
is_shared_key = has_challenge ||
arg >= IEEE80211_AUTH_SHARED_RESPONSE ||
(arg == IEEE80211_AUTH_SHARED_REQUEST &&
bss->ni_authmode == IEEE80211_AUTH_SHARED);
m = ieee80211_getmgtframe(&frm,
ic->ic_headroom + sizeof(struct ieee80211_frame),
3 * sizeof(uint16_t)
+ (has_challenge && status == IEEE80211_STATUS_SUCCESS ?
sizeof(uint16_t)+IEEE80211_CHALLENGE_LEN : 0)
);
if (m == NULL)
senderr(ENOMEM, is_tx_nobuf);
((uint16_t *)frm)[0] =
(is_shared_key) ? htole16(IEEE80211_AUTH_ALG_SHARED)
: htole16(IEEE80211_AUTH_ALG_OPEN);
((uint16_t *)frm)[1] = htole16(arg); /* sequence number */
((uint16_t *)frm)[2] = htole16(status);/* status */
if (has_challenge && status == IEEE80211_STATUS_SUCCESS) {
((uint16_t *)frm)[3] =
htole16((IEEE80211_CHALLENGE_LEN << 8) |
IEEE80211_ELEMID_CHALLENGE);
memcpy(&((uint16_t *)frm)[4], ni->ni_challenge,
IEEE80211_CHALLENGE_LEN);
m->m_pkthdr.len = m->m_len =
4 * sizeof(uint16_t) + IEEE80211_CHALLENGE_LEN;
if (arg == IEEE80211_AUTH_SHARED_RESPONSE) {
IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni,
"request encrypt frame (%s)", __func__);
/* mark frame for encryption */
params.ibp_flags |= IEEE80211_BPF_CRYPTO;
}
} else
m->m_pkthdr.len = m->m_len = 3 * sizeof(uint16_t);
/* XXX not right for shared key */
if (status == IEEE80211_STATUS_SUCCESS)
IEEE80211_NODE_STAT(ni, tx_auth);
else
IEEE80211_NODE_STAT(ni, tx_auth_fail);
if (vap->iv_opmode == IEEE80211_M_STA)
ieee80211_add_callback(m, ieee80211_tx_mgt_cb,
(void *) vap->iv_state);
break;
case IEEE80211_FC0_SUBTYPE_DEAUTH:
IEEE80211_NOTE(vap, IEEE80211_MSG_AUTH, ni,
"send station deauthenticate (reason %d)", arg);
m = ieee80211_getmgtframe(&frm,
ic->ic_headroom + sizeof(struct ieee80211_frame),
sizeof(uint16_t));
if (m == NULL)
senderr(ENOMEM, is_tx_nobuf);
*(uint16_t *)frm = htole16(arg); /* reason */
m->m_pkthdr.len = m->m_len = sizeof(uint16_t);
IEEE80211_NODE_STAT(ni, tx_deauth);
IEEE80211_NODE_STAT_SET(ni, tx_deauth_code, arg);
ieee80211_node_unauthorize(ni); /* port closed */
break;
case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
case IEEE80211_FC0_SUBTYPE_REASSOC_REQ:
/*
* asreq frame format
* [2] capability information
* [2] listen interval
* [6*] current AP address (reassoc only)
* [tlv] ssid
* [tlv] supported rates
* [tlv] extended supported rates
* [4] power capability (optional)
* [28] supported channels (optional)
* [tlv] HT capabilities
* [tlv] WME (optional)
* [tlv] Vendor OUI HT capabilities (optional)
* [tlv] Atheros capabilities (if negotiated)
* [tlv] AppIE's (optional)
*/
m = ieee80211_getmgtframe(&frm,
ic->ic_headroom + sizeof(struct ieee80211_frame),
sizeof(uint16_t)
+ sizeof(uint16_t)
+ IEEE80211_ADDR_LEN
+ 2 + IEEE80211_NWID_LEN
+ 2 + IEEE80211_RATE_SIZE
+ 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
+ 4
+ 2 + 26
+ sizeof(struct ieee80211_wme_info)
+ sizeof(struct ieee80211_ie_htcap)
+ 4 + sizeof(struct ieee80211_ie_htcap)
#ifdef IEEE80211_SUPPORT_SUPERG
+ sizeof(struct ieee80211_ath_ie)
#endif
+ (vap->iv_appie_wpa != NULL ?
vap->iv_appie_wpa->ie_len : 0)
+ (vap->iv_appie_assocreq != NULL ?
vap->iv_appie_assocreq->ie_len : 0)
);
if (m == NULL)
senderr(ENOMEM, is_tx_nobuf);
KASSERT(vap->iv_opmode == IEEE80211_M_STA,
("wrong mode %u", vap->iv_opmode));
capinfo = IEEE80211_CAPINFO_ESS;
if (vap->iv_flags & IEEE80211_F_PRIVACY)
capinfo |= IEEE80211_CAPINFO_PRIVACY;
/*
* NB: Some 11a AP's reject the request when
* short preamble is set.
*/
if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan))
capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) &&
(ic->ic_caps & IEEE80211_C_SHSLOT))
capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
if ((ni->ni_capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT) &&
(vap->iv_flags & IEEE80211_F_DOTH))
capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT;
*(uint16_t *)frm = htole16(capinfo);
frm += 2;
KASSERT(bss->ni_intval != 0, ("beacon interval is zero!"));
*(uint16_t *)frm = htole16(howmany(ic->ic_lintval,
bss->ni_intval));
frm += 2;
if (type == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
IEEE80211_ADDR_COPY(frm, bss->ni_bssid);
frm += IEEE80211_ADDR_LEN;
}
frm = ieee80211_add_ssid(frm, ni->ni_essid, ni->ni_esslen);
frm = ieee80211_add_rates(frm, &ni->ni_rates);
frm = ieee80211_add_rsn(frm, vap);
frm = ieee80211_add_xrates(frm, &ni->ni_rates);
if (capinfo & IEEE80211_CAPINFO_SPECTRUM_MGMT) {
frm = ieee80211_add_powercapability(frm,
ic->ic_curchan);
frm = ieee80211_add_supportedchannels(frm, ic);
}
/*
* Check the channel - we may be using an 11n NIC with an
* 11n capable station, but we're configured to be an 11b
* channel.
*/
if ((vap->iv_flags_ht & IEEE80211_FHT_HT) &&
IEEE80211_IS_CHAN_HT(ni->ni_chan) &&
ni->ni_ies.htcap_ie != NULL &&
ni->ni_ies.htcap_ie[0] == IEEE80211_ELEMID_HTCAP) {
frm = ieee80211_add_htcap(frm, ni);
}
frm = ieee80211_add_wpa(frm, vap);
if ((ic->ic_flags & IEEE80211_F_WME) &&
ni->ni_ies.wme_ie != NULL)
frm = ieee80211_add_wme_info(frm, &ic->ic_wme);
/*
* Same deal - only send HT info if we're on an 11n
* capable channel.
*/
if ((vap->iv_flags_ht & IEEE80211_FHT_HT) &&
IEEE80211_IS_CHAN_HT(ni->ni_chan) &&
ni->ni_ies.htcap_ie != NULL &&
ni->ni_ies.htcap_ie[0] == IEEE80211_ELEMID_VENDOR) {
frm = ieee80211_add_htcap_vendor(frm, ni);
}
#ifdef IEEE80211_SUPPORT_SUPERG
if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS)) {
frm = ieee80211_add_ath(frm,
IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS),
((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
ni->ni_authmode != IEEE80211_AUTH_8021X) ?
vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
}
#endif /* IEEE80211_SUPPORT_SUPERG */
if (vap->iv_appie_assocreq != NULL)
frm = add_appie(frm, vap->iv_appie_assocreq);
m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
ieee80211_add_callback(m, ieee80211_tx_mgt_cb,
(void *) vap->iv_state);
break;
case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
case IEEE80211_FC0_SUBTYPE_REASSOC_RESP:
/*
* asresp frame format
* [2] capability information
* [2] status
* [2] association ID
* [tlv] supported rates
* [tlv] extended supported rates
* [tlv] HT capabilities (standard, if STA enabled)
* [tlv] HT information (standard, if STA enabled)
* [tlv] WME (if configured and STA enabled)
* [tlv] HT capabilities (vendor OUI, if STA enabled)
* [tlv] HT information (vendor OUI, if STA enabled)
* [tlv] Atheros capabilities (if STA enabled)
* [tlv] AppIE's (optional)
*/
m = ieee80211_getmgtframe(&frm,
ic->ic_headroom + sizeof(struct ieee80211_frame),
sizeof(uint16_t)
+ sizeof(uint16_t)
+ sizeof(uint16_t)
+ 2 + IEEE80211_RATE_SIZE
+ 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
+ sizeof(struct ieee80211_ie_htcap) + 4
+ sizeof(struct ieee80211_ie_htinfo) + 4
+ sizeof(struct ieee80211_wme_param)
#ifdef IEEE80211_SUPPORT_SUPERG
+ sizeof(struct ieee80211_ath_ie)
#endif
+ (vap->iv_appie_assocresp != NULL ?
vap->iv_appie_assocresp->ie_len : 0)
);
if (m == NULL)
senderr(ENOMEM, is_tx_nobuf);
capinfo = ieee80211_getcapinfo(vap, bss->ni_chan);
*(uint16_t *)frm = htole16(capinfo);
frm += 2;
*(uint16_t *)frm = htole16(arg); /* status */
frm += 2;
if (arg == IEEE80211_STATUS_SUCCESS) {
*(uint16_t *)frm = htole16(ni->ni_associd);
IEEE80211_NODE_STAT(ni, tx_assoc);
} else
IEEE80211_NODE_STAT(ni, tx_assoc_fail);
frm += 2;
frm = ieee80211_add_rates(frm, &ni->ni_rates);
frm = ieee80211_add_xrates(frm, &ni->ni_rates);
/* NB: respond according to what we received */
if ((ni->ni_flags & HTFLAGS) == IEEE80211_NODE_HT) {
frm = ieee80211_add_htcap(frm, ni);
frm = ieee80211_add_htinfo(frm, ni);
}
if ((vap->iv_flags & IEEE80211_F_WME) &&
ni->ni_ies.wme_ie != NULL)
frm = ieee80211_add_wme_param(frm, &ic->ic_wme);
if ((ni->ni_flags & HTFLAGS) == HTFLAGS) {
frm = ieee80211_add_htcap_vendor(frm, ni);
frm = ieee80211_add_htinfo_vendor(frm, ni);
}
#ifdef IEEE80211_SUPPORT_SUPERG
if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS))
frm = ieee80211_add_ath(frm,
IEEE80211_ATH_CAP(vap, ni, IEEE80211_F_ATHEROS),
((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
ni->ni_authmode != IEEE80211_AUTH_8021X) ?
vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
#endif /* IEEE80211_SUPPORT_SUPERG */
if (vap->iv_appie_assocresp != NULL)
frm = add_appie(frm, vap->iv_appie_assocresp);
m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
break;
case IEEE80211_FC0_SUBTYPE_DISASSOC:
IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni,
"send station disassociate (reason %d)", arg);
m = ieee80211_getmgtframe(&frm,
ic->ic_headroom + sizeof(struct ieee80211_frame),
sizeof(uint16_t));
if (m == NULL)
senderr(ENOMEM, is_tx_nobuf);
*(uint16_t *)frm = htole16(arg); /* reason */
m->m_pkthdr.len = m->m_len = sizeof(uint16_t);
IEEE80211_NODE_STAT(ni, tx_disassoc);
IEEE80211_NODE_STAT_SET(ni, tx_disassoc_code, arg);
break;
default:
IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
"invalid mgmt frame type %u", type);
senderr(EINVAL, is_tx_unknownmgt);
/* NOTREACHED */
}
/* NB: force non-ProbeResp frames to the highest queue */
params.ibp_pri = WME_AC_VO;
params.ibp_rate0 = bss->ni_txparms->mgmtrate;
/* NB: we know all frames are unicast */
params.ibp_try0 = bss->ni_txparms->maxretry;
params.ibp_power = bss->ni_txpower;
return ieee80211_mgmt_output(ni, m, type, &params);
bad:
ieee80211_free_node(ni);
return ret;
#undef senderr
#undef HTFLAGS
}
/*
* Return an mbuf with a probe response frame in it.
* Space is left to prepend an 802.11 header at the
* front, but it's left to the caller to fill in.
*/
struct mbuf *
ieee80211_alloc_proberesp(struct ieee80211_node *bss, int legacy)
{
struct ieee80211vap *vap = bss->ni_vap;
struct ieee80211com *ic = bss->ni_ic;
const struct ieee80211_rateset *rs;
struct mbuf *m;
uint16_t capinfo;
uint8_t *frm;
/*
* probe response frame format
* [8] time stamp
* [2] beacon interval
* [2] capability information
* [tlv] ssid
* [tlv] supported rates
* [tlv] parameter set (FH/DS)
* [tlv] parameter set (IBSS)
* [tlv] country (optional)
* [3] power control (optional)
* [5] channel switch announcement (CSA) (optional)
* [tlv] extended rate phy (ERP)
* [tlv] extended supported rates
* [tlv] RSN (optional)
* [tlv] HT capabilities
* [tlv] HT information
* [tlv] WPA (optional)
* [tlv] WME (optional)
* [tlv] Vendor OUI HT capabilities (optional)
* [tlv] Vendor OUI HT information (optional)
* [tlv] Atheros capabilities
* [tlv] AppIE's (optional)
* [tlv] Mesh ID (MBSS)
* [tlv] Mesh Conf (MBSS)
*/
m = ieee80211_getmgtframe(&frm,
ic->ic_headroom + sizeof(struct ieee80211_frame),
8
+ sizeof(uint16_t)
+ sizeof(uint16_t)
+ 2 + IEEE80211_NWID_LEN
+ 2 + IEEE80211_RATE_SIZE
+ 7 /* max(7,3) */
+ IEEE80211_COUNTRY_MAX_SIZE
+ 3
+ sizeof(struct ieee80211_csa_ie)
+ sizeof(struct ieee80211_quiet_ie)
+ 3
+ 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
+ sizeof(struct ieee80211_ie_wpa)
+ sizeof(struct ieee80211_ie_htcap)
+ sizeof(struct ieee80211_ie_htinfo)
+ sizeof(struct ieee80211_ie_wpa)
+ sizeof(struct ieee80211_wme_param)
+ 4 + sizeof(struct ieee80211_ie_htcap)
+ 4 + sizeof(struct ieee80211_ie_htinfo)
#ifdef IEEE80211_SUPPORT_SUPERG
+ sizeof(struct ieee80211_ath_ie)
#endif
#ifdef IEEE80211_SUPPORT_MESH
+ 2 + IEEE80211_MESHID_LEN
+ sizeof(struct ieee80211_meshconf_ie)
#endif
+ (vap->iv_appie_proberesp != NULL ?
vap->iv_appie_proberesp->ie_len : 0)
);
if (m == NULL) {
vap->iv_stats.is_tx_nobuf++;
return NULL;
}
memset(frm, 0, 8); /* timestamp should be filled later */
frm += 8;
*(uint16_t *)frm = htole16(bss->ni_intval);
frm += 2;
capinfo = ieee80211_getcapinfo(vap, bss->ni_chan);
*(uint16_t *)frm = htole16(capinfo);
frm += 2;
frm = ieee80211_add_ssid(frm, bss->ni_essid, bss->ni_esslen);
rs = ieee80211_get_suprates(ic, bss->ni_chan);
frm = ieee80211_add_rates(frm, rs);
if (IEEE80211_IS_CHAN_FHSS(bss->ni_chan)) {
*frm++ = IEEE80211_ELEMID_FHPARMS;
*frm++ = 5;
*frm++ = bss->ni_fhdwell & 0x00ff;
*frm++ = (bss->ni_fhdwell >> 8) & 0x00ff;
*frm++ = IEEE80211_FH_CHANSET(
ieee80211_chan2ieee(ic, bss->ni_chan));
*frm++ = IEEE80211_FH_CHANPAT(
ieee80211_chan2ieee(ic, bss->ni_chan));
*frm++ = bss->ni_fhindex;
} else {
*frm++ = IEEE80211_ELEMID_DSPARMS;
*frm++ = 1;
*frm++ = ieee80211_chan2ieee(ic, bss->ni_chan);
}
if (vap->iv_opmode == IEEE80211_M_IBSS) {
*frm++ = IEEE80211_ELEMID_IBSSPARMS;
*frm++ = 2;
*frm++ = 0; *frm++ = 0; /* TODO: ATIM window */
}
if ((vap->iv_flags & IEEE80211_F_DOTH) ||
(vap->iv_flags_ext & IEEE80211_FEXT_DOTD))
frm = ieee80211_add_countryie(frm, ic);
if (vap->iv_flags & IEEE80211_F_DOTH) {
if (IEEE80211_IS_CHAN_5GHZ(bss->ni_chan))
frm = ieee80211_add_powerconstraint(frm, vap);
if (ic->ic_flags & IEEE80211_F_CSAPENDING)
frm = ieee80211_add_csa(frm, vap);
}
if (vap->iv_flags & IEEE80211_F_DOTH) {
if (IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) &&
(vap->iv_flags_ext & IEEE80211_FEXT_DFS)) {
if (vap->iv_quiet)
frm = ieee80211_add_quiet(frm, vap);
}
}
if (IEEE80211_IS_CHAN_ANYG(bss->ni_chan))
frm = ieee80211_add_erp(frm, ic);
frm = ieee80211_add_xrates(frm, rs);
frm = ieee80211_add_rsn(frm, vap);
/*
* NB: legacy 11b clients do not get certain ie's.
* The caller identifies such clients by passing
* a token in legacy to us. Could expand this to be
* any legacy client for stuff like HT ie's.
*/
if (IEEE80211_IS_CHAN_HT(bss->ni_chan) &&
legacy != IEEE80211_SEND_LEGACY_11B) {
frm = ieee80211_add_htcap(frm, bss);
frm = ieee80211_add_htinfo(frm, bss);
}
frm = ieee80211_add_wpa(frm, vap);
if (vap->iv_flags & IEEE80211_F_WME)
frm = ieee80211_add_wme_param(frm, &ic->ic_wme);
if (IEEE80211_IS_CHAN_HT(bss->ni_chan) &&
(vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT) &&
legacy != IEEE80211_SEND_LEGACY_11B) {
frm = ieee80211_add_htcap_vendor(frm, bss);
frm = ieee80211_add_htinfo_vendor(frm, bss);
}
#ifdef IEEE80211_SUPPORT_SUPERG
if ((vap->iv_flags & IEEE80211_F_ATHEROS) &&
legacy != IEEE80211_SEND_LEGACY_11B)
frm = ieee80211_add_athcaps(frm, bss);
#endif
if (vap->iv_appie_proberesp != NULL)
frm = add_appie(frm, vap->iv_appie_proberesp);
#ifdef IEEE80211_SUPPORT_MESH
if (vap->iv_opmode == IEEE80211_M_MBSS) {
frm = ieee80211_add_meshid(frm, vap);
frm = ieee80211_add_meshconf(frm, vap);
}
#endif
m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
return m;
}
/*
* Send a probe response frame to the specified mac address.
* This does not go through the normal mgt frame api so we
* can specify the destination address and re-use the bss node
* for the sta reference.
*/
int
ieee80211_send_proberesp(struct ieee80211vap *vap,
const uint8_t da[IEEE80211_ADDR_LEN], int legacy)
{
struct ieee80211_node *bss = vap->iv_bss;
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_frame *wh;
struct mbuf *m;
int ret;
if (vap->iv_state == IEEE80211_S_CAC) {
IEEE80211_NOTE(vap, IEEE80211_MSG_OUTPUT, bss,
"block %s frame in CAC state", "probe response");
vap->iv_stats.is_tx_badstate++;
return EIO; /* XXX */
}
/*
* Hold a reference on the node so it doesn't go away until after
* the xmit is complete all the way in the driver. On error we
* will remove our reference.
*/
IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
"ieee80211_ref_node (%s:%u) %p<%s> refcnt %d\n",
__func__, __LINE__, bss, ether_sprintf(bss->ni_macaddr),
ieee80211_node_refcnt(bss)+1);
ieee80211_ref_node(bss);
m = ieee80211_alloc_proberesp(bss, legacy);
if (m == NULL) {
ieee80211_free_node(bss);
return ENOMEM;
}
M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT);
KASSERT(m != NULL, ("no room for header"));
IEEE80211_TX_LOCK(ic);
wh = mtod(m, struct ieee80211_frame *);
ieee80211_send_setup(bss, m,
IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP,
IEEE80211_NONQOS_TID, vap->iv_myaddr, da, bss->ni_bssid);
/* XXX power management? */
m->m_flags |= M_ENCAP; /* mark encapsulated */
M_WME_SETAC(m, WME_AC_BE);
IEEE80211_DPRINTF(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_DUMPPKTS,
"send probe resp on channel %u to %s%s\n",
ieee80211_chan2ieee(ic, ic->ic_curchan), ether_sprintf(da),
legacy ? " <legacy>" : "");
IEEE80211_NODE_STAT(bss, tx_mgmt);
ret = ieee80211_raw_output(vap, bss, m, NULL);
IEEE80211_TX_UNLOCK(ic);
return (ret);
}
/*
* Allocate and build a RTS (Request To Send) control frame.
*/
struct mbuf *
ieee80211_alloc_rts(struct ieee80211com *ic,
const uint8_t ra[IEEE80211_ADDR_LEN],
const uint8_t ta[IEEE80211_ADDR_LEN],
uint16_t dur)
{
struct ieee80211_frame_rts *rts;
struct mbuf *m;
/* XXX honor ic_headroom */
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m != NULL) {
rts = mtod(m, struct ieee80211_frame_rts *);
rts->i_fc[0] = IEEE80211_FC0_VERSION_0 |
IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_RTS;
rts->i_fc[1] = IEEE80211_FC1_DIR_NODS;
*(u_int16_t *)rts->i_dur = htole16(dur);
IEEE80211_ADDR_COPY(rts->i_ra, ra);
IEEE80211_ADDR_COPY(rts->i_ta, ta);
m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame_rts);
}
return m;
}
/*
* Allocate and build a CTS (Clear To Send) control frame.
*/
struct mbuf *
ieee80211_alloc_cts(struct ieee80211com *ic,
const uint8_t ra[IEEE80211_ADDR_LEN], uint16_t dur)
{
struct ieee80211_frame_cts *cts;
struct mbuf *m;
/* XXX honor ic_headroom */
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m != NULL) {
cts = mtod(m, struct ieee80211_frame_cts *);
cts->i_fc[0] = IEEE80211_FC0_VERSION_0 |
IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_CTS;
cts->i_fc[1] = IEEE80211_FC1_DIR_NODS;
*(u_int16_t *)cts->i_dur = htole16(dur);
IEEE80211_ADDR_COPY(cts->i_ra, ra);
m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame_cts);
}
return m;
}
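/*
 * Usage sketch (hypothetical, not from the original source): a driver
 * doing software RTS protection for a pending data frame would typically
 * allocate the control frame with the NAV duration it has already
 * computed, e.g.
 *
 *	mrts = ieee80211_alloc_rts(ic, ni->ni_macaddr, vap->iv_myaddr, dur);
 *
 * and queue it ahead of the protected frame; mrts and dur are
 * placeholder names here.
 */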
static void
ieee80211_tx_mgt_timeout(void *arg)
{
struct ieee80211vap *vap = arg;
IEEE80211_LOCK(vap->iv_ic);
if (vap->iv_state != IEEE80211_S_INIT &&
(vap->iv_ic->ic_flags & IEEE80211_F_SCAN) == 0) {
/*
* NB: it's safe to specify a timeout as the reason here;
* it'll only be used in the right state.
*/
ieee80211_new_state_locked(vap, IEEE80211_S_SCAN,
IEEE80211_SCAN_FAIL_TIMEOUT);
}
IEEE80211_UNLOCK(vap->iv_ic);
}
/*
* This is the callback set on net80211-sourced transmitted
* authentication request frames.
*
* This does a couple of things:
*
* + If the frame transmitted was a success, it schedules a future
* event which will transition the interface to scan.
* If a state transition _then_ occurs before that event occurs,
* said state transition will cancel this callout.
*
* + If the frame transmit was a failure, it immediately schedules
* the transition back to scan.
*/
static void
ieee80211_tx_mgt_cb(struct ieee80211_node *ni, void *arg, int status)
{
struct ieee80211vap *vap = ni->ni_vap;
enum ieee80211_state ostate = (enum ieee80211_state) arg;
/*
* Frame transmit completed; arrange timer callback. If
* transmit was successful we wait for a response. Otherwise
* we arrange an immediate callback instead of doing the
* callback directly since we don't know what state the driver
* is in (e.g. what locks it is holding). This work should
* not be too time-critical and not happen too often so the
* added overhead is acceptable.
*
* XXX what happens if !acked but response shows up before callback?
*/
if (vap->iv_state == ostate) {
callout_reset(&vap->iv_mgtsend,
status == 0 ? IEEE80211_TRANS_WAIT*hz : 0,
ieee80211_tx_mgt_timeout, vap);
}
}
static void
ieee80211_beacon_construct(struct mbuf *m, uint8_t *frm,
struct ieee80211_beacon_offsets *bo, struct ieee80211_node *ni)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct ieee80211_rateset *rs = &ni->ni_rates;
uint16_t capinfo;
/*
* beacon frame format
* [8] time stamp
* [2] beacon interval
* [2] capability information
* [tlv] ssid
* [tlv] supported rates
* [3] parameter set (DS)
* [8] CF parameter set (optional)
* [tlv] parameter set (IBSS/TIM)
* [tlv] country (optional)
* [3] power control (optional)
* [5] channel switch announcement (CSA) (optional)
* [tlv] extended rate phy (ERP)
* [tlv] extended supported rates
* [tlv] RSN parameters
* [tlv] HT capabilities
* [tlv] HT information
* XXX Vendor-specific OIDs (e.g. Atheros)
* [tlv] WPA parameters
* [tlv] WME parameters
* [tlv] Vendor OUI HT capabilities (optional)
* [tlv] Vendor OUI HT information (optional)
* [tlv] Atheros capabilities (optional)
* [tlv] TDMA parameters (optional)
* [tlv] Mesh ID (MBSS)
* [tlv] Mesh Conf (MBSS)
* [tlv] application data (optional)
*/
memset(bo, 0, sizeof(*bo));
memset(frm, 0, 8); /* XXX timestamp is set by hardware/driver */
frm += 8;
*(uint16_t *)frm = htole16(ni->ni_intval);
frm += 2;
capinfo = ieee80211_getcapinfo(vap, ni->ni_chan);
bo->bo_caps = (uint16_t *)frm;
*(uint16_t *)frm = htole16(capinfo);
frm += 2;
*frm++ = IEEE80211_ELEMID_SSID;
if ((vap->iv_flags & IEEE80211_F_HIDESSID) == 0) {
*frm++ = ni->ni_esslen;
memcpy(frm, ni->ni_essid, ni->ni_esslen);
frm += ni->ni_esslen;
} else
*frm++ = 0;
frm = ieee80211_add_rates(frm, rs);
if (!IEEE80211_IS_CHAN_FHSS(ni->ni_chan)) {
*frm++ = IEEE80211_ELEMID_DSPARMS;
*frm++ = 1;
*frm++ = ieee80211_chan2ieee(ic, ni->ni_chan);
}
if (ic->ic_flags & IEEE80211_F_PCF) {
bo->bo_cfp = frm;
frm = ieee80211_add_cfparms(frm, ic);
}
bo->bo_tim = frm;
if (vap->iv_opmode == IEEE80211_M_IBSS) {
*frm++ = IEEE80211_ELEMID_IBSSPARMS;
*frm++ = 2;
*frm++ = 0; *frm++ = 0; /* TODO: ATIM window */
bo->bo_tim_len = 0;
} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_MBSS) {
/* TIM IE is the same for Mesh and Hostap */
struct ieee80211_tim_ie *tie = (struct ieee80211_tim_ie *) frm;
tie->tim_ie = IEEE80211_ELEMID_TIM;
tie->tim_len = 4; /* length */
tie->tim_count = 0; /* DTIM count */
tie->tim_period = vap->iv_dtim_period; /* DTIM period */
tie->tim_bitctl = 0; /* bitmap control */
tie->tim_bitmap[0] = 0; /* Partial Virtual Bitmap */
frm += sizeof(struct ieee80211_tim_ie);
bo->bo_tim_len = 1;
}
bo->bo_tim_trailer = frm;
if ((vap->iv_flags & IEEE80211_F_DOTH) ||
(vap->iv_flags_ext & IEEE80211_FEXT_DOTD))
frm = ieee80211_add_countryie(frm, ic);
if (vap->iv_flags & IEEE80211_F_DOTH) {
if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan))
frm = ieee80211_add_powerconstraint(frm, vap);
bo->bo_csa = frm;
if (ic->ic_flags & IEEE80211_F_CSAPENDING)
frm = ieee80211_add_csa(frm, vap);
} else
bo->bo_csa = frm;
if (vap->iv_flags & IEEE80211_F_DOTH) {
bo->bo_quiet = frm;
if (IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) &&
(vap->iv_flags_ext & IEEE80211_FEXT_DFS)) {
if (vap->iv_quiet)
frm = ieee80211_add_quiet(frm,vap);
}
} else
bo->bo_quiet = frm;
if (IEEE80211_IS_CHAN_ANYG(ni->ni_chan)) {
bo->bo_erp = frm;
frm = ieee80211_add_erp(frm, ic);
}
frm = ieee80211_add_xrates(frm, rs);
frm = ieee80211_add_rsn(frm, vap);
if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) {
frm = ieee80211_add_htcap(frm, ni);
bo->bo_htinfo = frm;
frm = ieee80211_add_htinfo(frm, ni);
}
frm = ieee80211_add_wpa(frm, vap);
if (vap->iv_flags & IEEE80211_F_WME) {
bo->bo_wme = frm;
frm = ieee80211_add_wme_param(frm, &ic->ic_wme);
}
if (IEEE80211_IS_CHAN_HT(ni->ni_chan) &&
(vap->iv_flags_ht & IEEE80211_FHT_HTCOMPAT)) {
frm = ieee80211_add_htcap_vendor(frm, ni);
frm = ieee80211_add_htinfo_vendor(frm, ni);
}
#ifdef IEEE80211_SUPPORT_SUPERG
if (vap->iv_flags & IEEE80211_F_ATHEROS) {
bo->bo_ath = frm;
frm = ieee80211_add_athcaps(frm, ni);
}
#endif
#ifdef IEEE80211_SUPPORT_TDMA
if (vap->iv_caps & IEEE80211_C_TDMA) {
bo->bo_tdma = frm;
frm = ieee80211_add_tdma(frm, vap);
}
#endif
if (vap->iv_appie_beacon != NULL) {
bo->bo_appie = frm;
bo->bo_appie_len = vap->iv_appie_beacon->ie_len;
frm = add_appie(frm, vap->iv_appie_beacon);
}
#ifdef IEEE80211_SUPPORT_MESH
if (vap->iv_opmode == IEEE80211_M_MBSS) {
frm = ieee80211_add_meshid(frm, vap);
bo->bo_meshconf = frm;
frm = ieee80211_add_meshconf(frm, vap);
}
#endif
bo->bo_tim_trailer_len = frm - bo->bo_tim_trailer;
bo->bo_csa_trailer_len = frm - bo->bo_csa;
m->m_pkthdr.len = m->m_len = frm - mtod(m, uint8_t *);
}
/*
* Allocate a beacon frame and fill in the appropriate bits.
*/
struct mbuf *
ieee80211_beacon_alloc(struct ieee80211_node *ni,
struct ieee80211_beacon_offsets *bo)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct ifnet *ifp = vap->iv_ifp;
struct ieee80211_frame *wh;
struct mbuf *m;
int pktlen;
uint8_t *frm;
/*
* beacon frame format
* [8] time stamp
* [2] beacon interval
* [2] capability information
* [tlv] ssid
* [tlv] supported rates
* [3] parameter set (DS)
* [8] CF parameter set (optional)
* [tlv] parameter set (IBSS/TIM)
* [tlv] country (optional)
* [3] power control (optional)
* [5] channel switch announcement (CSA) (optional)
* [tlv] extended rate phy (ERP)
* [tlv] extended supported rates
* [tlv] RSN parameters
* [tlv] HT capabilities
* [tlv] HT information
* [tlv] Vendor OUI HT capabilities (optional)
* [tlv] Vendor OUI HT information (optional)
* XXX Vendor-specific OIDs (e.g. Atheros)
* [tlv] WPA parameters
* [tlv] WME parameters
* [tlv] TDMA parameters (optional)
* [tlv] Mesh ID (MBSS)
* [tlv] Mesh Conf (MBSS)
* [tlv] application data (optional)
* NB: we allocate the max space required for the TIM bitmap.
* XXX how big is this?
*/
pktlen = 8 /* time stamp */
+ sizeof(uint16_t) /* beacon interval */
+ sizeof(uint16_t) /* capabilities */
+ 2 + ni->ni_esslen /* ssid */
+ 2 + IEEE80211_RATE_SIZE /* supported rates */
+ 2 + 1 /* DS parameters */
+ 2 + 6 /* CF parameters */
+ 2 + 4 + vap->iv_tim_len /* DTIM/IBSSPARMS */
+ IEEE80211_COUNTRY_MAX_SIZE /* country */
+ 2 + 1 /* power control */
+ sizeof(struct ieee80211_csa_ie) /* CSA */
+ sizeof(struct ieee80211_quiet_ie) /* Quiet */
+ 2 + 1 /* ERP */
+ 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE)
+ (vap->iv_caps & IEEE80211_C_WPA ? /* WPA 1+2 */
2*sizeof(struct ieee80211_ie_wpa) : 0)
/* XXX conditional? */
+ 4+2*sizeof(struct ieee80211_ie_htcap)/* HT caps */
+ 4+2*sizeof(struct ieee80211_ie_htinfo)/* HT info */
+ (vap->iv_caps & IEEE80211_C_WME ? /* WME */
sizeof(struct ieee80211_wme_param) : 0)
#ifdef IEEE80211_SUPPORT_SUPERG
+ sizeof(struct ieee80211_ath_ie) /* ATH */
#endif
#ifdef IEEE80211_SUPPORT_TDMA
+ (vap->iv_caps & IEEE80211_C_TDMA ? /* TDMA */
sizeof(struct ieee80211_tdma_param) : 0)
#endif
#ifdef IEEE80211_SUPPORT_MESH
+ 2 + ni->ni_meshidlen
+ sizeof(struct ieee80211_meshconf_ie)
#endif
+ IEEE80211_MAX_APPIE
;
m = ieee80211_getmgtframe(&frm,
ic->ic_headroom + sizeof(struct ieee80211_frame), pktlen);
if (m == NULL) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_ANY,
"%s: cannot get buf; size %u\n", __func__, pktlen);
vap->iv_stats.is_tx_nobuf++;
return NULL;
}
ieee80211_beacon_construct(m, frm, bo, ni);
M_PREPEND(m, sizeof(struct ieee80211_frame), M_NOWAIT);
KASSERT(m != NULL, ("no space for 802.11 header?"));
wh = mtod(m, struct ieee80211_frame *);
wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
IEEE80211_FC0_SUBTYPE_BEACON;
wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
*(uint16_t *)wh->i_dur = 0;
IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr);
IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
IEEE80211_ADDR_COPY(wh->i_addr3, ni->ni_bssid);
*(uint16_t *)wh->i_seq = 0;
return m;
}
/*
* Update the dynamic parts of a beacon frame based on the current state.
*/
int
ieee80211_beacon_update(struct ieee80211_node *ni,
struct ieee80211_beacon_offsets *bo, struct mbuf *m, int mcast)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
int len_changed = 0;
uint16_t capinfo;
struct ieee80211_frame *wh;
ieee80211_seq seqno;
IEEE80211_LOCK(ic);
/*
* Handle 11h channel change when we've reached the count.
* We must recalculate the beacon frame contents to account
* for the new channel. Note we do this only for the first
* vap that reaches this point; subsequent vaps just update
* their beacon state to reflect the recalculated channel.
*/
if (isset(bo->bo_flags, IEEE80211_BEACON_CSA) &&
vap->iv_csa_count == ic->ic_csa_count) {
vap->iv_csa_count = 0;
/*
* Effect channel change before reconstructing the beacon
* frame contents as many places reference ni_chan.
*/
if (ic->ic_csa_newchan != NULL)
ieee80211_csa_completeswitch(ic);
/*
* NB: ieee80211_beacon_construct clears all pending
* updates in bo_flags so we don't need to explicitly
* clear IEEE80211_BEACON_CSA.
*/
ieee80211_beacon_construct(m,
mtod(m, uint8_t*) + sizeof(struct ieee80211_frame), bo, ni);
/* XXX do WME aggressive mode processing? */
IEEE80211_UNLOCK(ic);
return 1; /* just assume length changed */
}
wh = mtod(m, struct ieee80211_frame *);
seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]++;
*(uint16_t *)&wh->i_seq[0] =
htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
M_SEQNO_SET(m, seqno);
/* XXX faster to recalculate entirely or just changes? */
capinfo = ieee80211_getcapinfo(vap, ni->ni_chan);
*bo->bo_caps = htole16(capinfo);
if (vap->iv_flags & IEEE80211_F_WME) {
struct ieee80211_wme_state *wme = &ic->ic_wme;
/*
* Check for aggressive mode change. When there is
* significant high priority traffic in the BSS
* throttle back BE traffic by using conservative
* parameters. Otherwise BE uses aggressive params
* to optimize performance of legacy/non-QoS traffic.
*/
if (wme->wme_flags & WME_F_AGGRMODE) {
if (wme->wme_hipri_traffic >
wme->wme_hipri_switch_thresh) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
"%s: traffic %u, disable aggressive mode\n",
__func__, wme->wme_hipri_traffic);
wme->wme_flags &= ~WME_F_AGGRMODE;
ieee80211_wme_updateparams_locked(vap);
wme->wme_hipri_traffic =
wme->wme_hipri_switch_hysteresis;
} else
wme->wme_hipri_traffic = 0;
} else {
if (wme->wme_hipri_traffic <=
wme->wme_hipri_switch_thresh) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
"%s: traffic %u, enable aggressive mode\n",
__func__, wme->wme_hipri_traffic);
wme->wme_flags |= WME_F_AGGRMODE;
ieee80211_wme_updateparams_locked(vap);
wme->wme_hipri_traffic = 0;
} else
wme->wme_hipri_traffic =
wme->wme_hipri_switch_hysteresis;
}
if (isset(bo->bo_flags, IEEE80211_BEACON_WME)) {
(void) ieee80211_add_wme_param(bo->bo_wme, wme);
clrbit(bo->bo_flags, IEEE80211_BEACON_WME);
}
}
if (isset(bo->bo_flags, IEEE80211_BEACON_HTINFO)) {
ieee80211_ht_update_beacon(vap, bo);
clrbit(bo->bo_flags, IEEE80211_BEACON_HTINFO);
}
#ifdef IEEE80211_SUPPORT_TDMA
if (vap->iv_caps & IEEE80211_C_TDMA) {
/*
* NB: the beacon is potentially updated every TBTT.
*/
ieee80211_tdma_update_beacon(vap, bo);
}
#endif
#ifdef IEEE80211_SUPPORT_MESH
if (vap->iv_opmode == IEEE80211_M_MBSS)
ieee80211_mesh_update_beacon(vap, bo);
#endif
if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_MBSS) { /* NB: no IBSS support */
struct ieee80211_tim_ie *tie =
(struct ieee80211_tim_ie *) bo->bo_tim;
if (isset(bo->bo_flags, IEEE80211_BEACON_TIM)) {
u_int timlen, timoff, i;
/*
* ATIM/DTIM needs updating. If it fits in the
* current space allocated then just copy in the
* new bits. Otherwise we need to move any trailing
* data to make room. Note that we know there is
* contiguous space because ieee80211_beacon_allocate
* ensures there is space in the mbuf to write a
* maximal-size virtual bitmap (based on iv_max_aid).
*/
/*
* Calculate the bitmap size and offset, copy any
* trailer out of the way, and then copy in the
* new bitmap and update the information element.
* Note that the tim bitmap must contain at least
* one byte and any offset must be even.
*/
if (vap->iv_ps_pending != 0) {
timoff = 128; /* impossibly large */
for (i = 0; i < vap->iv_tim_len; i++)
if (vap->iv_tim_bitmap[i]) {
timoff = i &~ 1;
break;
}
KASSERT(timoff != 128, ("tim bitmap empty!"));
for (i = vap->iv_tim_len-1; i >= timoff; i--)
if (vap->iv_tim_bitmap[i])
break;
timlen = 1 + (i - timoff);
} else {
timoff = 0;
timlen = 1;
}
if (timlen != bo->bo_tim_len) {
/* copy up/down trailer */
int adjust = tie->tim_bitmap+timlen
- bo->bo_tim_trailer;
ovbcopy(bo->bo_tim_trailer,
bo->bo_tim_trailer+adjust,
bo->bo_tim_trailer_len);
bo->bo_tim_trailer += adjust;
bo->bo_erp += adjust;
bo->bo_htinfo += adjust;
#ifdef IEEE80211_SUPPORT_SUPERG
bo->bo_ath += adjust;
#endif
#ifdef IEEE80211_SUPPORT_TDMA
bo->bo_tdma += adjust;
#endif
#ifdef IEEE80211_SUPPORT_MESH
bo->bo_meshconf += adjust;
#endif
bo->bo_appie += adjust;
bo->bo_wme += adjust;
bo->bo_csa += adjust;
bo->bo_quiet += adjust;
bo->bo_tim_len = timlen;
/* update information element */
tie->tim_len = 3 + timlen;
tie->tim_bitctl = timoff;
len_changed = 1;
}
memcpy(tie->tim_bitmap, vap->iv_tim_bitmap + timoff,
bo->bo_tim_len);
clrbit(bo->bo_flags, IEEE80211_BEACON_TIM);
IEEE80211_DPRINTF(vap, IEEE80211_MSG_POWER,
"%s: TIM updated, pending %u, off %u, len %u\n",
__func__, vap->iv_ps_pending, timoff, timlen);
}
/* count down DTIM period */
if (tie->tim_count == 0)
tie->tim_count = tie->tim_period - 1;
else
tie->tim_count--;
/* update state for buffered multicast frames on DTIM */
if (mcast && tie->tim_count == 0)
tie->tim_bitctl |= 1;
else
tie->tim_bitctl &= ~1;
if (isset(bo->bo_flags, IEEE80211_BEACON_CSA)) {
struct ieee80211_csa_ie *csa =
(struct ieee80211_csa_ie *) bo->bo_csa;
/*
* Insert or update CSA ie. If we're just starting
* to count down to the channel switch then we need
* to insert the CSA ie. Otherwise we just need to
* drop the count. The actual change happens above
* when the vap's count reaches the target count.
*/
if (vap->iv_csa_count == 0) {
memmove(&csa[1], csa, bo->bo_csa_trailer_len);
bo->bo_erp += sizeof(*csa);
bo->bo_htinfo += sizeof(*csa);
bo->bo_wme += sizeof(*csa);
#ifdef IEEE80211_SUPPORT_SUPERG
bo->bo_ath += sizeof(*csa);
#endif
#ifdef IEEE80211_SUPPORT_TDMA
bo->bo_tdma += sizeof(*csa);
#endif
#ifdef IEEE80211_SUPPORT_MESH
bo->bo_meshconf += sizeof(*csa);
#endif
bo->bo_appie += sizeof(*csa);
bo->bo_csa_trailer_len += sizeof(*csa);
bo->bo_quiet += sizeof(*csa);
bo->bo_tim_trailer_len += sizeof(*csa);
m->m_len += sizeof(*csa);
m->m_pkthdr.len += sizeof(*csa);
ieee80211_add_csa(bo->bo_csa, vap);
} else
csa->csa_count--;
vap->iv_csa_count++;
/* NB: don't clear IEEE80211_BEACON_CSA */
}
if (IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) &&
(vap->iv_flags_ext & IEEE80211_FEXT_DFS)) {
if (vap->iv_quiet)
ieee80211_add_quiet(bo->bo_quiet, vap);
}
if (isset(bo->bo_flags, IEEE80211_BEACON_ERP)) {
/*
* ERP element needs updating.
*/
(void) ieee80211_add_erp(bo->bo_erp, ic);
clrbit(bo->bo_flags, IEEE80211_BEACON_ERP);
}
#ifdef IEEE80211_SUPPORT_SUPERG
if (isset(bo->bo_flags, IEEE80211_BEACON_ATH)) {
ieee80211_add_athcaps(bo->bo_ath, ni);
clrbit(bo->bo_flags, IEEE80211_BEACON_ATH);
}
#endif
}
if (isset(bo->bo_flags, IEEE80211_BEACON_APPIE)) {
const struct ieee80211_appie *aie = vap->iv_appie_beacon;
int aielen;
uint8_t *frm;
aielen = 0;
if (aie != NULL)
aielen += aie->ie_len;
if (aielen != bo->bo_appie_len) {
/* copy up/down trailer */
int adjust = aielen - bo->bo_appie_len;
ovbcopy(bo->bo_tim_trailer, bo->bo_tim_trailer+adjust,
bo->bo_tim_trailer_len);
bo->bo_tim_trailer += adjust;
bo->bo_appie += adjust;
bo->bo_appie_len = aielen;
len_changed = 1;
}
frm = bo->bo_appie;
if (aie != NULL)
frm = add_appie(frm, aie);
clrbit(bo->bo_flags, IEEE80211_BEACON_APPIE);
}
IEEE80211_UNLOCK(ic);
return len_changed;
}
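The return value above is the driver's cue to re-plumb its beacon buffer rather than reuse the in-place contents. A minimal sketch of that calling pattern follows; struct mydrv_softc, sc_bo, sc_bcbuf and mydrv_beacon_push() are hypothetical driver names, and the sketch assumes the usual net80211 driver includes (<net80211/ieee80211_var.h> etc.); only ieee80211_beacon_update() and its arguments come from the code above.
/*
 * Sketch (not part of this diff): driver beacon-refresh path.
 */
struct mydrv_softc {
        struct ieee80211_beacon_offsets sc_bo;  /* offsets kept for net80211 */
        struct mbuf *sc_bcbuf;                  /* beacon frame mbuf */
};

void mydrv_beacon_push(struct mydrv_softc *, struct mbuf *); /* hypothetical */

static void
mydrv_beacon_refresh(struct mydrv_softc *sc, struct ieee80211_node *ni,
    int mcast)
{
        /* ieee80211_beacon_update() takes the 802.11 com lock internally. */
        if (ieee80211_beacon_update(ni, &sc->sc_bo, sc->sc_bcbuf, mcast)) {
                /*
                 * The frame length changed (TIM bitmap grew/shrank, CSA or
                 * application IE inserted), so redo the DMA mapping.
                 */
                mydrv_beacon_push(sc, sc->sc_bcbuf);
        }
        /* Otherwise the contents were updated in place; nothing to do. */
}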
/*
* Do Ethernet-LLC encapsulation for each payload in a fast frame
* tunnel encapsulation. The frame is assumed to have an Ethernet
* header at the front that must be stripped before prepending the
* LLC followed by the Ethernet header passed in (with an Ethernet
* type that specifies the payload size).
*/
struct mbuf *
ieee80211_ff_encap1(struct ieee80211vap *vap, struct mbuf *m,
const struct ether_header *eh)
{
struct llc *llc;
uint16_t payload;
/* XXX optimize by combining m_adj+M_PREPEND */
m_adj(m, sizeof(struct ether_header) - sizeof(struct llc));
llc = mtod(m, struct llc *);
llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
llc->llc_control = LLC_UI;
llc->llc_snap.org_code[0] = 0;
llc->llc_snap.org_code[1] = 0;
llc->llc_snap.org_code[2] = 0;
llc->llc_snap.ether_type = eh->ether_type;
payload = m->m_pkthdr.len; /* NB: w/o Ethernet header */
M_PREPEND(m, sizeof(struct ether_header), M_NOWAIT);
if (m == NULL) { /* XXX cannot happen */
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
"%s: no space for ether_header\n", __func__);
vap->iv_stats.is_tx_nobuf++;
return NULL;
}
ETHER_HEADER_COPY(mtod(m, void *), eh);
mtod(m, struct ether_header *)->ether_type = htons(payload);
return m;
}
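To make the m_adj()/M_PREPEND dance above easier to follow, here is a summary of the resulting layout; this is a restatement of the code, not an addition to it.
/*
 * ieee80211_ff_encap1() payload transformation, per the code above
 * (lengths in bytes):
 *
 *   in:   [ ether_header (14) | payload (P) ]
 *   out:  [ ether_header (14) | LLC/SNAP (8) | payload (P) ]
 *
 * m_adj() strips 14 - 8 = 6 bytes so the tail of the old header is
 * overwritten with the LLC/SNAP header (SNAP preserves the original
 * ether_type), and the prepended Ethernet header's ether_type field
 * carries the length of "LLC/SNAP + payload" (8 + P) instead of a
 * protocol type.
 */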
/*
* Complete an mbuf transmission.
*
* For now, this simply processes a completed frame after the
* driver has completed its transmission and/or retransmission.
* It assumes the frame is an 802.11 encapsulated frame.
*
* Later on it will grow to become the exit path for a given frame
* from the driver and, depending upon how it's been encapsulated
* and already transmitted, it may end up doing A-MPDU retransmission,
* power save requeuing, etc.
*
* In order for the above to work, the driver entry point to this
* must not hold any driver locks. Thus, the driver needs to delay
* any actual mbuf completion until it can release said locks.
*
* This frees the mbuf and if the mbuf has a node reference,
* the node reference will be freed.
*/
void
ieee80211_tx_complete(struct ieee80211_node *ni, struct mbuf *m, int status)
{
if (ni != NULL) {
+ struct ifnet *ifp = ni->ni_vap->iv_ifp;
+
+ if (status == 0) {
+ if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
+ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
+ if (m->m_flags & M_MCAST)
+ if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
+ } else
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
if (m->m_flags & M_TXCB)
ieee80211_process_callback(ni, m, status);
ieee80211_free_node(ni);
}
m_freem(m);
}
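A hedged sketch of the driver-side pattern the comment block above asks for: finished frames are harvested while the driver lock is held, but completed only after it has been dropped. mydrv_txeof(), mydrv_lock()/mydrv_unlock() and the softc are hypothetical; ieee80211_tx_complete() is the function above.
struct mydrv_softc;                     /* hypothetical driver state */

/* Hypothetical: pop one finished frame plus its node ref and status. */
struct mbuf *mydrv_txeof(struct mydrv_softc *, struct ieee80211_node **,
    int *);
void mydrv_lock(struct mydrv_softc *);
void mydrv_unlock(struct mydrv_softc *);

static void
mydrv_tx_proc(struct mydrv_softc *sc)
{
        struct ieee80211_node *ni;
        struct mbuf *m;
        int status;

        for (;;) {
                mydrv_lock(sc);
                m = mydrv_txeof(sc, &ni, &status);
                mydrv_unlock(sc);
                if (m == NULL)
                        break;
                /*
                 * No driver locks held here, as required above; this
                 * updates the vap counters, runs any M_TXCB callback
                 * and releases the node reference.
                 */
                ieee80211_tx_complete(ni, m, status);
        }
}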
Index: head/sys/net80211/ieee80211_power.c
===================================================================
--- head/sys/net80211/ieee80211_power.c (revision 287196)
+++ head/sys/net80211/ieee80211_power.c (revision 287197)
@@ -1,663 +1,654 @@
/*-
* Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* IEEE 802.11 power save support.
*/
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h>
#include <net80211/ieee80211_var.h>
#include <net/bpf.h>
static void ieee80211_update_ps(struct ieee80211vap *, int);
static int ieee80211_set_tim(struct ieee80211_node *, int);
static MALLOC_DEFINE(M_80211_POWER, "80211power", "802.11 power save state");
void
ieee80211_power_attach(struct ieee80211com *ic)
{
}
void
ieee80211_power_detach(struct ieee80211com *ic)
{
}
void
ieee80211_power_vattach(struct ieee80211vap *vap)
{
if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_IBSS) {
/* NB: driver should override */
vap->iv_update_ps = ieee80211_update_ps;
vap->iv_set_tim = ieee80211_set_tim;
}
vap->iv_node_ps = ieee80211_node_pwrsave;
vap->iv_sta_ps = ieee80211_sta_pwrsave;
}
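The attach code above installs ieee80211_update_ps()/ieee80211_set_tim() only as defaults that the driver "should override". A hedged sketch of the usual interposition pattern follows: remember net80211's method, chain to it, and only then poke the hardware when the TIM state actually changed. The mydrv_* names and the mv_set_tim field are hypothetical.
struct mydrv_vap {                      /* hypothetical per-vap container */
        struct ieee80211vap mv_vap;     /* must be first */
        int (*mv_set_tim)(struct ieee80211_node *, int);
};
#define MYDRV_VAP(vap)  ((struct mydrv_vap *)(vap))

void mydrv_hw_set_tim(struct ieee80211vap *, int);      /* hypothetical */

static int
mydrv_set_tim(struct ieee80211_node *ni, int set)
{
        struct mydrv_vap *mvp = MYDRV_VAP(ni->ni_vap);
        int changed;

        /* Let net80211 maintain iv_tim_bitmap / iv_ps_pending. */
        changed = mvp->mv_set_tim(ni, set);
        if (changed)
                mydrv_hw_set_tim(ni->ni_vap, set);
        return changed;
}

/* Called from the driver's vap-creation path, after net80211 setup. */
static void
mydrv_vap_override_ps(struct ieee80211vap *vap)
{
        struct mydrv_vap *mvp = MYDRV_VAP(vap);

        mvp->mv_set_tim = vap->iv_set_tim;
        vap->iv_set_tim = mydrv_set_tim;
}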
void
ieee80211_power_latevattach(struct ieee80211vap *vap)
{
/*
* Allocate these only if needed. Beware that we
* know adhoc mode doesn't support ATIM yet...
*/
if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
vap->iv_tim_len = howmany(vap->iv_max_aid,8) * sizeof(uint8_t);
vap->iv_tim_bitmap = (uint8_t *) IEEE80211_MALLOC(vap->iv_tim_len,
M_80211_POWER,
IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
if (vap->iv_tim_bitmap == NULL) {
printf("%s: no memory for TIM bitmap!\n", __func__);
/* XXX good enough to keep from crashing? */
vap->iv_tim_len = 0;
}
}
}
void
ieee80211_power_vdetach(struct ieee80211vap *vap)
{
if (vap->iv_tim_bitmap != NULL) {
IEEE80211_FREE(vap->iv_tim_bitmap, M_80211_POWER);
vap->iv_tim_bitmap = NULL;
}
}
void
ieee80211_psq_init(struct ieee80211_psq *psq, const char *name)
{
memset(psq, 0, sizeof(*psq));
psq->psq_maxlen = IEEE80211_PS_MAX_QUEUE;
IEEE80211_PSQ_INIT(psq, name); /* OS-dependent setup */
}
void
ieee80211_psq_cleanup(struct ieee80211_psq *psq)
{
#if 0
psq_drain(psq); /* XXX should not be needed? */
#else
KASSERT(psq->psq_len == 0, ("%d frames on ps q", psq->psq_len));
#endif
IEEE80211_PSQ_DESTROY(psq); /* OS-dependent cleanup */
}
/*
* Return the highest priority frame in the ps queue.
*/
struct mbuf *
ieee80211_node_psq_dequeue(struct ieee80211_node *ni, int *qlen)
{
struct ieee80211_psq *psq = &ni->ni_psq;
struct ieee80211_psq_head *qhead;
struct mbuf *m;
IEEE80211_PSQ_LOCK(psq);
qhead = &psq->psq_head[0];
again:
if ((m = qhead->head) != NULL) {
if ((qhead->head = m->m_nextpkt) == NULL)
qhead->tail = NULL;
KASSERT(qhead->len > 0, ("qhead len %d", qhead->len));
qhead->len--;
KASSERT(psq->psq_len > 0, ("psq len %d", psq->psq_len));
psq->psq_len--;
m->m_nextpkt = NULL;
}
if (m == NULL && qhead == &psq->psq_head[0]) {
/* Algol-68 style for loop */
qhead = &psq->psq_head[1];
goto again;
}
if (qlen != NULL)
*qlen = psq->psq_len;
IEEE80211_PSQ_UNLOCK(psq);
return m;
}
/*
* Reclaim an mbuf from the ps q. If marked with M_ENCAP
* we assume there is a node reference that must be reclaimed.
*/
static void
psq_mfree(struct mbuf *m)
{
if (m->m_flags & M_ENCAP) {
struct ieee80211_node *ni = (void *) m->m_pkthdr.rcvif;
ieee80211_free_node(ni);
}
m->m_nextpkt = NULL;
m_freem(m);
}
/*
* Clear any frames queued in the power save queue.
* The number of frames that were present is returned.
*/
static int
psq_drain(struct ieee80211_psq *psq)
{
struct ieee80211_psq_head *qhead;
struct mbuf *m;
int qlen;
IEEE80211_PSQ_LOCK(psq);
qlen = psq->psq_len;
qhead = &psq->psq_head[0];
again:
while ((m = qhead->head) != NULL) {
qhead->head = m->m_nextpkt;
psq_mfree(m);
}
qhead->tail = NULL;
qhead->len = 0;
if (qhead == &psq->psq_head[0]) { /* Algol-68 style for loop */
qhead = &psq->psq_head[1];
goto again;
}
psq->psq_len = 0;
IEEE80211_PSQ_UNLOCK(psq);
return qlen;
}
/*
* Clear any frames queued in the power save queue.
* The number of frames that were present is returned.
*/
int
ieee80211_node_psq_drain(struct ieee80211_node *ni)
{
return psq_drain(&ni->ni_psq);
}
/*
* Age frames on the power save queue. The aging interval is
* 4 times the listen interval specified by the station. This
* number is factored into the age calculations when the frame
* is placed on the queue. We store ages as time differences
* so we can check and/or adjust only the head of the list.
* If a frame's age exceeds the threshold then discard it.
* The number of frames discarded is returned so the caller
* can check if it needs to adjust the tim.
*/
int
ieee80211_node_psq_age(struct ieee80211_node *ni)
{
struct ieee80211_psq *psq = &ni->ni_psq;
int discard = 0;
if (psq->psq_len != 0) {
#ifdef IEEE80211_DEBUG
struct ieee80211vap *vap = ni->ni_vap;
#endif
struct ieee80211_psq_head *qhead;
struct mbuf *m;
IEEE80211_PSQ_LOCK(psq);
qhead = &psq->psq_head[0];
again:
while ((m = qhead->head) != NULL &&
M_AGE_GET(m) < IEEE80211_INACT_WAIT) {
IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
"discard frame, age %u", M_AGE_GET(m));
if ((qhead->head = m->m_nextpkt) == NULL)
qhead->tail = NULL;
KASSERT(qhead->len > 0, ("qhead len %d", qhead->len));
qhead->len--;
KASSERT(psq->psq_len > 0, ("psq len %d", psq->psq_len));
psq->psq_len--;
psq_mfree(m);
discard++;
}
if (qhead == &psq->psq_head[0]) { /* Algol-68 style for loop */
qhead = &psq->psq_head[1];
goto again;
}
if (m != NULL)
M_AGE_SUB(m, IEEE80211_INACT_WAIT);
IEEE80211_PSQ_UNLOCK(psq);
IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
"discard %u frames for age", discard);
IEEE80211_NODE_STAT_ADD(ni, ps_discard, discard);
}
return discard;
}
/*
* Handle a change in the PS station occupancy.
*/
static void
ieee80211_update_ps(struct ieee80211vap *vap, int nsta)
{
KASSERT(vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_IBSS,
("operating mode %u", vap->iv_opmode));
}
/*
* Indicate whether there are frames queued for a station in power-save mode.
*/
static int
ieee80211_set_tim(struct ieee80211_node *ni, int set)
{
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
uint16_t aid;
int changed;
KASSERT(vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_IBSS,
("operating mode %u", vap->iv_opmode));
aid = IEEE80211_AID(ni->ni_associd);
KASSERT(aid < vap->iv_max_aid,
("bogus aid %u, max %u", aid, vap->iv_max_aid));
IEEE80211_LOCK(ic);
changed = (set != (isset(vap->iv_tim_bitmap, aid) != 0));
if (changed) {
if (set) {
setbit(vap->iv_tim_bitmap, aid);
vap->iv_ps_pending++;
} else {
clrbit(vap->iv_tim_bitmap, aid);
vap->iv_ps_pending--;
}
/* NB: we know vap is in RUN state so no need to check */
vap->iv_update_beacon(vap, IEEE80211_BEACON_TIM);
}
IEEE80211_UNLOCK(ic);
return changed;
}
/*
* Save an outbound packet for a node in power-save sleep state.
* The new packet is placed on the node's saved queue, and the TIM
* is changed, if necessary.
*/
int
ieee80211_pwrsave(struct ieee80211_node *ni, struct mbuf *m)
{
struct ieee80211_psq *psq = &ni->ni_psq;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct ieee80211_psq_head *qhead;
int qlen, age;
IEEE80211_PSQ_LOCK(psq);
if (psq->psq_len >= psq->psq_maxlen) {
psq->psq_drops++;
IEEE80211_PSQ_UNLOCK(psq);
IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
"pwr save q overflow, drops %d (size %d)",
psq->psq_drops, psq->psq_len);
#ifdef IEEE80211_DEBUG
if (ieee80211_msg_dumppkts(vap))
ieee80211_dump_pkt(ni->ni_ic, mtod(m, caddr_t),
m->m_len, -1, -1);
#endif
psq_mfree(m);
return ENOSPC;
}
/*
* Tag the frame with its expiry time and insert it in
* the appropriate queue. The aging interval is 4 times
* the listen interval specified by the station. Frames
* that sit around too long are reclaimed using this
* information.
*/
/* TU -> secs. XXX handle overflow? */
age = IEEE80211_TU_TO_MS((ni->ni_intval * ic->ic_bintval) << 2) / 1000;
/*
* Encapsulated frames go on the high priority queue,
* other stuff goes on the low priority queue. We use
* this to order frames returned out of the driver
* ahead of frames we collect in ieee80211_start.
*/
if (m->m_flags & M_ENCAP)
qhead = &psq->psq_head[0];
else
qhead = &psq->psq_head[1];
if (qhead->tail == NULL) {
struct mbuf *mh;
qhead->head = m;
/*
* Take care to adjust age when inserting the first
* frame of a queue and the other queue already has
* frames. We need to preserve the age difference
* relationship so ieee80211_node_psq_age works.
*/
if (qhead == &psq->psq_head[1]) {
mh = psq->psq_head[0].head;
if (mh != NULL)
age -= M_AGE_GET(mh);
} else {
mh = psq->psq_head[1].head;
if (mh != NULL) {
int nage = M_AGE_GET(mh) - age;
/* XXX is clamping to zero good 'nuf? */
M_AGE_SET(mh, nage < 0 ? 0 : nage);
}
}
} else {
qhead->tail->m_nextpkt = m;
age -= M_AGE_GET(qhead->head);
}
KASSERT(age >= 0, ("age %d", age));
M_AGE_SET(m, age);
m->m_nextpkt = NULL;
qhead->tail = m;
qhead->len++;
qlen = ++(psq->psq_len);
IEEE80211_PSQ_UNLOCK(psq);
IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
"save frame with age %d, %u now queued", age, qlen);
if (qlen == 1 && vap->iv_set_tim != NULL)
vap->iv_set_tim(ni, 1);
return 0;
}
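To make the age computation above concrete, a worked example with illustrative values, assuming the conventional 1 TU = 1024 us behind IEEE80211_TU_TO_MS:
/*
 * Worked example (illustrative values): ni_intval = 10, ic_bintval = 100.
 *
 *      (10 * 100) << 2           = 4000 TU   (4x the listen interval)
 *      IEEE80211_TU_TO_MS(4000) ~= 4096 ms   (1 TU = 1024 us)
 *      4096 / 1000               = 4 seconds
 *
 * so frames parked for this station are reclaimed by
 * ieee80211_node_psq_age() after sitting on the queue for roughly 4 s.
 */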
/*
* Move frames from the ps q to the vap's send queue
* and/or the driver's send queue; and kick the start
* method for each, as appropriate. Note we're careful
* to preserve packet ordering here.
*/
static void
pwrsave_flushq(struct ieee80211_node *ni)
{
struct ieee80211_psq *psq = &ni->ni_psq;
struct ieee80211com *ic = ni->ni_ic;
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211_psq_head *qhead;
- struct ifnet *parent, *ifp;
struct mbuf *parent_q = NULL, *ifp_q = NULL;
struct mbuf *m;
IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
"flush ps queue, %u packets queued", psq->psq_len);
IEEE80211_PSQ_LOCK(psq);
qhead = &psq->psq_head[0]; /* 802.11 frames */
if (qhead->head != NULL) {
/* XXX could dispatch through vap and check M_ENCAP */
- parent = vap->iv_ic->ic_ifp;
/* XXX need different driver interface */
/* XXX bypasses q max and OACTIVE */
parent_q = qhead->head;
qhead->head = qhead->tail = NULL;
qhead->len = 0;
- } else
- parent = NULL;
+ }
qhead = &psq->psq_head[1]; /* 802.3 frames */
if (qhead->head != NULL) {
- ifp = vap->iv_ifp;
/* XXX need different driver interface */
/* XXX bypasses q max and OACTIVE */
ifp_q = qhead->head;
qhead->head = qhead->tail = NULL;
qhead->len = 0;
- } else
- ifp = NULL;
+ }
psq->psq_len = 0;
IEEE80211_PSQ_UNLOCK(psq);
/* NB: do this outside the psq lock */
/* XXX packets might get reordered if parent is OACTIVE */
/* parent frames, should be encapsulated */
- if (parent != NULL) {
- while (parent_q != NULL) {
- m = parent_q;
- parent_q = m->m_nextpkt;
- m->m_nextpkt = NULL;
- /* must be encapsulated */
- KASSERT((m->m_flags & M_ENCAP),
- ("%s: parentq with non-M_ENCAP frame!\n",
- __func__));
- /*
- * For encaped frames, we need to free the node
- * reference upon failure.
- */
- if (ieee80211_parent_xmitpkt(ic, m) != 0)
- ieee80211_free_node(ni);
- }
+ while (parent_q != NULL) {
+ m = parent_q;
+ parent_q = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ /* must be encapsulated */
+ KASSERT((m->m_flags & M_ENCAP),
+ ("%s: parentq with non-M_ENCAP frame!\n",
+ __func__));
+ /*
+ * For encaped frames, we need to free the node
+ * reference upon failure.
+ */
+ if (ieee80211_parent_xmitpkt(ic, m) != 0)
+ ieee80211_free_node(ni);
}
/* VAP frames, aren't encapsulated */
- if (ifp != NULL) {
- while (ifp_q != NULL) {
- m = ifp_q;
- ifp_q = m->m_nextpkt;
- m->m_nextpkt = NULL;
- KASSERT((!(m->m_flags & M_ENCAP)),
- ("%s: vapq with M_ENCAP frame!\n", __func__));
- (void) ieee80211_vap_xmitpkt(vap, m);
- }
+ while (ifp_q != NULL) {
+ m = ifp_q;
+ ifp_q = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+ KASSERT((!(m->m_flags & M_ENCAP)),
+ ("%s: vapq with M_ENCAP frame!\n", __func__));
+ (void) ieee80211_vap_xmitpkt(vap, m);
}
}
/*
* Handle station power-save state change.
*/
void
ieee80211_node_pwrsave(struct ieee80211_node *ni, int enable)
{
struct ieee80211vap *vap = ni->ni_vap;
int update;
update = 0;
if (enable) {
if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) == 0) {
vap->iv_ps_sta++;
update = 1;
}
ni->ni_flags |= IEEE80211_NODE_PWR_MGT;
IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
"power save mode on, %u sta's in ps mode", vap->iv_ps_sta);
if (update)
vap->iv_update_ps(vap, vap->iv_ps_sta);
} else {
if (ni->ni_flags & IEEE80211_NODE_PWR_MGT) {
vap->iv_ps_sta--;
update = 1;
}
ni->ni_flags &= ~IEEE80211_NODE_PWR_MGT;
IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
"power save mode off, %u sta's in ps mode", vap->iv_ps_sta);
/* NB: order here is intentional so TIM is clear before flush */
if (vap->iv_set_tim != NULL)
vap->iv_set_tim(ni, 0);
if (update) {
/* NB if no sta's in ps, driver should flush mc q */
vap->iv_update_ps(vap, vap->iv_ps_sta);
}
if (ni->ni_psq.psq_len != 0)
pwrsave_flushq(ni);
}
}
/*
* Handle power-save state change in station mode.
*/
void
ieee80211_sta_pwrsave(struct ieee80211vap *vap, int enable)
{
struct ieee80211_node *ni = vap->iv_bss;
if (!((enable != 0) ^ ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) != 0)))
return;
IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
"sta power save mode %s", enable ? "on" : "off");
if (!enable) {
ni->ni_flags &= ~IEEE80211_NODE_PWR_MGT;
ieee80211_send_nulldata(ieee80211_ref_node(ni));
/*
* Flush any queued frames; we can do this immediately
* because we know they'll be queued behind the null
* data frame we send the ap.
* XXX can we use a data frame to take us out of ps?
*/
if (ni->ni_psq.psq_len != 0)
pwrsave_flushq(ni);
} else {
ni->ni_flags |= IEEE80211_NODE_PWR_MGT;
ieee80211_send_nulldata(ieee80211_ref_node(ni));
}
}
/*
* Handle being notified that we have data available for us in a TIM/ATIM.
*
* This may schedule a transition from _SLEEP -> _RUN if it's appropriate.
*
* In STA mode, we may have been put to sleep during a scan and need to be dragged
* back out of powersave mode.
*/
void
ieee80211_sta_tim_notify(struct ieee80211vap *vap, int set)
{
struct ieee80211com *ic = vap->iv_ic;
/*
* Schedule the driver state change. It'll happen at some point soon.
* Since the hardware shouldn't know that we're running just yet
* (and thus tell the peer that we're awake before we actually wake
* up said hardware), we leave the actual node state transition
* up to the transition to RUN.
*
* XXX TODO: verify that the transition to RUN will wake up the
* BSS node!
*/
IEEE80211_LOCK(vap->iv_ic);
if (set == 1 && vap->iv_state == IEEE80211_S_SLEEP) {
ieee80211_new_state_locked(vap, IEEE80211_S_RUN, 0);
IEEE80211_DPRINTF(vap, IEEE80211_MSG_POWER,
"%s: TIM=%d; wakeup\n", __func__, set);
} else if ((set == 1) && (ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN)) {
/*
* XXX only do this if we're in RUN state?
*/
IEEE80211_DPRINTF(vap, IEEE80211_MSG_POWER,
"%s: wake up from bgscan vap sleep\n",
__func__);
/*
* We may be in BGSCAN mode - this means the VAP is in STA
* mode powersave. If it is, we need to wake it up so we
* can process outbound traffic.
*/
vap->iv_sta_ps(vap, 0);
}
IEEE80211_UNLOCK(vap->iv_ic);
}
/*
* Timer check on whether the VAP has had any transmit activity.
*
* This may schedule a transition from _RUN -> _SLEEP if it's appropriate.
*/
void
ieee80211_sta_ps_timer_check(struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
/* XXX lock assert */
/* For now, only do this in STA mode */
if (! (vap->iv_caps & IEEE80211_C_SWSLEEP))
goto out;
if (vap->iv_opmode != IEEE80211_M_STA)
goto out;
/* If we're not at run state, bail */
if (vap->iv_state != IEEE80211_S_RUN)
goto out;
IEEE80211_DPRINTF(vap, IEEE80211_MSG_POWER,
"%s: lastdata=%llu, ticks=%llu\n",
__func__, (unsigned long long) ic->ic_lastdata,
(unsigned long long) ticks);
/* If powersave is disabled on the VAP, don't bother */
if (! (vap->iv_flags & IEEE80211_F_PMGTON))
goto out;
/* If we've done any data within our idle interval, bail */
/* XXX hard-coded to one second for now, ew! */
if (time_after(ic->ic_lastdata + 500, ticks))
goto out;
/*
* Signify we're going into power save and transition the
* node to powersave.
*/
if ((vap->iv_bss->ni_flags & IEEE80211_NODE_PWR_MGT) == 0)
vap->iv_sta_ps(vap, 1);
/*
* XXX The driver has to handle the fact that we're going
* to sleep but frames may still be transmitted;
* hopefully it and/or us will do the right thing and mark any
* transmitted frames with PWRMGT set to 1.
*/
ieee80211_new_state_locked(vap, IEEE80211_S_SLEEP, 0);
IEEE80211_DPRINTF(vap, IEEE80211_MSG_POWER,
"%s: time delta=%d msec\n", __func__,
(int) ticks_to_msecs(ticks - ic->ic_lastdata));
out:
return;
}
Index: head/sys/net80211/ieee80211_proto.c
===================================================================
--- head/sys/net80211/ieee80211_proto.c (revision 287196)
+++ head/sys/net80211/ieee80211_proto.c (revision 287197)
@@ -1,2033 +1,2029 @@
/*-
* Copyright (c) 2001 Atsushi Onoe
* Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* IEEE 802.11 protocol support.
*/
#include "opt_inet.h"
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h> /* XXX for ether_sprintf */
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_adhoc.h>
#include <net80211/ieee80211_sta.h>
#include <net80211/ieee80211_hostap.h>
#include <net80211/ieee80211_wds.h>
#ifdef IEEE80211_SUPPORT_MESH
#include <net80211/ieee80211_mesh.h>
#endif
#include <net80211/ieee80211_monitor.h>
#include <net80211/ieee80211_input.h>
/* XXX tunables */
#define AGGRESSIVE_MODE_SWITCH_HYSTERESIS 3 /* pkts / 100ms */
#define HIGH_PRI_SWITCH_THRESH 10 /* pkts / 100ms */
const char *ieee80211_mgt_subtype_name[] = {
"assoc_req", "assoc_resp", "reassoc_req", "reassoc_resp",
"probe_req", "probe_resp", "reserved#6", "reserved#7",
"beacon", "atim", "disassoc", "auth",
"deauth", "action", "action_noack", "reserved#15"
};
const char *ieee80211_ctl_subtype_name[] = {
"reserved#0", "reserved#1", "reserved#2", "reserved#3",
"reserved#3", "reserved#5", "reserved#6", "reserved#7",
"reserved#8", "reserved#9", "ps_poll", "rts",
"cts", "ack", "cf_end", "cf_end_ack"
};
const char *ieee80211_opmode_name[IEEE80211_OPMODE_MAX] = {
"IBSS", /* IEEE80211_M_IBSS */
"STA", /* IEEE80211_M_STA */
"WDS", /* IEEE80211_M_WDS */
"AHDEMO", /* IEEE80211_M_AHDEMO */
"HOSTAP", /* IEEE80211_M_HOSTAP */
"MONITOR", /* IEEE80211_M_MONITOR */
"MBSS" /* IEEE80211_M_MBSS */
};
const char *ieee80211_state_name[IEEE80211_S_MAX] = {
"INIT", /* IEEE80211_S_INIT */
"SCAN", /* IEEE80211_S_SCAN */
"AUTH", /* IEEE80211_S_AUTH */
"ASSOC", /* IEEE80211_S_ASSOC */
"CAC", /* IEEE80211_S_CAC */
"RUN", /* IEEE80211_S_RUN */
"CSA", /* IEEE80211_S_CSA */
"SLEEP", /* IEEE80211_S_SLEEP */
};
const char *ieee80211_wme_acnames[] = {
"WME_AC_BE",
"WME_AC_BK",
"WME_AC_VI",
"WME_AC_VO",
"WME_UPSD",
};
static void beacon_miss(void *, int);
static void beacon_swmiss(void *, int);
static void parent_updown(void *, int);
static void update_mcast(void *, int);
static void update_promisc(void *, int);
static void update_channel(void *, int);
static void update_chw(void *, int);
static void ieee80211_newstate_cb(void *, int);
static int
null_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
const struct ieee80211_bpf_params *params)
{
ic_printf(ni->ni_ic, "missing ic_raw_xmit callback, drop frame\n");
m_freem(m);
return ENETDOWN;
}
void
ieee80211_proto_attach(struct ieee80211com *ic)
{
- struct ifnet *ifp = ic->ic_ifp;
+ uint8_t hdrlen;
/* override the 802.3 setting */
- ifp->if_hdrlen = ic->ic_headroom
+ hdrlen = ic->ic_headroom
+ sizeof(struct ieee80211_qosframe_addr4)
+ IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN
+ IEEE80211_WEP_EXTIVLEN;
/* XXX no way to recalculate on ifdetach */
- if (ALIGN(ifp->if_hdrlen) > max_linkhdr) {
+ if (ALIGN(hdrlen) > max_linkhdr) {
/* XXX sanity check... */
- max_linkhdr = ALIGN(ifp->if_hdrlen);
+ max_linkhdr = ALIGN(hdrlen);
max_hdr = max_linkhdr + max_protohdr;
max_datalen = MHLEN - max_hdr;
}
ic->ic_protmode = IEEE80211_PROT_CTSONLY;
- TASK_INIT(&ic->ic_parent_task, 0, parent_updown, ifp);
+ TASK_INIT(&ic->ic_parent_task, 0, parent_updown, ic);
TASK_INIT(&ic->ic_mcast_task, 0, update_mcast, ic);
TASK_INIT(&ic->ic_promisc_task, 0, update_promisc, ic);
TASK_INIT(&ic->ic_chan_task, 0, update_channel, ic);
TASK_INIT(&ic->ic_bmiss_task, 0, beacon_miss, ic);
TASK_INIT(&ic->ic_chw_task, 0, update_chw, ic);
ic->ic_wme.wme_hipri_switch_hysteresis =
AGGRESSIVE_MODE_SWITCH_HYSTERESIS;
/* initialize management frame handlers */
ic->ic_send_mgmt = ieee80211_send_mgmt;
ic->ic_raw_xmit = null_raw_xmit;
ieee80211_adhoc_attach(ic);
ieee80211_sta_attach(ic);
ieee80211_wds_attach(ic);
ieee80211_hostap_attach(ic);
#ifdef IEEE80211_SUPPORT_MESH
ieee80211_mesh_attach(ic);
#endif
ieee80211_monitor_attach(ic);
}
void
ieee80211_proto_detach(struct ieee80211com *ic)
{
ieee80211_monitor_detach(ic);
#ifdef IEEE80211_SUPPORT_MESH
ieee80211_mesh_detach(ic);
#endif
ieee80211_hostap_detach(ic);
ieee80211_wds_detach(ic);
ieee80211_adhoc_detach(ic);
ieee80211_sta_detach(ic);
}
static void
null_update_beacon(struct ieee80211vap *vap, int item)
{
}
void
ieee80211_proto_vattach(struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct ifnet *ifp = vap->iv_ifp;
int i;
/* override the 802.3 setting */
- ifp->if_hdrlen = ic->ic_ifp->if_hdrlen;
+ ifp->if_hdrlen = ic->ic_headroom
+ + sizeof(struct ieee80211_qosframe_addr4)
+ + IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN
+ + IEEE80211_WEP_EXTIVLEN;
vap->iv_rtsthreshold = IEEE80211_RTS_DEFAULT;
vap->iv_fragthreshold = IEEE80211_FRAG_DEFAULT;
vap->iv_bmiss_max = IEEE80211_BMISS_MAX;
callout_init_mtx(&vap->iv_swbmiss, IEEE80211_LOCK_OBJ(ic), 0);
callout_init(&vap->iv_mgtsend, 1);
TASK_INIT(&vap->iv_nstate_task, 0, ieee80211_newstate_cb, vap);
TASK_INIT(&vap->iv_swbmiss_task, 0, beacon_swmiss, vap);
/*
* Install default tx rate handling: no fixed rate, lowest
* supported rate for mgmt and multicast frames. Default
* max retry count. These settings can be changed by the
* driver and/or user applications.
*/
for (i = IEEE80211_MODE_11A; i < IEEE80211_MODE_MAX; i++) {
const struct ieee80211_rateset *rs = &ic->ic_sup_rates[i];
vap->iv_txparms[i].ucastrate = IEEE80211_FIXED_RATE_NONE;
/*
* Setting the management rate to MCS 0 assumes that the
* BSS Basic rate set is empty and the BSS Basic MCS set
* is not.
*
* Since we're not checking this, default to the lowest
* defined rate for this mode.
*
* At least one 11n AP (DLINK DIR-825) is reported to drop
* some MCS management traffic (e.g. BA response frames).
*
* See also: 9.6.0 of the 802.11n-2009 specification.
*/
#ifdef NOTYET
if (i == IEEE80211_MODE_11NA || i == IEEE80211_MODE_11NG) {
vap->iv_txparms[i].mgmtrate = 0 | IEEE80211_RATE_MCS;
vap->iv_txparms[i].mcastrate = 0 | IEEE80211_RATE_MCS;
} else {
vap->iv_txparms[i].mgmtrate =
rs->rs_rates[0] & IEEE80211_RATE_VAL;
vap->iv_txparms[i].mcastrate =
rs->rs_rates[0] & IEEE80211_RATE_VAL;
}
#endif
vap->iv_txparms[i].mgmtrate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
vap->iv_txparms[i].mcastrate = rs->rs_rates[0] & IEEE80211_RATE_VAL;
vap->iv_txparms[i].maxretry = IEEE80211_TXMAX_DEFAULT;
}
vap->iv_roaming = IEEE80211_ROAMING_AUTO;
vap->iv_update_beacon = null_update_beacon;
vap->iv_deliver_data = ieee80211_deliver_data;
/* attach support for operating mode */
ic->ic_vattach[vap->iv_opmode](vap);
}
void
ieee80211_proto_vdetach(struct ieee80211vap *vap)
{
#define FREEAPPIE(ie) do { \
if (ie != NULL) \
IEEE80211_FREE(ie, M_80211_NODE_IE); \
} while (0)
/*
* Detach operating mode module.
*/
if (vap->iv_opdetach != NULL)
vap->iv_opdetach(vap);
/*
* This should not be needed as we detach when resetting
* the state but be conservative here since the
* authenticator may do things like spawn kernel threads.
*/
if (vap->iv_auth->ia_detach != NULL)
vap->iv_auth->ia_detach(vap);
/*
* Detach any ACL'ator.
*/
if (vap->iv_acl != NULL)
vap->iv_acl->iac_detach(vap);
FREEAPPIE(vap->iv_appie_beacon);
FREEAPPIE(vap->iv_appie_probereq);
FREEAPPIE(vap->iv_appie_proberesp);
FREEAPPIE(vap->iv_appie_assocreq);
FREEAPPIE(vap->iv_appie_assocresp);
FREEAPPIE(vap->iv_appie_wpa);
#undef FREEAPPIE
}
/*
* Simple-minded authenticator module support.
*/
#define IEEE80211_AUTH_MAX (IEEE80211_AUTH_WPA+1)
/* XXX well-known names */
static const char *auth_modnames[IEEE80211_AUTH_MAX] = {
"wlan_internal", /* IEEE80211_AUTH_NONE */
"wlan_internal", /* IEEE80211_AUTH_OPEN */
"wlan_internal", /* IEEE80211_AUTH_SHARED */
"wlan_xauth", /* IEEE80211_AUTH_8021X */
"wlan_internal", /* IEEE80211_AUTH_AUTO */
"wlan_xauth", /* IEEE80211_AUTH_WPA */
};
static const struct ieee80211_authenticator *authenticators[IEEE80211_AUTH_MAX];
static const struct ieee80211_authenticator auth_internal = {
.ia_name = "wlan_internal",
.ia_attach = NULL,
.ia_detach = NULL,
.ia_node_join = NULL,
.ia_node_leave = NULL,
};
/*
* Setup internal authenticators once; they are never unregistered.
*/
static void
ieee80211_auth_setup(void)
{
ieee80211_authenticator_register(IEEE80211_AUTH_OPEN, &auth_internal);
ieee80211_authenticator_register(IEEE80211_AUTH_SHARED, &auth_internal);
ieee80211_authenticator_register(IEEE80211_AUTH_AUTO, &auth_internal);
}
SYSINIT(wlan_auth, SI_SUB_DRIVERS, SI_ORDER_FIRST, ieee80211_auth_setup, NULL);
const struct ieee80211_authenticator *
ieee80211_authenticator_get(int auth)
{
if (auth >= IEEE80211_AUTH_MAX)
return NULL;
if (authenticators[auth] == NULL)
ieee80211_load_module(auth_modnames[auth]);
return authenticators[auth];
}
void
ieee80211_authenticator_register(int type,
const struct ieee80211_authenticator *auth)
{
if (type >= IEEE80211_AUTH_MAX)
return;
authenticators[type] = auth;
}
void
ieee80211_authenticator_unregister(int type)
{
if (type >= IEEE80211_AUTH_MAX)
return;
authenticators[type] = NULL;
}
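A hedged sketch of how an external module (the "wlan_xauth" style listed in auth_modnames) would use the register/unregister pair above; the callbacks are left NULL exactly as in the internal authenticator shown earlier, and myauth_load()/myauth_unload() stand in for the module's load/unload handler.
/* Hypothetical 802.1X/WPA authenticator module glue. */
static const struct ieee80211_authenticator myauth = {
        .ia_name        = "wlan_xauth",
        .ia_attach      = NULL,         /* a real module hooks these */
        .ia_detach      = NULL,
        .ia_node_join   = NULL,
        .ia_node_leave  = NULL,
};

static void
myauth_load(void)
{
        ieee80211_authenticator_register(IEEE80211_AUTH_8021X, &myauth);
        ieee80211_authenticator_register(IEEE80211_AUTH_WPA, &myauth);
}

static void
myauth_unload(void)
{
        ieee80211_authenticator_unregister(IEEE80211_AUTH_8021X);
        ieee80211_authenticator_unregister(IEEE80211_AUTH_WPA);
}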
/*
* Very simple-minded ACL module support.
*/
/* XXX just one for now */
static const struct ieee80211_aclator *acl = NULL;
void
ieee80211_aclator_register(const struct ieee80211_aclator *iac)
{
printf("wlan: %s acl policy registered\n", iac->iac_name);
acl = iac;
}
void
ieee80211_aclator_unregister(const struct ieee80211_aclator *iac)
{
if (acl == iac)
acl = NULL;
printf("wlan: %s acl policy unregistered\n", iac->iac_name);
}
const struct ieee80211_aclator *
ieee80211_aclator_get(const char *name)
{
if (acl == NULL)
ieee80211_load_module("wlan_acl");
return acl != NULL && strcmp(acl->iac_name, name) == 0 ? acl : NULL;
}
void
ieee80211_print_essid(const uint8_t *essid, int len)
{
const uint8_t *p;
int i;
if (len > IEEE80211_NWID_LEN)
len = IEEE80211_NWID_LEN;
/* determine printable or not */
for (i = 0, p = essid; i < len; i++, p++) {
if (*p < ' ' || *p > 0x7e)
break;
}
if (i == len) {
printf("\"");
for (i = 0, p = essid; i < len; i++, p++)
printf("%c", *p);
printf("\"");
} else {
printf("0x");
for (i = 0, p = essid; i < len; i++, p++)
printf("%02x", *p);
}
}
void
ieee80211_dump_pkt(struct ieee80211com *ic,
const uint8_t *buf, int len, int rate, int rssi)
{
const struct ieee80211_frame *wh;
int i;
wh = (const struct ieee80211_frame *)buf;
switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
case IEEE80211_FC1_DIR_NODS:
printf("NODS %s", ether_sprintf(wh->i_addr2));
printf("->%s", ether_sprintf(wh->i_addr1));
printf("(%s)", ether_sprintf(wh->i_addr3));
break;
case IEEE80211_FC1_DIR_TODS:
printf("TODS %s", ether_sprintf(wh->i_addr2));
printf("->%s", ether_sprintf(wh->i_addr3));
printf("(%s)", ether_sprintf(wh->i_addr1));
break;
case IEEE80211_FC1_DIR_FROMDS:
printf("FRDS %s", ether_sprintf(wh->i_addr3));
printf("->%s", ether_sprintf(wh->i_addr1));
printf("(%s)", ether_sprintf(wh->i_addr2));
break;
case IEEE80211_FC1_DIR_DSTODS:
printf("DSDS %s", ether_sprintf((const uint8_t *)&wh[1]));
printf("->%s", ether_sprintf(wh->i_addr3));
printf("(%s", ether_sprintf(wh->i_addr2));
printf("->%s)", ether_sprintf(wh->i_addr1));
break;
}
switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
case IEEE80211_FC0_TYPE_DATA:
printf(" data");
break;
case IEEE80211_FC0_TYPE_MGT:
printf(" %s", ieee80211_mgt_subtype_name[
(wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK)
>> IEEE80211_FC0_SUBTYPE_SHIFT]);
break;
default:
printf(" type#%d", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK);
break;
}
if (IEEE80211_QOS_HAS_SEQ(wh)) {
const struct ieee80211_qosframe *qwh =
(const struct ieee80211_qosframe *)buf;
printf(" QoS [TID %u%s]", qwh->i_qos[0] & IEEE80211_QOS_TID,
qwh->i_qos[0] & IEEE80211_QOS_ACKPOLICY ? " ACM" : "");
}
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
int off;
off = ieee80211_anyhdrspace(ic, wh);
printf(" WEP [IV %.02x %.02x %.02x",
buf[off+0], buf[off+1], buf[off+2]);
if (buf[off+IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)
printf(" %.02x %.02x %.02x",
buf[off+4], buf[off+5], buf[off+6]);
printf(" KID %u]", buf[off+IEEE80211_WEP_IVLEN] >> 6);
}
if (rate >= 0)
printf(" %dM", rate / 2);
if (rssi >= 0)
printf(" +%d", rssi);
printf("\n");
if (len > 0) {
for (i = 0; i < len; i++) {
if ((i & 1) == 0)
printf(" ");
printf("%02x", buf[i]);
}
printf("\n");
}
}
static __inline int
findrix(const struct ieee80211_rateset *rs, int r)
{
int i;
for (i = 0; i < rs->rs_nrates; i++)
if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == r)
return i;
return -1;
}
int
ieee80211_fix_rate(struct ieee80211_node *ni,
struct ieee80211_rateset *nrs, int flags)
{
#define RV(v) ((v) & IEEE80211_RATE_VAL)
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
int i, j, rix, error;
int okrate, badrate, fixedrate, ucastrate;
const struct ieee80211_rateset *srs;
uint8_t r;
error = 0;
okrate = badrate = 0;
ucastrate = vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)].ucastrate;
if (ucastrate != IEEE80211_FIXED_RATE_NONE) {
/*
* Workaround awkwardness with fixed rate. We are called
* to check both the legacy rate set and the HT rate set
* but we must apply any legacy fixed rate check only to the
* legacy rate set and vice versa. We cannot tell what type
* of rate set we've been given (legacy or HT) but we can
* distinguish the fixed rate type (MCS have 0x80 set).
* So to deal with this the caller communicates whether to
* check MCS or legacy rate using the flags and we use the
* type of any fixed rate to avoid applying an MCS to a
* legacy rate and vice versa.
*/
if (ucastrate & 0x80) {
if (flags & IEEE80211_F_DOFRATE)
flags &= ~IEEE80211_F_DOFRATE;
} else if ((ucastrate & 0x80) == 0) {
if (flags & IEEE80211_F_DOFMCS)
flags &= ~IEEE80211_F_DOFMCS;
}
/* NB: required to make MCS match below work */
ucastrate &= IEEE80211_RATE_VAL;
}
fixedrate = IEEE80211_FIXED_RATE_NONE;
/*
* XXX we are called to process both MCS and legacy rates;
* we must use the appropriate basic rate set or chaos will
* ensue; for now callers that want MCS must supply
* IEEE80211_F_DOBRS; at some point we'll need to split this
* function so there are two variants, one for MCS and one
* for legacy rates.
*/
if (flags & IEEE80211_F_DOBRS)
srs = (const struct ieee80211_rateset *)
ieee80211_get_suphtrates(ic, ni->ni_chan);
else
srs = ieee80211_get_suprates(ic, ni->ni_chan);
for (i = 0; i < nrs->rs_nrates; ) {
if (flags & IEEE80211_F_DOSORT) {
/*
* Sort rates.
*/
for (j = i + 1; j < nrs->rs_nrates; j++) {
if (RV(nrs->rs_rates[i]) > RV(nrs->rs_rates[j])) {
r = nrs->rs_rates[i];
nrs->rs_rates[i] = nrs->rs_rates[j];
nrs->rs_rates[j] = r;
}
}
}
r = nrs->rs_rates[i] & IEEE80211_RATE_VAL;
badrate = r;
/*
* Check for fixed rate.
*/
if (r == ucastrate)
fixedrate = r;
/*
* Check against supported rates.
*/
rix = findrix(srs, r);
if (flags & IEEE80211_F_DONEGO) {
if (rix < 0) {
/*
* A rate in the node's rate set is not
* supported. If this is a basic rate and we
* are operating as a STA then this is an error.
* Otherwise we just discard/ignore the rate.
*/
if ((flags & IEEE80211_F_JOIN) &&
(nrs->rs_rates[i] & IEEE80211_RATE_BASIC))
error++;
} else if ((flags & IEEE80211_F_JOIN) == 0) {
/*
* Overwrite with the supported rate
* value so any basic rate bit is set.
*/
nrs->rs_rates[i] = srs->rs_rates[rix];
}
}
if ((flags & IEEE80211_F_DODEL) && rix < 0) {
/*
* Delete unacceptable rates.
*/
nrs->rs_nrates--;
for (j = i; j < nrs->rs_nrates; j++)
nrs->rs_rates[j] = nrs->rs_rates[j + 1];
nrs->rs_rates[j] = 0;
continue;
}
if (rix >= 0)
okrate = nrs->rs_rates[i];
i++;
}
if (okrate == 0 || error != 0 ||
((flags & (IEEE80211_F_DOFRATE|IEEE80211_F_DOFMCS)) &&
fixedrate != ucastrate)) {
IEEE80211_NOTE(vap, IEEE80211_MSG_XRATE | IEEE80211_MSG_11N, ni,
"%s: flags 0x%x okrate %d error %d fixedrate 0x%x "
"ucastrate %x\n", __func__, fixedrate, ucastrate, flags);
return badrate | IEEE80211_RATE_BASIC;
} else
return RV(okrate);
#undef RV
}
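A hedged usage sketch of the function above; all of the flag names appear in the code, but the particular combination, the ni_rates member and the example_negotiate_rates() wrapper are assumptions about a typical caller, so treat this as illustrative rather than the canonical invocation.
/*
 * Illustrative: negotiate a peer's legacy rate set, sorting it,
 * dropping unsupported entries and honouring any fixed unicast rate.
 * A result with IEEE80211_RATE_BASIC set means a required rate was
 * missing and the peer should be rejected.
 */
static int
example_negotiate_rates(struct ieee80211_node *ni)
{
        int r;

        r = ieee80211_fix_rate(ni, &ni->ni_rates,
            IEEE80211_F_DOSORT | IEEE80211_F_DONEGO |
            IEEE80211_F_DODEL | IEEE80211_F_DOFRATE);
        return (r & IEEE80211_RATE_BASIC) ? EINVAL : 0;
}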
/*
* Reset 11g-related state.
*/
void
ieee80211_reset_erp(struct ieee80211com *ic)
{
ic->ic_flags &= ~IEEE80211_F_USEPROT;
ic->ic_nonerpsta = 0;
ic->ic_longslotsta = 0;
/*
* Short slot time is enabled only when operating in 11g
* and not in an IBSS. We must also honor whether or not
* the driver is capable of doing it.
*/
ieee80211_set_shortslottime(ic,
IEEE80211_IS_CHAN_A(ic->ic_curchan) ||
IEEE80211_IS_CHAN_HT(ic->ic_curchan) ||
(IEEE80211_IS_CHAN_ANYG(ic->ic_curchan) &&
ic->ic_opmode == IEEE80211_M_HOSTAP &&
(ic->ic_caps & IEEE80211_C_SHSLOT)));
/*
* Set short preamble and ERP barker-preamble flags.
*/
if (IEEE80211_IS_CHAN_A(ic->ic_curchan) ||
(ic->ic_caps & IEEE80211_C_SHPREAMBLE)) {
ic->ic_flags |= IEEE80211_F_SHPREAMBLE;
ic->ic_flags &= ~IEEE80211_F_USEBARKER;
} else {
ic->ic_flags &= ~IEEE80211_F_SHPREAMBLE;
ic->ic_flags |= IEEE80211_F_USEBARKER;
}
}
/*
* Set the short slot time state and notify the driver.
*/
void
ieee80211_set_shortslottime(struct ieee80211com *ic, int onoff)
{
if (onoff)
ic->ic_flags |= IEEE80211_F_SHSLOT;
else
ic->ic_flags &= ~IEEE80211_F_SHSLOT;
/* notify driver */
if (ic->ic_updateslot != NULL)
ic->ic_updateslot(ic);
}
/*
* Check if the specified rate set supports ERP.
* NB: the rate set is assumed to be sorted.
*/
int
ieee80211_iserp_rateset(const struct ieee80211_rateset *rs)
{
static const int rates[] = { 2, 4, 11, 22, 12, 24, 48 };
int i, j;
if (rs->rs_nrates < nitems(rates))
return 0;
for (i = 0; i < nitems(rates); i++) {
for (j = 0; j < rs->rs_nrates; j++) {
int r = rs->rs_rates[j] & IEEE80211_RATE_VAL;
if (rates[i] == r)
goto next;
if (r > rates[i])
return 0;
}
return 0;
next:
;
}
return 1;
}
/*
* Mark the basic rates for the rate table based on the
* operating mode. For real 11g we mark all the 11b rates
* and 6, 12, and 24 OFDM. For 11b compatibility we mark only
* 11b rates. There's also a pseudo 11a-mode used to mark only
* the basic OFDM rates.
*/
static void
setbasicrates(struct ieee80211_rateset *rs,
enum ieee80211_phymode mode, int add)
{
static const struct ieee80211_rateset basic[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_11A] = { 3, { 12, 24, 48 } },
[IEEE80211_MODE_11B] = { 2, { 2, 4 } },
/* NB: mixed b/g */
[IEEE80211_MODE_11G] = { 4, { 2, 4, 11, 22 } },
[IEEE80211_MODE_TURBO_A] = { 3, { 12, 24, 48 } },
[IEEE80211_MODE_TURBO_G] = { 4, { 2, 4, 11, 22 } },
[IEEE80211_MODE_STURBO_A] = { 3, { 12, 24, 48 } },
[IEEE80211_MODE_HALF] = { 3, { 6, 12, 24 } },
[IEEE80211_MODE_QUARTER] = { 3, { 3, 6, 12 } },
[IEEE80211_MODE_11NA] = { 3, { 12, 24, 48 } },
/* NB: mixed b/g */
[IEEE80211_MODE_11NG] = { 4, { 2, 4, 11, 22 } },
};
int i, j;
for (i = 0; i < rs->rs_nrates; i++) {
if (!add)
rs->rs_rates[i] &= IEEE80211_RATE_VAL;
for (j = 0; j < basic[mode].rs_nrates; j++)
if (basic[mode].rs_rates[j] == rs->rs_rates[i]) {
rs->rs_rates[i] |= IEEE80211_RATE_BASIC;
break;
}
}
}
/*
* Set the basic rates in a rate set.
*/
void
ieee80211_setbasicrates(struct ieee80211_rateset *rs,
enum ieee80211_phymode mode)
{
setbasicrates(rs, mode, 0);
}
/*
* Add basic rates to a rate set.
*/
void
ieee80211_addbasicrates(struct ieee80211_rateset *rs,
enum ieee80211_phymode mode)
{
setbasicrates(rs, mode, 1);
}
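A small worked example of the marking done by setbasicrates(); rates are in 500 kb/s units, and IEEE80211_RATE_BASIC is assumed to be the usual 0x80 flag bit. example_mark_basic() is a hypothetical name.
/* Illustrative: mark the basic rates in a typical 11g rate set. */
static void
example_mark_basic(void)
{
        struct ieee80211_rateset rs = {
                .rs_nrates = 12,
                .rs_rates = { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 },
        };

        ieee80211_setbasicrates(&rs, IEEE80211_MODE_11G);
        /*
         * Per basic[IEEE80211_MODE_11G] above, only the 11b rates
         * 1/2/5.5/11 Mb/s get IEEE80211_RATE_BASIC or'd in:
         *   { 0x82, 0x84, 0x8b, 0x96, 12, 18, 24, 36, 48, 72, 96, 108 }
         */
}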
/*
* WME protocol support.
*
* The default 11a/b/g/n parameters come from the WiFi Alliance WMM
* System Interoperability Test Plan (v1.4, Appendix F) and the 802.11n
* Draft 2.0 Test Plan (Appendix D).
*
* Static/Dynamic Turbo mode settings come from Atheros.
*/
typedef struct phyParamType {
uint8_t aifsn;
uint8_t logcwmin;
uint8_t logcwmax;
uint16_t txopLimit;
uint8_t acm;
} paramType;
static const struct phyParamType phyParamForAC_BE[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = { 3, 4, 6, 0, 0 },
[IEEE80211_MODE_11A] = { 3, 4, 6, 0, 0 },
[IEEE80211_MODE_11B] = { 3, 4, 6, 0, 0 },
[IEEE80211_MODE_11G] = { 3, 4, 6, 0, 0 },
[IEEE80211_MODE_FH] = { 3, 4, 6, 0, 0 },
[IEEE80211_MODE_TURBO_A]= { 2, 3, 5, 0, 0 },
[IEEE80211_MODE_TURBO_G]= { 2, 3, 5, 0, 0 },
[IEEE80211_MODE_STURBO_A]={ 2, 3, 5, 0, 0 },
[IEEE80211_MODE_HALF] = { 3, 4, 6, 0, 0 },
[IEEE80211_MODE_QUARTER]= { 3, 4, 6, 0, 0 },
[IEEE80211_MODE_11NA] = { 3, 4, 6, 0, 0 },
[IEEE80211_MODE_11NG] = { 3, 4, 6, 0, 0 },
};
static const struct phyParamType phyParamForAC_BK[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = { 7, 4, 10, 0, 0 },
[IEEE80211_MODE_11A] = { 7, 4, 10, 0, 0 },
[IEEE80211_MODE_11B] = { 7, 4, 10, 0, 0 },
[IEEE80211_MODE_11G] = { 7, 4, 10, 0, 0 },
[IEEE80211_MODE_FH] = { 7, 4, 10, 0, 0 },
[IEEE80211_MODE_TURBO_A]= { 7, 3, 10, 0, 0 },
[IEEE80211_MODE_TURBO_G]= { 7, 3, 10, 0, 0 },
[IEEE80211_MODE_STURBO_A]={ 7, 3, 10, 0, 0 },
[IEEE80211_MODE_HALF] = { 7, 4, 10, 0, 0 },
[IEEE80211_MODE_QUARTER]= { 7, 4, 10, 0, 0 },
[IEEE80211_MODE_11NA] = { 7, 4, 10, 0, 0 },
[IEEE80211_MODE_11NG] = { 7, 4, 10, 0, 0 },
};
static const struct phyParamType phyParamForAC_VI[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = { 1, 3, 4, 94, 0 },
[IEEE80211_MODE_11A] = { 1, 3, 4, 94, 0 },
[IEEE80211_MODE_11B] = { 1, 3, 4, 188, 0 },
[IEEE80211_MODE_11G] = { 1, 3, 4, 94, 0 },
[IEEE80211_MODE_FH] = { 1, 3, 4, 188, 0 },
[IEEE80211_MODE_TURBO_A]= { 1, 2, 3, 94, 0 },
[IEEE80211_MODE_TURBO_G]= { 1, 2, 3, 94, 0 },
[IEEE80211_MODE_STURBO_A]={ 1, 2, 3, 94, 0 },
[IEEE80211_MODE_HALF] = { 1, 3, 4, 94, 0 },
[IEEE80211_MODE_QUARTER]= { 1, 3, 4, 94, 0 },
[IEEE80211_MODE_11NA] = { 1, 3, 4, 94, 0 },
[IEEE80211_MODE_11NG] = { 1, 3, 4, 94, 0 },
};
static const struct phyParamType phyParamForAC_VO[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = { 1, 2, 3, 47, 0 },
[IEEE80211_MODE_11A] = { 1, 2, 3, 47, 0 },
[IEEE80211_MODE_11B] = { 1, 2, 3, 102, 0 },
[IEEE80211_MODE_11G] = { 1, 2, 3, 47, 0 },
[IEEE80211_MODE_FH] = { 1, 2, 3, 102, 0 },
[IEEE80211_MODE_TURBO_A]= { 1, 2, 2, 47, 0 },
[IEEE80211_MODE_TURBO_G]= { 1, 2, 2, 47, 0 },
[IEEE80211_MODE_STURBO_A]={ 1, 2, 2, 47, 0 },
[IEEE80211_MODE_HALF] = { 1, 2, 3, 47, 0 },
[IEEE80211_MODE_QUARTER]= { 1, 2, 3, 47, 0 },
[IEEE80211_MODE_11NA] = { 1, 2, 3, 47, 0 },
[IEEE80211_MODE_11NG] = { 1, 2, 3, 47, 0 },
};
static const struct phyParamType bssPhyParamForAC_BE[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = { 3, 4, 10, 0, 0 },
[IEEE80211_MODE_11A] = { 3, 4, 10, 0, 0 },
[IEEE80211_MODE_11B] = { 3, 4, 10, 0, 0 },
[IEEE80211_MODE_11G] = { 3, 4, 10, 0, 0 },
[IEEE80211_MODE_FH] = { 3, 4, 10, 0, 0 },
[IEEE80211_MODE_TURBO_A]= { 2, 3, 10, 0, 0 },
[IEEE80211_MODE_TURBO_G]= { 2, 3, 10, 0, 0 },
[IEEE80211_MODE_STURBO_A]={ 2, 3, 10, 0, 0 },
[IEEE80211_MODE_HALF] = { 3, 4, 10, 0, 0 },
[IEEE80211_MODE_QUARTER]= { 3, 4, 10, 0, 0 },
[IEEE80211_MODE_11NA] = { 3, 4, 10, 0, 0 },
[IEEE80211_MODE_11NG] = { 3, 4, 10, 0, 0 },
};
static const struct phyParamType bssPhyParamForAC_VI[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = { 2, 3, 4, 94, 0 },
[IEEE80211_MODE_11A] = { 2, 3, 4, 94, 0 },
[IEEE80211_MODE_11B] = { 2, 3, 4, 188, 0 },
[IEEE80211_MODE_11G] = { 2, 3, 4, 94, 0 },
[IEEE80211_MODE_FH] = { 2, 3, 4, 188, 0 },
[IEEE80211_MODE_TURBO_A]= { 2, 2, 3, 94, 0 },
[IEEE80211_MODE_TURBO_G]= { 2, 2, 3, 94, 0 },
[IEEE80211_MODE_STURBO_A]={ 2, 2, 3, 94, 0 },
[IEEE80211_MODE_HALF] = { 2, 3, 4, 94, 0 },
[IEEE80211_MODE_QUARTER]= { 2, 3, 4, 94, 0 },
[IEEE80211_MODE_11NA] = { 2, 3, 4, 94, 0 },
[IEEE80211_MODE_11NG] = { 2, 3, 4, 94, 0 },
};
static const struct phyParamType bssPhyParamForAC_VO[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = { 2, 2, 3, 47, 0 },
[IEEE80211_MODE_11A] = { 2, 2, 3, 47, 0 },
[IEEE80211_MODE_11B] = { 2, 2, 3, 102, 0 },
[IEEE80211_MODE_11G] = { 2, 2, 3, 47, 0 },
[IEEE80211_MODE_FH] = { 2, 2, 3, 102, 0 },
[IEEE80211_MODE_TURBO_A]= { 1, 2, 2, 47, 0 },
[IEEE80211_MODE_TURBO_G]= { 1, 2, 2, 47, 0 },
[IEEE80211_MODE_STURBO_A]={ 1, 2, 2, 47, 0 },
[IEEE80211_MODE_HALF] = { 2, 2, 3, 47, 0 },
[IEEE80211_MODE_QUARTER]= { 2, 2, 3, 47, 0 },
[IEEE80211_MODE_11NA] = { 2, 2, 3, 47, 0 },
[IEEE80211_MODE_11NG] = { 2, 2, 3, 47, 0 },
};
static void
_setifsparams(struct wmeParams *wmep, const paramType *phy)
{
wmep->wmep_aifsn = phy->aifsn;
wmep->wmep_logcwmin = phy->logcwmin;
wmep->wmep_logcwmax = phy->logcwmax;
wmep->wmep_txopLimit = phy->txopLimit;
}
static void
setwmeparams(struct ieee80211vap *vap, const char *type, int ac,
struct wmeParams *wmep, const paramType *phy)
{
wmep->wmep_acm = phy->acm;
_setifsparams(wmep, phy);
IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
"set %s (%s) [acm %u aifsn %u logcwmin %u logcwmax %u txop %u]\n",
ieee80211_wme_acnames[ac], type,
wmep->wmep_acm, wmep->wmep_aifsn, wmep->wmep_logcwmin,
wmep->wmep_logcwmax, wmep->wmep_txopLimit);
}
static void
ieee80211_wme_initparams_locked(struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_wme_state *wme = &ic->ic_wme;
const paramType *pPhyParam, *pBssPhyParam;
struct wmeParams *wmep;
enum ieee80211_phymode mode;
int i;
IEEE80211_LOCK_ASSERT(ic);
if ((ic->ic_caps & IEEE80211_C_WME) == 0 || ic->ic_nrunning > 1)
return;
/*
* Clear the wme cap_info field so a qoscount from a previous
* vap doesn't confuse later code which only parses the beacon
* field and updates hardware when said field changes.
* Otherwise the hardware is programmed with defaults, not what
* the beacon actually announces.
*/
wme->wme_wmeChanParams.cap_info = 0;
/*
* Select mode; we can be called early in which case we
* always use auto mode. We know we'll be called when
* entering the RUN state with bsschan setup properly
* so state will eventually get set correctly
*/
if (ic->ic_bsschan != IEEE80211_CHAN_ANYC)
mode = ieee80211_chan2mode(ic->ic_bsschan);
else
mode = IEEE80211_MODE_AUTO;
for (i = 0; i < WME_NUM_AC; i++) {
switch (i) {
case WME_AC_BK:
pPhyParam = &phyParamForAC_BK[mode];
pBssPhyParam = &phyParamForAC_BK[mode];
break;
case WME_AC_VI:
pPhyParam = &phyParamForAC_VI[mode];
pBssPhyParam = &bssPhyParamForAC_VI[mode];
break;
case WME_AC_VO:
pPhyParam = &phyParamForAC_VO[mode];
pBssPhyParam = &bssPhyParamForAC_VO[mode];
break;
case WME_AC_BE:
default:
pPhyParam = &phyParamForAC_BE[mode];
pBssPhyParam = &bssPhyParamForAC_BE[mode];
break;
}
wmep = &wme->wme_wmeChanParams.cap_wmeParams[i];
if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
setwmeparams(vap, "chan", i, wmep, pPhyParam);
} else {
setwmeparams(vap, "chan", i, wmep, pBssPhyParam);
}
wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[i];
setwmeparams(vap, "bss ", i, wmep, pBssPhyParam);
}
/* NB: check ic_bss to avoid NULL deref on initial attach */
if (vap->iv_bss != NULL) {
/*
* Calculate aggressive mode switching threshold based
* on beacon interval. This doesn't need locking since
* we're only called before entering the RUN state at
* which point we start sending beacon frames.
*/
wme->wme_hipri_switch_thresh =
(HIGH_PRI_SWITCH_THRESH * vap->iv_bss->ni_intval) / 100;
wme->wme_flags &= ~WME_F_AGGRMODE;
ieee80211_wme_updateparams(vap);
}
}
void
ieee80211_wme_initparams(struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
IEEE80211_LOCK(ic);
ieee80211_wme_initparams_locked(vap);
IEEE80211_UNLOCK(ic);
}
/*
* Update WME parameters for ourself and the BSS.
*/
void
ieee80211_wme_updateparams_locked(struct ieee80211vap *vap)
{
static const paramType aggrParam[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = { 2, 4, 10, 64, 0 },
[IEEE80211_MODE_11A] = { 2, 4, 10, 64, 0 },
[IEEE80211_MODE_11B] = { 2, 5, 10, 64, 0 },
[IEEE80211_MODE_11G] = { 2, 4, 10, 64, 0 },
[IEEE80211_MODE_FH] = { 2, 5, 10, 64, 0 },
[IEEE80211_MODE_TURBO_A] = { 1, 3, 10, 64, 0 },
[IEEE80211_MODE_TURBO_G] = { 1, 3, 10, 64, 0 },
[IEEE80211_MODE_STURBO_A] = { 1, 3, 10, 64, 0 },
[IEEE80211_MODE_HALF] = { 2, 4, 10, 64, 0 },
[IEEE80211_MODE_QUARTER] = { 2, 4, 10, 64, 0 },
[IEEE80211_MODE_11NA] = { 2, 4, 10, 64, 0 }, /* XXXcheck*/
[IEEE80211_MODE_11NG] = { 2, 4, 10, 64, 0 }, /* XXXcheck*/
};
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_wme_state *wme = &ic->ic_wme;
const struct wmeParams *wmep;
struct wmeParams *chanp, *bssp;
enum ieee80211_phymode mode;
int i;
int do_aggrmode = 0;
/*
* Set up the channel access parameters for the physical
* device. First populate the configured settings.
*/
for (i = 0; i < WME_NUM_AC; i++) {
chanp = &wme->wme_chanParams.cap_wmeParams[i];
wmep = &wme->wme_wmeChanParams.cap_wmeParams[i];
chanp->wmep_aifsn = wmep->wmep_aifsn;
chanp->wmep_logcwmin = wmep->wmep_logcwmin;
chanp->wmep_logcwmax = wmep->wmep_logcwmax;
chanp->wmep_txopLimit = wmep->wmep_txopLimit;
chanp = &wme->wme_bssChanParams.cap_wmeParams[i];
wmep = &wme->wme_wmeBssChanParams.cap_wmeParams[i];
chanp->wmep_aifsn = wmep->wmep_aifsn;
chanp->wmep_logcwmin = wmep->wmep_logcwmin;
chanp->wmep_logcwmax = wmep->wmep_logcwmax;
chanp->wmep_txopLimit = wmep->wmep_txopLimit;
}
/*
* Select mode; we can be called early in which case we
* always use auto mode. We know we'll be called when
* entering the RUN state with bsschan setup properly
* so state will eventually get set correctly
*/
if (ic->ic_bsschan != IEEE80211_CHAN_ANYC)
mode = ieee80211_chan2mode(ic->ic_bsschan);
else
mode = IEEE80211_MODE_AUTO;
/*
* This implements aggressive mode as found in certain
* vendors' APs. When there is significant high
* priority (VI/VO) traffic in the BSS, throttle back BE
* traffic by using conservative parameters. Otherwise
* BE uses aggressive params to optimize performance of
* legacy/non-QoS traffic.
*/
/* Hostap? Only if aggressive mode is enabled */
if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
(wme->wme_flags & WME_F_AGGRMODE) != 0)
do_aggrmode = 1;
/*
* Station? Only if we're in a non-QoS BSS.
*/
else if ((vap->iv_opmode == IEEE80211_M_STA &&
(vap->iv_bss->ni_flags & IEEE80211_NODE_QOS) == 0))
do_aggrmode = 1;
/*
* IBSS? Only if we have WME enabled.
*/
else if ((vap->iv_opmode == IEEE80211_M_IBSS) &&
(vap->iv_flags & IEEE80211_F_WME))
do_aggrmode = 1;
/*
* If WME is disabled on this VAP, default to aggressive mode
* regardless of the configuration.
*/
if ((vap->iv_flags & IEEE80211_F_WME) == 0)
do_aggrmode = 1;
/* XXX WDS? */
/* XXX MBSS? */
if (do_aggrmode) {
chanp = &wme->wme_chanParams.cap_wmeParams[WME_AC_BE];
bssp = &wme->wme_bssChanParams.cap_wmeParams[WME_AC_BE];
chanp->wmep_aifsn = bssp->wmep_aifsn = aggrParam[mode].aifsn;
chanp->wmep_logcwmin = bssp->wmep_logcwmin =
aggrParam[mode].logcwmin;
chanp->wmep_logcwmax = bssp->wmep_logcwmax =
aggrParam[mode].logcwmax;
chanp->wmep_txopLimit = bssp->wmep_txopLimit =
(vap->iv_flags & IEEE80211_F_BURST) ?
aggrParam[mode].txopLimit : 0;
IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
"update %s (chan+bss) [acm %u aifsn %u logcwmin %u "
"logcwmax %u txop %u]\n", ieee80211_wme_acnames[WME_AC_BE],
chanp->wmep_acm, chanp->wmep_aifsn, chanp->wmep_logcwmin,
chanp->wmep_logcwmax, chanp->wmep_txopLimit);
}
/*
* Change the contention window based on the number of associated
* stations. If the number of associated stations is 1 and
* aggressive mode is enabled, lower the contention window even
* further.
*/
if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
ic->ic_sta_assoc < 2 && (wme->wme_flags & WME_F_AGGRMODE) != 0) {
static const uint8_t logCwMin[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = 3,
[IEEE80211_MODE_11A] = 3,
[IEEE80211_MODE_11B] = 4,
[IEEE80211_MODE_11G] = 3,
[IEEE80211_MODE_FH] = 4,
[IEEE80211_MODE_TURBO_A] = 3,
[IEEE80211_MODE_TURBO_G] = 3,
[IEEE80211_MODE_STURBO_A] = 3,
[IEEE80211_MODE_HALF] = 3,
[IEEE80211_MODE_QUARTER] = 3,
[IEEE80211_MODE_11NA] = 3,
[IEEE80211_MODE_11NG] = 3,
};
chanp = &wme->wme_chanParams.cap_wmeParams[WME_AC_BE];
bssp = &wme->wme_bssChanParams.cap_wmeParams[WME_AC_BE];
chanp->wmep_logcwmin = bssp->wmep_logcwmin = logCwMin[mode];
IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
"update %s (chan+bss) logcwmin %u\n",
ieee80211_wme_acnames[WME_AC_BE], chanp->wmep_logcwmin);
}
/*
* Arrange for the beacon update.
*
* XXX what about MBSS, WDS?
*/
if (vap->iv_opmode == IEEE80211_M_HOSTAP
|| vap->iv_opmode == IEEE80211_M_IBSS) {
/*
* Arrange for a beacon update and bump the parameter
* set number so associated stations load the new values.
*/
wme->wme_bssChanParams.cap_info =
(wme->wme_bssChanParams.cap_info+1) & WME_QOSINFO_COUNT;
ieee80211_beacon_notify(vap, IEEE80211_BEACON_WME);
}
wme->wme_update(ic);
IEEE80211_DPRINTF(vap, IEEE80211_MSG_WME,
"%s: WME params updated, cap_info 0x%x\n", __func__,
vap->iv_opmode == IEEE80211_M_STA ?
wme->wme_wmeChanParams.cap_info :
wme->wme_bssChanParams.cap_info);
}
void
ieee80211_wme_updateparams(struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
if (ic->ic_caps & IEEE80211_C_WME) {
IEEE80211_LOCK(ic);
ieee80211_wme_updateparams_locked(vap);
IEEE80211_UNLOCK(ic);
}
}
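/*
 * Illustrative use of the API above (a sketch, not taken from a
 * specific driver): a hostap vap recomputes and pushes WME parameters
 * after association state changes so the aggressive/conservative BE
 * logic above is re-evaluated.  The surrounding join handler is
 * hypothetical.
 */
static void
example_node_join_wme(struct ieee80211vap *vap)
{
	/* safe to call unlocked; it takes the com lock itself */
	ieee80211_wme_updateparams(vap);
}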
static void
parent_updown(void *arg, int npending)
{
- struct ifnet *parent = arg;
+ struct ieee80211com *ic = arg;
- parent->if_ioctl(parent, SIOCSIFFLAGS, NULL);
+ ic->ic_parent(ic);
}
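/*
 * A minimal sketch of the driver-supplied ic_parent method invoked by
 * the task above; example_hw_init and example_hw_stop are hypothetical
 * hardware routines, not part of net80211.
 */
static void
example_parent(struct ieee80211com *ic)
{
	if (ic->ic_nrunning > 0) {
		example_hw_init(ic);		/* bring the device up */
		ieee80211_start_all(ic);	/* re-enter start (see below) */
	} else
		example_hw_stop(ic);		/* last vap went away */
}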
static void
update_mcast(void *arg, int npending)
{
struct ieee80211com *ic = arg;
ic->ic_update_mcast(ic);
}
static void
update_promisc(void *arg, int npending)
{
struct ieee80211com *ic = arg;
ic->ic_update_promisc(ic);
}
static void
update_channel(void *arg, int npending)
{
struct ieee80211com *ic = arg;
ic->ic_set_channel(ic);
ieee80211_radiotap_chan_change(ic);
}
static void
update_chw(void *arg, int npending)
{
struct ieee80211com *ic = arg;
/*
* XXX should we defer the channel width _config_ update until now?
*/
ic->ic_update_chw(ic);
}
/*
* Block until the parent is in a known state. This is
* used after any operations that dispatch a task (e.g.
* to auto-configure the parent device up/down).
*/
void
ieee80211_waitfor_parent(struct ieee80211com *ic)
{
taskqueue_block(ic->ic_tq);
ieee80211_draintask(ic, &ic->ic_parent_task);
ieee80211_draintask(ic, &ic->ic_mcast_task);
ieee80211_draintask(ic, &ic->ic_promisc_task);
ieee80211_draintask(ic, &ic->ic_chan_task);
ieee80211_draintask(ic, &ic->ic_bmiss_task);
ieee80211_draintask(ic, &ic->ic_chw_task);
taskqueue_unblock(ic->ic_tq);
}
/*
* Check to see whether the current channel needs reset.
*
* Some devices don't handle being given an invalid channel
* in their operating mode very well (e.g. wpi(4) will throw a
* firmware exception).
*
* Return 0 if we're ok, 1 if the channel needs to be reset.
*
* See PR kern/202502.
*/
static int
ieee80211_start_check_reset_chan(struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
if ((vap->iv_opmode == IEEE80211_M_IBSS &&
IEEE80211_IS_CHAN_NOADHOC(ic->ic_curchan)) ||
(vap->iv_opmode == IEEE80211_M_HOSTAP &&
IEEE80211_IS_CHAN_NOHOSTAP(ic->ic_curchan)))
return (1);
return (0);
}
/*
* Reset the curchan to a known good state.
*/
static void
ieee80211_start_reset_chan(struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
ic->ic_curchan = &ic->ic_channels[0];
}
/*
* Start a vap running. If this is the first vap to be
* set running on the underlying device then we
* automatically bring the device up.
*/
void
ieee80211_start_locked(struct ieee80211vap *vap)
{
struct ifnet *ifp = vap->iv_ifp;
struct ieee80211com *ic = vap->iv_ic;
- struct ifnet *parent = ic->ic_ifp;
IEEE80211_LOCK_ASSERT(ic);
IEEE80211_DPRINTF(vap,
IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
"start running, %d vaps running\n", ic->ic_nrunning);
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
/*
* Mark us running. Note that it's ok to do this first;
* if we need to bring the parent device up we defer that
* to avoid dropping the com lock. We expect the device
* to respond to being marked up by calling back into us
* through ieee80211_start_all at which point we'll come
* back in here and complete the work.
*/
ifp->if_drv_flags |= IFF_DRV_RUNNING;
/*
* We are not running; if this is the first vap to be
* brought up, auto-up the parent if necessary.
*/
- if (ic->ic_nrunning++ == 0 &&
- (parent->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ if (ic->ic_nrunning++ == 0) {
/* reset the channel to a known good channel */
if (ieee80211_start_check_reset_chan(vap))
ieee80211_start_reset_chan(vap);
IEEE80211_DPRINTF(vap,
IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
- "%s: up parent %s\n", __func__, parent->if_xname);
- parent->if_flags |= IFF_UP;
+ "%s: up parent %s\n", __func__, ic->ic_name);
ieee80211_runtask(ic, &ic->ic_parent_task);
return;
}
}
/*
* If the parent is up and running, then kick the
* 802.11 state machine as appropriate.
*/
- if ((parent->if_drv_flags & IFF_DRV_RUNNING) &&
- vap->iv_roaming != IEEE80211_ROAMING_MANUAL) {
+ if (vap->iv_roaming != IEEE80211_ROAMING_MANUAL) {
if (vap->iv_opmode == IEEE80211_M_STA) {
#if 0
/* XXX bypasses scan too easily; disable for now */
/*
* Try to be intelligent about clocking the state
* machine. If we're currently in RUN state then
* we should be able to apply any new state/parameters
* simply by re-associating. Otherwise we need to
* re-scan to select an appropriate ap.
*/
if (vap->iv_state >= IEEE80211_S_RUN)
ieee80211_new_state_locked(vap,
IEEE80211_S_ASSOC, 1);
else
#endif
ieee80211_new_state_locked(vap,
IEEE80211_S_SCAN, 0);
} else {
/*
* For monitor+wds mode there's nothing to do but
* start running. Otherwise if this is the first
* vap to be brought up, start a scan which may be
* preempted if the station is locked to a particular
* channel.
*/
vap->iv_flags_ext |= IEEE80211_FEXT_REINIT;
if (vap->iv_opmode == IEEE80211_M_MONITOR ||
vap->iv_opmode == IEEE80211_M_WDS)
ieee80211_new_state_locked(vap,
IEEE80211_S_RUN, -1);
else
ieee80211_new_state_locked(vap,
IEEE80211_S_SCAN, 0);
}
}
}
/*
* Start a single vap.
*/
void
ieee80211_init(void *arg)
{
struct ieee80211vap *vap = arg;
IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
"%s\n", __func__);
IEEE80211_LOCK(vap->iv_ic);
ieee80211_start_locked(vap);
IEEE80211_UNLOCK(vap->iv_ic);
}
/*
* Start all runnable vap's on a device.
*/
void
ieee80211_start_all(struct ieee80211com *ic)
{
struct ieee80211vap *vap;
IEEE80211_LOCK(ic);
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
struct ifnet *ifp = vap->iv_ifp;
if (IFNET_IS_UP_RUNNING(ifp)) /* NB: avoid recursion */
ieee80211_start_locked(vap);
}
IEEE80211_UNLOCK(ic);
}
/*
* Stop a vap. We force it down using the state machine
* and then mark its ifnet not running. If this is the last
* vap running on the underlying device then we close it
* too, to ensure it will be properly initialized when the
* next vap is brought up.
*/
void
ieee80211_stop_locked(struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct ifnet *ifp = vap->iv_ifp;
- struct ifnet *parent = ic->ic_ifp;
IEEE80211_LOCK_ASSERT(ic);
IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
"stop running, %d vaps running\n", ic->ic_nrunning);
ieee80211_new_state_locked(vap, IEEE80211_S_INIT, -1);
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
ifp->if_drv_flags &= ~IFF_DRV_RUNNING; /* mark us stopped */
- if (--ic->ic_nrunning == 0 &&
- (parent->if_drv_flags & IFF_DRV_RUNNING)) {
+ if (--ic->ic_nrunning == 0) {
IEEE80211_DPRINTF(vap,
IEEE80211_MSG_STATE | IEEE80211_MSG_DEBUG,
- "down parent %s\n", parent->if_xname);
- parent->if_flags &= ~IFF_UP;
+ "down parent %s\n", ic->ic_name);
ieee80211_runtask(ic, &ic->ic_parent_task);
}
}
}
void
ieee80211_stop(struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
IEEE80211_LOCK(ic);
ieee80211_stop_locked(vap);
IEEE80211_UNLOCK(ic);
}
/*
* Stop all vap's running on a device.
*/
void
ieee80211_stop_all(struct ieee80211com *ic)
{
struct ieee80211vap *vap;
IEEE80211_LOCK(ic);
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
struct ifnet *ifp = vap->iv_ifp;
if (IFNET_IS_UP_RUNNING(ifp)) /* NB: avoid recursion */
ieee80211_stop_locked(vap);
}
IEEE80211_UNLOCK(ic);
ieee80211_waitfor_parent(ic);
}
/*
* Stop all vap's running on a device and arrange
* for those that were running to be resumed.
*/
void
ieee80211_suspend_all(struct ieee80211com *ic)
{
struct ieee80211vap *vap;
IEEE80211_LOCK(ic);
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
struct ifnet *ifp = vap->iv_ifp;
if (IFNET_IS_UP_RUNNING(ifp)) { /* NB: avoid recursion */
vap->iv_flags_ext |= IEEE80211_FEXT_RESUME;
ieee80211_stop_locked(vap);
}
}
IEEE80211_UNLOCK(ic);
ieee80211_waitfor_parent(ic);
}
/*
* Start all vap's marked for resume.
*/
void
ieee80211_resume_all(struct ieee80211com *ic)
{
struct ieee80211vap *vap;
IEEE80211_LOCK(ic);
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
struct ifnet *ifp = vap->iv_ifp;
if (!IFNET_IS_UP_RUNNING(ifp) &&
(vap->iv_flags_ext & IEEE80211_FEXT_RESUME)) {
vap->iv_flags_ext &= ~IEEE80211_FEXT_RESUME;
ieee80211_start_locked(vap);
}
}
IEEE80211_UNLOCK(ic);
}
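/*
 * Sketch of how a driver's bus suspend/resume methods might use the
 * two helpers above; the example softc layout and the device_get_softc()
 * wiring are assumptions for illustration only.
 */
struct example_softc {
	struct ieee80211com sc_ic;	/* assumed embedded com */
};

static int
example_suspend(device_t dev)
{
	struct example_softc *sc = device_get_softc(dev);

	ieee80211_suspend_all(&sc->sc_ic);
	return (0);
}

static int
example_resume(device_t dev)
{
	struct example_softc *sc = device_get_softc(dev);

	ieee80211_resume_all(&sc->sc_ic);
	return (0);
}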
void
ieee80211_beacon_miss(struct ieee80211com *ic)
{
IEEE80211_LOCK(ic);
if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) {
/* Process in a taskq, the handler may reenter the driver */
ieee80211_runtask(ic, &ic->ic_bmiss_task);
}
IEEE80211_UNLOCK(ic);
}
static void
beacon_miss(void *arg, int npending)
{
struct ieee80211com *ic = arg;
struct ieee80211vap *vap;
IEEE80211_LOCK(ic);
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
/*
* We only pass events through for sta vap's in RUN state;
* may be too restrictive but for now this saves all the
* handlers duplicating these checks.
*/
if (vap->iv_opmode == IEEE80211_M_STA &&
vap->iv_state >= IEEE80211_S_RUN &&
vap->iv_bmiss != NULL)
vap->iv_bmiss(vap);
}
IEEE80211_UNLOCK(ic);
}
static void
beacon_swmiss(void *arg, int npending)
{
struct ieee80211vap *vap = arg;
struct ieee80211com *ic = vap->iv_ic;
IEEE80211_LOCK(ic);
if (vap->iv_state == IEEE80211_S_RUN) {
/* XXX Call multiple times if npending > zero? */
vap->iv_bmiss(vap);
}
IEEE80211_UNLOCK(ic);
}
/*
* Software beacon miss handling. Check if any beacons
* were received in the last period. If not post a
* beacon miss; otherwise reset the counter.
*/
void
ieee80211_swbmiss(void *arg)
{
struct ieee80211vap *vap = arg;
struct ieee80211com *ic = vap->iv_ic;
IEEE80211_LOCK_ASSERT(ic);
/* XXX sleep state? */
KASSERT(vap->iv_state == IEEE80211_S_RUN,
("wrong state %d", vap->iv_state));
if (ic->ic_flags & IEEE80211_F_SCAN) {
/*
* If scanning just ignore and reset state. If we get a
* bmiss after coming out of scan because we haven't had
* time to receive a beacon then we should probe the AP
* before posting a real bmiss (unless iv_bmiss_max has
* been artificially lowered). A cleaner solution might
* be to disable the timer on scan start/end, but to handle
* the case of multiple sta vap's we'd need to disable the
* timers of all affected vap's.
*/
vap->iv_swbmiss_count = 0;
} else if (vap->iv_swbmiss_count == 0) {
if (vap->iv_bmiss != NULL)
ieee80211_runtask(ic, &vap->iv_swbmiss_task);
} else
vap->iv_swbmiss_count = 0;
callout_reset(&vap->iv_swbmiss, vap->iv_swbmiss_period,
ieee80211_swbmiss, vap);
}
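/*
 * Drivers commonly interpose on iv_bmiss so they can, for example,
 * poll the hardware or firmware before declaring the AP gone.  A
 * sketch of the usual save-and-chain idiom; the example_sta_vap
 * wrapper is an assumption, not a particular driver's layout.
 */
struct example_sta_vap {
	struct ieee80211vap esv_vap;			/* must be first */
	void (*esv_bmiss)(struct ieee80211vap *);	/* saved handler */
};

static void
example_bmiss(struct ieee80211vap *vap)
{
	struct example_sta_vap *esv = (struct example_sta_vap *)vap;

	/* optionally probe the hardware state here before giving up */
	esv->esv_bmiss(vap);				/* chain to net80211 */
}

/* at vap creation: esv->esv_bmiss = vap->iv_bmiss; vap->iv_bmiss = example_bmiss; */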
/*
* Start an 802.11h channel switch. We record the parameters,
* mark the operation pending, notify each vap through the
* beacon update mechanism so it can update the beacon frame
* contents, and then switch vap's to CSA state to block outbound
* traffic. Devices that handle CSA directly can use the state
* switch to do the right thing so long as they call
* ieee80211_csa_completeswitch when it's time to complete the
* channel change. Devices that depend on the net80211 layer can
* use ieee80211_beacon_update to handle the countdown and the
* channel switch.
*/
void
ieee80211_csa_startswitch(struct ieee80211com *ic,
struct ieee80211_channel *c, int mode, int count)
{
struct ieee80211vap *vap;
IEEE80211_LOCK_ASSERT(ic);
ic->ic_csa_newchan = c;
ic->ic_csa_mode = mode;
ic->ic_csa_count = count;
ic->ic_flags |= IEEE80211_F_CSAPENDING;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
vap->iv_opmode == IEEE80211_M_IBSS ||
vap->iv_opmode == IEEE80211_M_MBSS)
ieee80211_beacon_notify(vap, IEEE80211_BEACON_CSA);
/* switch to CSA state to block outbound traffic */
if (vap->iv_state == IEEE80211_S_RUN)
ieee80211_new_state_locked(vap, IEEE80211_S_CSA, 0);
}
ieee80211_notify_csa(ic, c, mode, count);
}
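/*
 * Minimal sketch of initiating a switch from driver or ioctl context
 * under the locking shown above; the target channel, CSA mode 1 and
 * the 10-beacon countdown are arbitrary example values.
 */
static void
example_start_csa(struct ieee80211com *ic, struct ieee80211_channel *c)
{
	IEEE80211_LOCK(ic);
	if ((ic->ic_flags & IEEE80211_F_CSAPENDING) == 0)
		ieee80211_csa_startswitch(ic, c, 1, 10);
	IEEE80211_UNLOCK(ic);
}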
/*
* Complete the channel switch by transitioning all CSA VAPs to RUN.
* This is called by both the completion and cancellation functions
* so each VAP is placed back in the RUN state and can thus transmit.
*/
static void
csa_completeswitch(struct ieee80211com *ic)
{
struct ieee80211vap *vap;
ic->ic_csa_newchan = NULL;
ic->ic_flags &= ~IEEE80211_F_CSAPENDING;
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
if (vap->iv_state == IEEE80211_S_CSA)
ieee80211_new_state_locked(vap, IEEE80211_S_RUN, 0);
}
/*
* Complete an 802.11h channel switch started by ieee80211_csa_startswitch.
* We clear state and move all vap's in CSA state to RUN state
* so they can again transmit.
*
* Although this may not be completely correct, update the BSS channel
* for each VAP to the newly configured channel. The setcurchan sets
* the current operating channel for the interface (so the radio does
* switch over) but the VAP BSS isn't updated, leading to incorrectly
* reported information via ioctl.
*/
void
ieee80211_csa_completeswitch(struct ieee80211com *ic)
{
struct ieee80211vap *vap;
IEEE80211_LOCK_ASSERT(ic);
KASSERT(ic->ic_flags & IEEE80211_F_CSAPENDING, ("csa not pending"));
ieee80211_setcurchan(ic, ic->ic_csa_newchan);
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
if (vap->iv_state == IEEE80211_S_CSA)
vap->iv_bss->ni_chan = ic->ic_curchan;
csa_completeswitch(ic);
}
/*
* Cancel an 802.11h channel switch started by ieee80211_csa_startswitch.
* We clear state and move all vap's in CSA state to RUN state
* so they can again transmit.
*/
void
ieee80211_csa_cancelswitch(struct ieee80211com *ic)
{
IEEE80211_LOCK_ASSERT(ic);
csa_completeswitch(ic);
}
/*
* Complete a DFS CAC started by ieee80211_dfs_cac_start.
* We clear state and move all vap's in CAC state to RUN state.
*/
void
ieee80211_cac_completeswitch(struct ieee80211vap *vap0)
{
struct ieee80211com *ic = vap0->iv_ic;
struct ieee80211vap *vap;
IEEE80211_LOCK(ic);
/*
* Complete CAC state change for lead vap first; then
* clock all the other vap's waiting.
*/
KASSERT(vap0->iv_state == IEEE80211_S_CAC,
("wrong state %d", vap0->iv_state));
ieee80211_new_state_locked(vap0, IEEE80211_S_RUN, 0);
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
if (vap->iv_state == IEEE80211_S_CAC)
ieee80211_new_state_locked(vap, IEEE80211_S_RUN, 0);
IEEE80211_UNLOCK(ic);
}
/*
* Force all vap's other than the specified vap to the INIT state
* and mark them as waiting for a scan to complete. These vaps
* will be brought up by wakeupwaiting when the scan completes and
* the scanning vap reaches RUN state.
*/
static void
markwaiting(struct ieee80211vap *vap0)
{
struct ieee80211com *ic = vap0->iv_ic;
struct ieee80211vap *vap;
IEEE80211_LOCK_ASSERT(ic);
/*
* A vap list entry can not disappear since we are running on the
* taskqueue and a vap destroy will queue and drain another state
* change task.
*/
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
if (vap == vap0)
continue;
if (vap->iv_state != IEEE80211_S_INIT) {
/* NB: iv_newstate may drop the lock */
vap->iv_newstate(vap, IEEE80211_S_INIT, 0);
IEEE80211_LOCK_ASSERT(ic);
vap->iv_flags_ext |= IEEE80211_FEXT_SCANWAIT;
}
}
}
/*
* Wakeup all vap's waiting for a scan to complete. This is the
* companion to markwaiting (above) and is used to coordinate
* multiple vaps scanning.
* This is called from the state taskqueue.
*/
static void
wakeupwaiting(struct ieee80211vap *vap0)
{
struct ieee80211com *ic = vap0->iv_ic;
struct ieee80211vap *vap;
IEEE80211_LOCK_ASSERT(ic);
/*
* A vap list entry can not disappear since we are running on the
* taskqueue and a vap destroy will queue and drain another state
* change task.
*/
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
if (vap == vap0)
continue;
if (vap->iv_flags_ext & IEEE80211_FEXT_SCANWAIT) {
vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANWAIT;
/* NB: sta's cannot go INIT->RUN */
/* NB: iv_newstate may drop the lock */
vap->iv_newstate(vap,
vap->iv_opmode == IEEE80211_M_STA ?
IEEE80211_S_SCAN : IEEE80211_S_RUN, 0);
IEEE80211_LOCK_ASSERT(ic);
}
}
}
/*
* Handle post state change work common to all operating modes.
*/
static void
ieee80211_newstate_cb(void *xvap, int npending)
{
struct ieee80211vap *vap = xvap;
struct ieee80211com *ic = vap->iv_ic;
enum ieee80211_state nstate, ostate;
int arg, rc;
IEEE80211_LOCK(ic);
nstate = vap->iv_nstate;
arg = vap->iv_nstate_arg;
if (vap->iv_flags_ext & IEEE80211_FEXT_REINIT) {
/*
* We have been requested to drop back to the INIT before
* proceeding to the new state.
*/
IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
"%s: %s -> %s arg %d\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[IEEE80211_S_INIT], arg);
vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
IEEE80211_LOCK_ASSERT(ic);
vap->iv_flags_ext &= ~IEEE80211_FEXT_REINIT;
}
ostate = vap->iv_state;
if (nstate == IEEE80211_S_SCAN && ostate != IEEE80211_S_INIT) {
/*
* SCAN was forced; e.g. on beacon miss. Force other running
* vap's to INIT state and mark them as waiting for the scan to
* complete. This ensures they don't interfere with our
* scanning. Since we are single threaded the vaps can not
* transition again while we are executing.
*
* XXX not always right, assumes ap follows sta
*/
markwaiting(vap);
}
IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
"%s: %s -> %s arg %d\n", __func__,
ieee80211_state_name[ostate], ieee80211_state_name[nstate], arg);
rc = vap->iv_newstate(vap, nstate, arg);
IEEE80211_LOCK_ASSERT(ic);
vap->iv_flags_ext &= ~IEEE80211_FEXT_STATEWAIT;
if (rc != 0) {
/* State transition failed */
KASSERT(rc != EINPROGRESS, ("iv_newstate was deferred"));
KASSERT(nstate != IEEE80211_S_INIT,
("INIT state change failed"));
IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
"%s: %s returned error %d\n", __func__,
ieee80211_state_name[nstate], rc);
goto done;
}
/* No actual transition, skip post processing */
if (ostate == nstate)
goto done;
if (nstate == IEEE80211_S_RUN) {
/*
* OACTIVE may be set on the vap if the upper layer
* tried to transmit (e.g. IPv6 NDP) before we reach
* RUN state. Clear it and restart xmit.
*
* Note this can also happen as a result of SLEEP->RUN
* (i.e. coming out of power save mode).
*/
vap->iv_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
* XXX TODO Kick-start a VAP queue - this should be a method!
*/
/* bring up any vaps waiting on us */
wakeupwaiting(vap);
} else if (nstate == IEEE80211_S_INIT) {
/*
* Flush the scan cache if we did the last scan (XXX?)
* and flush any frames on send queues from this vap.
* Note the mgt q is used only for legacy drivers and
* will go away shortly.
*/
ieee80211_scan_flush(vap);
/*
* XXX TODO: ic/vap queue flush
*/
}
done:
IEEE80211_UNLOCK(ic);
}
/*
* Public interface for initiating a state machine change.
* This routine single-threads the request and coordinates
* the scheduling of multiple vaps for the purpose of selecting
* an operating channel. Specifically the following scenarios
* are handled:
* o only one vap can be selecting a channel so on transition to
* SCAN state if another vap is already scanning then
* mark the caller for later processing and return without
* doing anything (XXX? expectations by caller of synchronous operation)
* o only one vap can be doing CAC of a channel so on transition to
* CAC state if another vap is already scanning for radar then
* mark the caller for later processing and return without
* doing anything (XXX? expectations by caller of synchronous operation)
* o if another vap is already running when a request is made
* to SCAN then an operating channel has been chosen; bypass
* the scan and just join the channel
*
* Note that the state change call is done through the iv_newstate
* method pointer so any driver routine gets invoked. The driver
* will normally call back into operating mode-specific
* ieee80211_newstate routines (below) unless it needs to completely
* bypass the state machine (e.g. because the firmware has its
* own idea of how things should work). Bypassing the net80211 layer
* is usually a mistake and indicates lack of proper integration
* with the net80211 layer.
*/
int
ieee80211_new_state_locked(struct ieee80211vap *vap,
enum ieee80211_state nstate, int arg)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211vap *vp;
enum ieee80211_state ostate;
int nrunning, nscanning;
IEEE80211_LOCK_ASSERT(ic);
if (vap->iv_flags_ext & IEEE80211_FEXT_STATEWAIT) {
if (vap->iv_nstate == IEEE80211_S_INIT) {
/*
* XXX The vap is being stopped, do not allow any other
* state changes until this is completed.
*/
return -1;
} else if (vap->iv_state != vap->iv_nstate) {
#if 0
/* Warn if the previous state hasn't completed. */
IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
"%s: pending %s -> %s transition lost\n", __func__,
ieee80211_state_name[vap->iv_state],
ieee80211_state_name[vap->iv_nstate]);
#else
/* XXX temporarily enable to identify issues */
if_printf(vap->iv_ifp,
"%s: pending %s -> %s transition lost\n",
__func__, ieee80211_state_name[vap->iv_state],
ieee80211_state_name[vap->iv_nstate]);
#endif
}
}
nrunning = nscanning = 0;
/* XXX can track this state instead of calculating */
TAILQ_FOREACH(vp, &ic->ic_vaps, iv_next) {
if (vp != vap) {
if (vp->iv_state >= IEEE80211_S_RUN)
nrunning++;
/* XXX doesn't handle bg scan */
/* NB: CAC+AUTH+ASSOC treated like SCAN */
else if (vp->iv_state > IEEE80211_S_INIT)
nscanning++;
}
}
ostate = vap->iv_state;
IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
"%s: %s -> %s (nrunning %d nscanning %d)\n", __func__,
ieee80211_state_name[ostate], ieee80211_state_name[nstate],
nrunning, nscanning);
switch (nstate) {
case IEEE80211_S_SCAN:
if (ostate == IEEE80211_S_INIT) {
/*
* INIT -> SCAN happens on initial bringup.
*/
KASSERT(!(nscanning && nrunning),
("%d scanning and %d running", nscanning, nrunning));
if (nscanning) {
/*
* Someone is scanning, defer our state
* change until the work has completed.
*/
IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
"%s: defer %s -> %s\n",
__func__, ieee80211_state_name[ostate],
ieee80211_state_name[nstate]);
vap->iv_flags_ext |= IEEE80211_FEXT_SCANWAIT;
return 0;
}
if (nrunning) {
/*
* Someone is operating; just join the channel
* they have chosen.
*/
/* XXX kill arg? */
/* XXX check each opmode, adhoc? */
if (vap->iv_opmode == IEEE80211_M_STA)
nstate = IEEE80211_S_SCAN;
else
nstate = IEEE80211_S_RUN;
#ifdef IEEE80211_DEBUG
if (nstate != IEEE80211_S_SCAN) {
IEEE80211_DPRINTF(vap,
IEEE80211_MSG_STATE,
"%s: override, now %s -> %s\n",
__func__,
ieee80211_state_name[ostate],
ieee80211_state_name[nstate]);
}
#endif
}
}
break;
case IEEE80211_S_RUN:
if (vap->iv_opmode == IEEE80211_M_WDS &&
(vap->iv_flags_ext & IEEE80211_FEXT_WDSLEGACY) &&
nscanning) {
/*
* Legacy WDS with someone else scanning; don't
* go online until that completes as we should
* follow the other vap to the channel they choose.
*/
IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
"%s: defer %s -> %s (legacy WDS)\n", __func__,
ieee80211_state_name[ostate],
ieee80211_state_name[nstate]);
vap->iv_flags_ext |= IEEE80211_FEXT_SCANWAIT;
return 0;
}
if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
IEEE80211_IS_CHAN_DFS(ic->ic_bsschan) &&
(vap->iv_flags_ext & IEEE80211_FEXT_DFS) &&
!IEEE80211_IS_CHAN_CACDONE(ic->ic_bsschan)) {
/*
* This is a DFS channel, transition to CAC state
* instead of RUN. This allows us to initiate
* Channel Availability Check (CAC) as specified
* by 11h/DFS.
*/
nstate = IEEE80211_S_CAC;
IEEE80211_DPRINTF(vap, IEEE80211_MSG_STATE,
"%s: override %s -> %s (DFS)\n", __func__,
ieee80211_state_name[ostate],
ieee80211_state_name[nstate]);
}
break;
case IEEE80211_S_INIT:
/* cancel any scan in progress */
ieee80211_cancel_scan(vap);
if (ostate == IEEE80211_S_INIT) {
/* XXX don't believe this */
/* INIT -> INIT. nothing to do */
vap->iv_flags_ext &= ~IEEE80211_FEXT_SCANWAIT;
}
/* fall thru... */
default:
break;
}
/* defer the state change to a thread */
vap->iv_nstate = nstate;
vap->iv_nstate_arg = arg;
vap->iv_flags_ext |= IEEE80211_FEXT_STATEWAIT;
ieee80211_runtask(ic, &vap->iv_nstate_task);
return EINPROGRESS;
}
int
ieee80211_new_state(struct ieee80211vap *vap,
enum ieee80211_state nstate, int arg)
{
struct ieee80211com *ic = vap->iv_ic;
int rc;
IEEE80211_LOCK(ic);
rc = ieee80211_new_state_locked(vap, nstate, arg);
IEEE80211_UNLOCK(ic);
return rc;
}
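/*
 * As noted above, state changes go through the iv_newstate method so a
 * driver routine is always invoked.  The usual pattern is to interpose
 * at vap creation and chain to net80211's handler; a sketch (the
 * example_vap wrapper and the hardware programming step are assumptions):
 */
struct example_vap {
	struct ieee80211vap ev_vap;		/* must be first */
	int (*ev_newstate)(struct ieee80211vap *, enum ieee80211_state, int);
};

static int
example_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct example_vap *evp = (struct example_vap *)vap;

	/* program the hardware for nstate here (hypothetical) */
	return (evp->ev_newstate(vap, nstate, arg));	/* chain to net80211 */
}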
Index: head/sys/net80211/ieee80211_proto.h
===================================================================
--- head/sys/net80211/ieee80211_proto.h (revision 287196)
+++ head/sys/net80211/ieee80211_proto.h (revision 287197)
@@ -1,436 +1,437 @@
/*-
* Copyright (c) 2001 Atsushi Onoe
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _NET80211_IEEE80211_PROTO_H_
#define _NET80211_IEEE80211_PROTO_H_
/*
* 802.11 protocol implementation definitions.
*/
enum ieee80211_state {
IEEE80211_S_INIT = 0, /* default state */
IEEE80211_S_SCAN = 1, /* scanning */
IEEE80211_S_AUTH = 2, /* try to authenticate */
IEEE80211_S_ASSOC = 3, /* try to assoc */
IEEE80211_S_CAC = 4, /* doing channel availability check */
IEEE80211_S_RUN = 5, /* operational (e.g. associated) */
IEEE80211_S_CSA = 6, /* channel switch announce pending */
IEEE80211_S_SLEEP = 7, /* power save */
};
#define IEEE80211_S_MAX (IEEE80211_S_SLEEP+1)
#define IEEE80211_SEND_MGMT(_ni,_type,_arg) \
((*(_ni)->ni_ic->ic_send_mgmt)(_ni, _type, _arg))
extern const char *ieee80211_mgt_subtype_name[];
extern const char *ieee80211_phymode_name[IEEE80211_MODE_MAX];
extern const int ieee80211_opcap[IEEE80211_OPMODE_MAX];
void ieee80211_proto_attach(struct ieee80211com *);
void ieee80211_proto_detach(struct ieee80211com *);
void ieee80211_proto_vattach(struct ieee80211vap *);
void ieee80211_proto_vdetach(struct ieee80211vap *);
-void ieee80211_syncifflag_locked(struct ieee80211com *, int flag);
+void ieee80211_promisc(struct ieee80211vap *, bool);
+void ieee80211_allmulti(struct ieee80211vap *, bool);
void ieee80211_syncflag(struct ieee80211vap *, int flag);
void ieee80211_syncflag_ht(struct ieee80211vap *, int flag);
void ieee80211_syncflag_ext(struct ieee80211vap *, int flag);
#define IEEE80211_R_NF 0x0000001 /* global NF value valid */
#define IEEE80211_R_RSSI 0x0000002 /* global RSSI value valid */
#define IEEE80211_R_C_CHAIN 0x0000004 /* RX chain count valid */
#define IEEE80211_R_C_NF 0x0000008 /* per-chain NF value valid */
#define IEEE80211_R_C_RSSI 0x0000010 /* per-chain RSSI value valid */
#define IEEE80211_R_C_EVM 0x0000020 /* per-chain EVM valid */
#define IEEE80211_R_C_HT40 0x0000040 /* RX'ed packet is 40mhz, pilots 4,5 valid */
#define IEEE80211_R_FREQ 0x0000080 /* Freq value populated, MHz */
#define IEEE80211_R_IEEE 0x0000100 /* IEEE value populated */
#define IEEE80211_R_BAND 0x0000200 /* Frequency band populated */
struct ieee80211_rx_stats {
uint32_t r_flags; /* IEEE80211_R_* flags */
uint8_t c_chain; /* number of RX chains involved */
int16_t c_nf_ctl[IEEE80211_MAX_CHAINS]; /* per-chain NF */
int16_t c_nf_ext[IEEE80211_MAX_CHAINS]; /* per-chain NF */
int16_t c_rssi_ctl[IEEE80211_MAX_CHAINS]; /* per-chain RSSI */
int16_t c_rssi_ext[IEEE80211_MAX_CHAINS]; /* per-chain RSSI */
uint8_t nf; /* global NF */
uint8_t rssi; /* global RSSI */
uint8_t evm[IEEE80211_MAX_CHAINS][IEEE80211_MAX_EVM_PILOTS];
/* per-chain, per-pilot EVM values */
uint16_t c_freq;
uint8_t c_ieee;
};
#define ieee80211_input(ni, m, rssi, nf) \
((ni)->ni_vap->iv_input(ni, m, NULL, rssi, nf))
int ieee80211_input_all(struct ieee80211com *, struct mbuf *, int, int);
int ieee80211_input_mimo(struct ieee80211_node *, struct mbuf *,
struct ieee80211_rx_stats *);
int ieee80211_input_mimo_all(struct ieee80211com *, struct mbuf *,
struct ieee80211_rx_stats *);
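/*
 * Sketch of a driver RX path feeding the MIMO input routine above with
 * only the global RSSI/NF fields populated; the surrounding receive
 * function is hypothetical.
 */
static void
example_rx_frame(struct ieee80211_node *ni, struct mbuf *m, int rssi, int nf)
{
	struct ieee80211_rx_stats rxs;

	memset(&rxs, 0, sizeof(rxs));
	rxs.r_flags = IEEE80211_R_NF | IEEE80211_R_RSSI;
	rxs.nf = nf;
	rxs.rssi = rssi;
	(void) ieee80211_input_mimo(ni, m, &rxs);
}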
struct ieee80211_bpf_params;
int ieee80211_mgmt_output(struct ieee80211_node *, struct mbuf *, int,
struct ieee80211_bpf_params *);
int ieee80211_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
int ieee80211_output(struct ifnet *, struct mbuf *,
const struct sockaddr *, struct route *ro);
int ieee80211_vap_pkt_send_dest(struct ieee80211vap *, struct mbuf *,
struct ieee80211_node *);
int ieee80211_raw_output(struct ieee80211vap *, struct ieee80211_node *,
struct mbuf *, const struct ieee80211_bpf_params *);
void ieee80211_send_setup(struct ieee80211_node *, struct mbuf *, int, int,
const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
int ieee80211_vap_transmit(struct ifnet *ifp, struct mbuf *m);
void ieee80211_vap_qflush(struct ifnet *ifp);
int ieee80211_send_nulldata(struct ieee80211_node *);
int ieee80211_classify(struct ieee80211_node *, struct mbuf *m);
struct mbuf *ieee80211_mbuf_adjust(struct ieee80211vap *, int,
struct ieee80211_key *, struct mbuf *);
struct mbuf *ieee80211_encap(struct ieee80211vap *, struct ieee80211_node *,
struct mbuf *);
int ieee80211_send_mgmt(struct ieee80211_node *, int, int);
struct ieee80211_appie;
int ieee80211_send_probereq(struct ieee80211_node *ni,
const uint8_t sa[IEEE80211_ADDR_LEN],
const uint8_t da[IEEE80211_ADDR_LEN],
const uint8_t bssid[IEEE80211_ADDR_LEN],
const uint8_t *ssid, size_t ssidlen);
struct mbuf * ieee80211_ff_encap1(struct ieee80211vap *, struct mbuf *,
const struct ether_header *);
void ieee80211_tx_complete(struct ieee80211_node *,
struct mbuf *, int);
/*
* The formation of ProbeResponse frames requires guidance to
* deal with legacy clients. When the client is identified as
* "legacy 11b" ieee80211_send_proberesp is passed this token.
*/
#define IEEE80211_SEND_LEGACY_11B 0x1 /* legacy 11b client */
#define IEEE80211_SEND_LEGACY_11 0x2 /* other legacy client */
#define IEEE80211_SEND_LEGACY 0x3 /* any legacy client */
struct mbuf *ieee80211_alloc_proberesp(struct ieee80211_node *, int);
int ieee80211_send_proberesp(struct ieee80211vap *,
const uint8_t da[IEEE80211_ADDR_LEN], int);
struct mbuf *ieee80211_alloc_rts(struct ieee80211com *ic,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN], uint16_t);
struct mbuf *ieee80211_alloc_cts(struct ieee80211com *,
const uint8_t [IEEE80211_ADDR_LEN], uint16_t);
uint8_t *ieee80211_add_rates(uint8_t *, const struct ieee80211_rateset *);
uint8_t *ieee80211_add_xrates(uint8_t *, const struct ieee80211_rateset *);
uint8_t *ieee80211_add_ssid(uint8_t *, const uint8_t *, u_int);
uint8_t *ieee80211_add_wpa(uint8_t *, const struct ieee80211vap *);
uint8_t *ieee80211_add_rsn(uint8_t *, const struct ieee80211vap *);
uint8_t *ieee80211_add_qos(uint8_t *, const struct ieee80211_node *);
uint16_t ieee80211_getcapinfo(struct ieee80211vap *,
struct ieee80211_channel *);
void ieee80211_reset_erp(struct ieee80211com *);
void ieee80211_set_shortslottime(struct ieee80211com *, int onoff);
int ieee80211_iserp_rateset(const struct ieee80211_rateset *);
void ieee80211_setbasicrates(struct ieee80211_rateset *,
enum ieee80211_phymode);
void ieee80211_addbasicrates(struct ieee80211_rateset *,
enum ieee80211_phymode);
/*
* Return the size of the 802.11 header for a management or data frame.
*/
static __inline int
ieee80211_hdrsize(const void *data)
{
const struct ieee80211_frame *wh = data;
int size = sizeof(struct ieee80211_frame);
/* NB: we don't handle control frames */
KASSERT((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL,
("%s: control frame", __func__));
if (IEEE80211_IS_DSTODS(wh))
size += IEEE80211_ADDR_LEN;
if (IEEE80211_QOS_HAS_SEQ(wh))
size += sizeof(uint16_t);
return size;
}
/*
* Like ieee80211_hdrsize, but handles any type of frame.
*/
static __inline int
ieee80211_anyhdrsize(const void *data)
{
const struct ieee80211_frame *wh = data;
if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
case IEEE80211_FC0_SUBTYPE_CTS:
case IEEE80211_FC0_SUBTYPE_ACK:
return sizeof(struct ieee80211_frame_ack);
case IEEE80211_FC0_SUBTYPE_BAR:
return sizeof(struct ieee80211_frame_bar);
}
return sizeof(struct ieee80211_frame_min);
} else
return ieee80211_hdrsize(data);
}
/*
* Template for an in-kernel authenticator. Authenticators
* register with the protocol code and are typically loaded
* as separate modules as needed. One special authenticator
* is xauth; it intercepts requests so that protocols like
* WPA can be handled in user space.
*/
struct ieee80211_authenticator {
const char *ia_name; /* printable name */
int (*ia_attach)(struct ieee80211vap *);
void (*ia_detach)(struct ieee80211vap *);
void (*ia_node_join)(struct ieee80211_node *);
void (*ia_node_leave)(struct ieee80211_node *);
};
void ieee80211_authenticator_register(int type,
const struct ieee80211_authenticator *);
void ieee80211_authenticator_unregister(int type);
const struct ieee80211_authenticator *ieee80211_authenticator_get(int auth);
struct ieee80211req;
/*
* Template for a MAC ACL policy module. Such modules
* register with the protocol code and are passed the sender's
* address of each received auth frame for validation.
*/
struct ieee80211_aclator {
const char *iac_name; /* printable name */
int (*iac_attach)(struct ieee80211vap *);
void (*iac_detach)(struct ieee80211vap *);
int (*iac_check)(struct ieee80211vap *,
const struct ieee80211_frame *wh);
int (*iac_add)(struct ieee80211vap *,
const uint8_t mac[IEEE80211_ADDR_LEN]);
int (*iac_remove)(struct ieee80211vap *,
const uint8_t mac[IEEE80211_ADDR_LEN]);
int (*iac_flush)(struct ieee80211vap *);
int (*iac_setpolicy)(struct ieee80211vap *, int);
int (*iac_getpolicy)(struct ieee80211vap *);
int (*iac_setioctl)(struct ieee80211vap *, struct ieee80211req *);
int (*iac_getioctl)(struct ieee80211vap *, struct ieee80211req *);
};
void ieee80211_aclator_register(const struct ieee80211_aclator *);
void ieee80211_aclator_unregister(const struct ieee80211_aclator *);
const struct ieee80211_aclator *ieee80211_aclator_get(const char *name);
/* flags for ieee80211_fix_rate() */
#define IEEE80211_F_DOSORT 0x00000001 /* sort rate list */
#define IEEE80211_F_DOFRATE 0x00000002 /* use fixed legacy rate */
#define IEEE80211_F_DONEGO 0x00000004 /* calc negotiated rate */
#define IEEE80211_F_DODEL 0x00000008 /* delete ignore rate */
#define IEEE80211_F_DOBRS 0x00000010 /* check basic rate set */
#define IEEE80211_F_JOIN 0x00000020 /* sta joining our bss */
#define IEEE80211_F_DOFMCS 0x00000040 /* use fixed HT rate */
int ieee80211_fix_rate(struct ieee80211_node *,
struct ieee80211_rateset *, int);
/*
* WME/WMM support.
*/
struct wmeParams {
uint8_t wmep_acm;
uint8_t wmep_aifsn;
uint8_t wmep_logcwmin; /* log2(cwmin) */
uint8_t wmep_logcwmax; /* log2(cwmax) */
uint8_t wmep_txopLimit;
uint8_t wmep_noackPolicy; /* 0 (ack), 1 (no ack) */
};
#define IEEE80211_TXOP_TO_US(_txop) ((_txop)<<5)
#define IEEE80211_US_TO_TXOP(_us) ((_us)>>5)
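/*
 * Example: TXOP limits are carried in units of 32 microseconds, so a
 * wmep_txopLimit of 94 maps to IEEE80211_TXOP_TO_US(94) == 3008 us and
 * IEEE80211_US_TO_TXOP(3008) == 94.
 */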
struct chanAccParams {
uint8_t cap_info; /* version of the current set */
struct wmeParams cap_wmeParams[WME_NUM_AC];
};
struct ieee80211_wme_state {
u_int wme_flags;
#define WME_F_AGGRMODE 0x00000001 /* STATUS: WME aggressive mode */
u_int wme_hipri_traffic; /* VI/VO frames in beacon interval */
u_int wme_hipri_switch_thresh;/* aggressive mode switch thresh */
u_int wme_hipri_switch_hysteresis;/* aggressive mode switch hysteresis */
struct wmeParams wme_params[4]; /* from assoc resp for each AC*/
struct chanAccParams wme_wmeChanParams; /* WME params applied to self */
struct chanAccParams wme_wmeBssChanParams;/* WME params bcast to stations */
struct chanAccParams wme_chanParams; /* params applied to self */
struct chanAccParams wme_bssChanParams; /* params bcast to stations */
int (*wme_update)(struct ieee80211com *);
};
void ieee80211_wme_initparams(struct ieee80211vap *);
void ieee80211_wme_updateparams(struct ieee80211vap *);
void ieee80211_wme_updateparams_locked(struct ieee80211vap *);
/*
* Return the WME TID from a QoS frame. If no TID
* is present return the index for the "non-QoS" entry.
*/
static __inline uint8_t
ieee80211_gettid(const struct ieee80211_frame *wh)
{
uint8_t tid;
if (IEEE80211_QOS_HAS_SEQ(wh)) {
if (IEEE80211_IS_DSTODS(wh))
tid = ((const struct ieee80211_qosframe_addr4 *)wh)->
i_qos[0];
else
tid = ((const struct ieee80211_qosframe *)wh)->i_qos[0];
tid &= IEEE80211_QOS_TID;
} else
tid = IEEE80211_NONQOS_TID;
return tid;
}
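/*
 * Example use (a sketch): the returned TID is typically used to index
 * per-TID sequence or aggregation state; IEEE80211_NONQOS_TID marks
 * frames without a QoS control field.
 */
static __inline int
example_frame_is_qos(const struct ieee80211_frame *wh)
{
	return (ieee80211_gettid(wh) != IEEE80211_NONQOS_TID);
}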
void ieee80211_waitfor_parent(struct ieee80211com *);
void ieee80211_start_locked(struct ieee80211vap *);
void ieee80211_init(void *);
void ieee80211_start_all(struct ieee80211com *);
void ieee80211_stop_locked(struct ieee80211vap *);
void ieee80211_stop(struct ieee80211vap *);
void ieee80211_stop_all(struct ieee80211com *);
void ieee80211_suspend_all(struct ieee80211com *);
void ieee80211_resume_all(struct ieee80211com *);
void ieee80211_dturbo_switch(struct ieee80211vap *, int newflags);
void ieee80211_swbmiss(void *arg);
void ieee80211_beacon_miss(struct ieee80211com *);
int ieee80211_new_state(struct ieee80211vap *, enum ieee80211_state, int);
int ieee80211_new_state_locked(struct ieee80211vap *, enum ieee80211_state,
int);
void ieee80211_print_essid(const uint8_t *, int);
void ieee80211_dump_pkt(struct ieee80211com *,
const uint8_t *, int, int, int);
extern const char *ieee80211_opmode_name[];
extern const char *ieee80211_state_name[IEEE80211_S_MAX];
extern const char *ieee80211_wme_acnames[];
/*
* Beacon frames constructed by ieee80211_beacon_alloc
* have the following structure filled in so drivers
* can update the frame later w/ minimal overhead.
*/
struct ieee80211_beacon_offsets {
uint8_t bo_flags[4]; /* update/state flags */
uint16_t *bo_caps; /* capabilities */
uint8_t *bo_cfp; /* start of CFParms element */
uint8_t *bo_tim; /* start of atim/dtim */
uint8_t *bo_wme; /* start of WME parameters */
uint8_t *bo_tdma; /* start of TDMA parameters */
uint8_t *bo_tim_trailer;/* start of fixed-size trailer */
uint16_t bo_tim_len; /* atim/dtim length in bytes */
uint16_t bo_tim_trailer_len;/* tim trailer length in bytes */
uint8_t *bo_erp; /* start of ERP element */
uint8_t *bo_htinfo; /* start of HT info element */
uint8_t *bo_ath; /* start of ATH parameters */
uint8_t *bo_appie; /* start of AppIE element */
uint16_t bo_appie_len; /* AppIE length in bytes */
uint16_t bo_csa_trailer_len;
uint8_t *bo_csa; /* start of CSA element */
uint8_t *bo_quiet; /* start of Quiet element */
uint8_t *bo_meshconf; /* start of MESHCONF element */
uint8_t *bo_spare[3];
};
struct mbuf *ieee80211_beacon_alloc(struct ieee80211_node *,
struct ieee80211_beacon_offsets *);
/*
* Beacon frame updates are signaled through calls to iv_update_beacon
* with one of the IEEE80211_BEACON_* tokens defined below. For devices
* that construct beacon frames on the host this can trigger a rebuild
* or defer the processing. For devices that offload beacon frame
* handling this callback can be used to signal a rebuild. The bo_flags
* array in the ieee80211_beacon_offsets structure is intended to record
* deferred processing requirements; ieee80211_beacon_update uses the
* state to optimize work. Since this structure is owned by the driver
* and not visible to the 802.11 layer, drivers must supply an iv_update_beacon
* callback that marks the flag bits and schedules (as necessary) an update.
*/
enum {
IEEE80211_BEACON_CAPS = 0, /* capabilities */
IEEE80211_BEACON_TIM = 1, /* DTIM/ATIM */
IEEE80211_BEACON_WME = 2,
IEEE80211_BEACON_ERP = 3, /* Extended Rate Phy */
IEEE80211_BEACON_HTINFO = 4, /* HT Information */
IEEE80211_BEACON_APPIE = 5, /* Application IE's */
IEEE80211_BEACON_CFP = 6, /* CFParms */
IEEE80211_BEACON_CSA = 7, /* Channel Switch Announcement */
IEEE80211_BEACON_TDMA = 9, /* TDMA Info */
IEEE80211_BEACON_ATH = 10, /* ATH parameters */
IEEE80211_BEACON_MESHCONF = 11, /* Mesh Configuration */
};
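/*
 * Sketch of the iv_update_beacon callback described above for a driver
 * that defers beacon rebuilds to a task; the example_ap_vap wrapper,
 * its embedded beacon offsets and the rebuild task are assumptions.
 */
struct example_ap_vap {
	struct ieee80211vap eav_vap;			/* must be first */
	struct ieee80211_beacon_offsets eav_boff;	/* from beacon_alloc */
	struct task eav_beacon_task;			/* rebuild/push task */
};

static void
example_update_beacon(struct ieee80211vap *vap, int item)
{
	struct example_ap_vap *eav = (struct example_ap_vap *)vap;

	setbit(eav->eav_boff.bo_flags, item);	/* record deferred work */
	/* schedule eav_beacon_task to call ieee80211_beacon_update() */
}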
int ieee80211_beacon_update(struct ieee80211_node *,
struct ieee80211_beacon_offsets *, struct mbuf *, int mcast);
void ieee80211_csa_startswitch(struct ieee80211com *,
struct ieee80211_channel *, int mode, int count);
void ieee80211_csa_completeswitch(struct ieee80211com *);
void ieee80211_csa_cancelswitch(struct ieee80211com *);
void ieee80211_cac_completeswitch(struct ieee80211vap *);
/*
* Notification methods called from the 802.11 state machine.
* Note that while these are defined here, their implementation
* is OS-specific.
*/
void ieee80211_notify_node_join(struct ieee80211_node *, int newassoc);
void ieee80211_notify_node_leave(struct ieee80211_node *);
void ieee80211_notify_scan_done(struct ieee80211vap *);
void ieee80211_notify_wds_discover(struct ieee80211_node *);
void ieee80211_notify_csa(struct ieee80211com *,
const struct ieee80211_channel *, int mode, int count);
void ieee80211_notify_radar(struct ieee80211com *,
const struct ieee80211_channel *);
enum ieee80211_notify_cac_event {
IEEE80211_NOTIFY_CAC_START = 0, /* CAC timer started */
IEEE80211_NOTIFY_CAC_STOP = 1, /* CAC intentionally stopped */
IEEE80211_NOTIFY_CAC_RADAR = 2, /* CAC stopped due to radar detection */
IEEE80211_NOTIFY_CAC_EXPIRE = 3, /* CAC expired w/o radar */
};
void ieee80211_notify_cac(struct ieee80211com *,
const struct ieee80211_channel *,
enum ieee80211_notify_cac_event);
void ieee80211_notify_node_deauth(struct ieee80211_node *);
void ieee80211_notify_node_auth(struct ieee80211_node *);
void ieee80211_notify_country(struct ieee80211vap *, const uint8_t [],
const uint8_t cc[2]);
void ieee80211_notify_radio(struct ieee80211com *, int);
#endif /* _NET80211_IEEE80211_PROTO_H_ */
Index: head/sys/net80211/ieee80211_regdomain.c
===================================================================
--- head/sys/net80211/ieee80211_regdomain.c (revision 287196)
+++ head/sys/net80211/ieee80211_regdomain.c (revision 287197)
@@ -1,509 +1,508 @@
/*-
* Copyright (c) 2005-2008 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* IEEE 802.11 regdomain support.
*/
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
-
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
static void
null_getradiocaps(struct ieee80211com *ic, int maxchan,
int *n, struct ieee80211_channel *c)
{
/* just feed back the current channel list */
if (maxchan > ic->ic_nchans)
maxchan = ic->ic_nchans;
memcpy(c, ic->ic_channels, maxchan*sizeof(struct ieee80211_channel));
*n = maxchan;
}
static int
null_setregdomain(struct ieee80211com *ic,
struct ieee80211_regdomain *rd,
int nchans, struct ieee80211_channel chans[])
{
return 0; /* accept anything */
}
void
ieee80211_regdomain_attach(struct ieee80211com *ic)
{
if (ic->ic_regdomain.regdomain == 0 &&
ic->ic_regdomain.country == CTRY_DEFAULT) {
ic->ic_regdomain.country = CTRY_UNITED_STATES; /* XXX */
ic->ic_regdomain.location = ' '; /* both */
ic->ic_regdomain.isocc[0] = 'U'; /* XXX */
ic->ic_regdomain.isocc[1] = 'S'; /* XXX */
/* NB: driver calls ieee80211_init_channels or similar */
}
ic->ic_getradiocaps = null_getradiocaps;
ic->ic_setregdomain = null_setregdomain;
}
void
ieee80211_regdomain_detach(struct ieee80211com *ic)
{
if (ic->ic_countryie != NULL) {
IEEE80211_FREE(ic->ic_countryie, M_80211_NODE_IE);
ic->ic_countryie = NULL;
}
}
void
ieee80211_regdomain_vattach(struct ieee80211vap *vap)
{
}
void
ieee80211_regdomain_vdetach(struct ieee80211vap *vap)
{
}
static void
addchan(struct ieee80211com *ic, int ieee, int flags)
{
struct ieee80211_channel *c;
c = &ic->ic_channels[ic->ic_nchans++];
c->ic_freq = ieee80211_ieee2mhz(ieee, flags);
c->ic_ieee = ieee;
c->ic_flags = flags;
if (flags & IEEE80211_CHAN_HT40U)
c->ic_extieee = ieee + 4;
else if (flags & IEEE80211_CHAN_HT40D)
c->ic_extieee = ieee - 4;
else
c->ic_extieee = 0;
}
/*
* Setup the channel list for the specified regulatory domain,
* country code, and operating modes. This interface is used
* when a driver does not obtain the channel list from another
* source (such as firmware).
*/
int
ieee80211_init_channels(struct ieee80211com *ic,
const struct ieee80211_regdomain *rd, const uint8_t bands[])
{
int i;
/* XXX just do something for now */
ic->ic_nchans = 0;
if (isset(bands, IEEE80211_MODE_11B) ||
isset(bands, IEEE80211_MODE_11G) ||
isset(bands, IEEE80211_MODE_11NG)) {
int maxchan = 11;
if (rd != NULL && rd->ecm)
maxchan = 14;
for (i = 1; i <= maxchan; i++) {
if (isset(bands, IEEE80211_MODE_11B))
addchan(ic, i, IEEE80211_CHAN_B);
if (isset(bands, IEEE80211_MODE_11G))
addchan(ic, i, IEEE80211_CHAN_G);
if (isset(bands, IEEE80211_MODE_11NG)) {
addchan(ic, i,
IEEE80211_CHAN_G | IEEE80211_CHAN_HT20);
}
if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) == 0)
continue;
if (i <= 7) {
addchan(ic, i,
IEEE80211_CHAN_G | IEEE80211_CHAN_HT40U);
addchan(ic, i + 4,
IEEE80211_CHAN_G | IEEE80211_CHAN_HT40D);
}
}
}
if (isset(bands, IEEE80211_MODE_11A) ||
isset(bands, IEEE80211_MODE_11NA)) {
for (i = 36; i <= 64; i += 4) {
addchan(ic, i, IEEE80211_CHAN_A);
if (isset(bands, IEEE80211_MODE_11NA)) {
addchan(ic, i,
IEEE80211_CHAN_A | IEEE80211_CHAN_HT20);
}
if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) == 0)
continue;
if ((i % 8) == 4) {
addchan(ic, i,
IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U);
addchan(ic, i + 4,
IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D);
}
}
for (i = 100; i <= 140; i += 4) {
addchan(ic, i, IEEE80211_CHAN_A);
if (isset(bands, IEEE80211_MODE_11NA)) {
addchan(ic, i,
IEEE80211_CHAN_A | IEEE80211_CHAN_HT20);
}
if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) == 0)
continue;
if ((i % 8) == 4 && i != 140) {
addchan(ic, i,
IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U);
addchan(ic, i + 4,
IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D);
}
}
for (i = 149; i <= 161; i += 4) {
addchan(ic, i, IEEE80211_CHAN_A);
if (isset(bands, IEEE80211_MODE_11NA)) {
addchan(ic, i,
IEEE80211_CHAN_A | IEEE80211_CHAN_HT20);
}
if ((ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40) == 0)
continue;
if ((i % 8) == 5) {
addchan(ic, i,
IEEE80211_CHAN_A | IEEE80211_CHAN_HT40U);
addchan(ic, i + 4,
IEEE80211_CHAN_A | IEEE80211_CHAN_HT40D);
}
}
}
if (rd != NULL)
ic->ic_regdomain = *rd;
return 0;
}
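/*
 * Illustrative call (a sketch): a 2 GHz-only 11n device building its
 * channel list at attach time with no regdomain hint; the bands bit
 * vector is indexed by the IEEE80211_MODE_* values tested above.
 */
static void
example_setup_channels(struct ieee80211com *ic)
{
	uint8_t bands[howmany(IEEE80211_MODE_MAX, NBBY)];

	memset(bands, 0, sizeof(bands));
	setbit(bands, IEEE80211_MODE_11B);
	setbit(bands, IEEE80211_MODE_11G);
	setbit(bands, IEEE80211_MODE_11NG);
	ieee80211_init_channels(ic, NULL, bands);
}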
static __inline int
chancompar(const void *a, const void *b)
{
const struct ieee80211_channel *ca = a;
const struct ieee80211_channel *cb = b;
return (ca->ic_freq == cb->ic_freq) ?
(ca->ic_flags & IEEE80211_CHAN_ALL) -
(cb->ic_flags & IEEE80211_CHAN_ALL) :
ca->ic_freq - cb->ic_freq;
}
/*
* Insertion sort.
*/
#define swap(_a, _b, _size) { \
uint8_t *s = _b; \
int i = _size; \
do { \
uint8_t tmp = *_a; \
*_a++ = *s; \
*s++ = tmp; \
} while (--i); \
_a -= _size; \
}
static void
sort_channels(void *a, size_t n, size_t size)
{
uint8_t *aa = a;
uint8_t *ai, *t;
KASSERT(n > 0, ("no channels"));
for (ai = aa+size; --n >= 1; ai += size)
for (t = ai; t > aa; t -= size) {
uint8_t *u = t - size;
if (chancompar(u, t) <= 0)
break;
swap(u, t, size);
}
}
#undef swap
/*
* Order channels w/ the same frequency so that
* b < g < htg and a < hta. This is used to optimize
* channel table lookups and some user applications
* may also depend on it (though they should not).
*/
void
ieee80211_sort_channels(struct ieee80211_channel chans[], int nchans)
{
if (nchans > 0)
sort_channels(chans, nchans, sizeof(struct ieee80211_channel));
}
/*
* Allocate and construct a Country Information IE.
*/
struct ieee80211_appie *
ieee80211_alloc_countryie(struct ieee80211com *ic)
{
#define CHAN_UNINTERESTING \
(IEEE80211_CHAN_TURBO | IEEE80211_CHAN_STURBO | \
IEEE80211_CHAN_HALF | IEEE80211_CHAN_QUARTER)
/* XXX what about auto? */
/* flag set of channels to be excluded (band added below) */
static const int skipflags[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = CHAN_UNINTERESTING,
[IEEE80211_MODE_11A] = CHAN_UNINTERESTING,
[IEEE80211_MODE_11B] = CHAN_UNINTERESTING,
[IEEE80211_MODE_11G] = CHAN_UNINTERESTING,
[IEEE80211_MODE_FH] = CHAN_UNINTERESTING
| IEEE80211_CHAN_OFDM
| IEEE80211_CHAN_CCK
| IEEE80211_CHAN_DYN,
[IEEE80211_MODE_TURBO_A] = CHAN_UNINTERESTING,
[IEEE80211_MODE_TURBO_G] = CHAN_UNINTERESTING,
[IEEE80211_MODE_STURBO_A] = CHAN_UNINTERESTING,
[IEEE80211_MODE_HALF] = IEEE80211_CHAN_TURBO
| IEEE80211_CHAN_STURBO,
[IEEE80211_MODE_QUARTER] = IEEE80211_CHAN_TURBO
| IEEE80211_CHAN_STURBO,
[IEEE80211_MODE_11NA] = CHAN_UNINTERESTING,
[IEEE80211_MODE_11NG] = CHAN_UNINTERESTING,
};
const struct ieee80211_regdomain *rd = &ic->ic_regdomain;
uint8_t nextchan, chans[IEEE80211_CHAN_BYTES], *frm;
struct ieee80211_appie *aie;
struct ieee80211_country_ie *ie;
int i, skip, nruns;
aie = IEEE80211_MALLOC(IEEE80211_COUNTRY_MAX_SIZE, M_80211_NODE_IE,
IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
if (aie == NULL) {
ic_printf(ic, "%s: unable to allocate memory for country ie\n",
__func__);
/* XXX stat */
return NULL;
}
ie = (struct ieee80211_country_ie *) aie->ie_data;
ie->ie = IEEE80211_ELEMID_COUNTRY;
if (rd->isocc[0] == '\0') {
ic_printf(ic, "no ISO country string for cc %d; using blanks\n",
rd->country);
ie->cc[0] = ie->cc[1] = ' ';
} else {
ie->cc[0] = rd->isocc[0];
ie->cc[1] = rd->isocc[1];
}
/*
* Indoor/Outdoor portion of country string:
* 'I' indoor only
* 'O' outdoor only
* ' ' all environments
*/
ie->cc[2] = (rd->location == 'I' ? 'I' :
rd->location == 'O' ? 'O' : ' ');
/*
* Run-length encoded channel+max tx power info.
*/
frm = (uint8_t *)&ie->band[0];
nextchan = 0; /* NB: impossible channel # */
nruns = 0;
memset(chans, 0, sizeof(chans));
skip = skipflags[ieee80211_chan2mode(ic->ic_bsschan)];
if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan))
skip |= IEEE80211_CHAN_2GHZ;
else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_bsschan))
skip |= IEEE80211_CHAN_5GHZ;
for (i = 0; i < ic->ic_nchans; i++) {
const struct ieee80211_channel *c = &ic->ic_channels[i];
if (isset(chans, c->ic_ieee)) /* suppress dup's */
continue;
if (c->ic_flags & skip) /* skip band, etc. */
continue;
setbit(chans, c->ic_ieee);
if (c->ic_ieee != nextchan ||
c->ic_maxregpower != frm[-1]) { /* new run */
if (nruns == IEEE80211_COUNTRY_MAX_BANDS) {
ic_printf(ic, "%s: country ie too big, "
"runs > max %d, truncating\n",
__func__, IEEE80211_COUNTRY_MAX_BANDS);
/* XXX stat? fail? */
break;
}
frm[0] = c->ic_ieee; /* starting channel # */
frm[1] = 1; /* # channels in run */
frm[2] = c->ic_maxregpower; /* tx power cap */
frm += 3;
nextchan = c->ic_ieee + 1; /* overflow? */
nruns++;
} else { /* extend run */
frm[-2]++;
nextchan++;
}
}
ie->len = frm - ie->cc;
if (ie->len & 1) { /* Zero pad to multiple of 2 */
ie->len++;
*frm++ = 0;
}
aie->ie_len = frm - aie->ie_data;
return aie;
#undef CHAN_UNINTERESTING
}
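/*
 * Worked example of the encoding above: 2 GHz channels 1-11 all at the
 * same regulatory power (say 30) collapse into the single triplet
 * { 1, 11, 30 } because their IEEE numbers are consecutive, while 5 GHz
 * channels 36 and 40 at the same power become two runs { 36, 1, ... }
 * and { 40, 1, ... } since 40 != 36 + 1.
 */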
static int
allvapsdown(struct ieee80211com *ic)
{
struct ieee80211vap *vap;
IEEE80211_LOCK_ASSERT(ic);
TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next)
if (vap->iv_state != IEEE80211_S_INIT)
return 0;
return 1;
}
int
ieee80211_setregdomain(struct ieee80211vap *vap,
struct ieee80211_regdomain_req *reg)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_channel *c;
int desfreq = 0, desflags = 0; /* XXX silence gcc complaint */
int error, i;
if (reg->rd.location != 'I' && reg->rd.location != 'O' &&
reg->rd.location != ' ') {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
"%s: invalid location 0x%x\n", __func__, reg->rd.location);
return EINVAL;
}
if (reg->rd.isocc[0] == '\0' || reg->rd.isocc[1] == '\0') {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
"%s: invalid iso cc 0x%x:0x%x\n", __func__,
reg->rd.isocc[0], reg->rd.isocc[1]);
return EINVAL;
}
if (reg->chaninfo.ic_nchans > IEEE80211_CHAN_MAX) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
"%s: too many channels %u, max %u\n", __func__,
reg->chaninfo.ic_nchans, IEEE80211_CHAN_MAX);
return EINVAL;
}
/*
* Calculate freq<->IEEE mapping and default max tx power
* for channels not set up. The driver can override these
* settings to reflect device properties/requirements.
*/
for (i = 0; i < reg->chaninfo.ic_nchans; i++) {
c = &reg->chaninfo.ic_chans[i];
if (c->ic_freq == 0 || c->ic_flags == 0) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
"%s: invalid channel spec at [%u]\n", __func__, i);
return EINVAL;
}
if (c->ic_maxregpower == 0) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
"%s: invalid channel spec, zero maxregpower, "
"freq %u flags 0x%x\n", __func__,
c->ic_freq, c->ic_flags);
return EINVAL;
}
if (c->ic_ieee == 0)
c->ic_ieee = ieee80211_mhz2ieee(c->ic_freq,c->ic_flags);
if (IEEE80211_IS_CHAN_HT40(c) && c->ic_extieee == 0)
c->ic_extieee = ieee80211_mhz2ieee(c->ic_freq +
(IEEE80211_IS_CHAN_HT40U(c) ? 20 : -20),
c->ic_flags);
if (c->ic_maxpower == 0)
c->ic_maxpower = 2*c->ic_maxregpower;
}
IEEE80211_LOCK(ic);
/* XXX bandaid; a running vap will likely crash */
if (!allvapsdown(ic)) {
IEEE80211_UNLOCK(ic);
IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
"%s: reject: vaps are running\n", __func__);
return EBUSY;
}
error = ic->ic_setregdomain(ic, &reg->rd,
reg->chaninfo.ic_nchans, reg->chaninfo.ic_chans);
if (error != 0) {
IEEE80211_UNLOCK(ic);
IEEE80211_DPRINTF(vap, IEEE80211_MSG_IOCTL,
"%s: driver rejected request, error %u\n", __func__, error);
return error;
}
/*
* Commit: copy in new channel table and reset media state.
* On return the state machines will be clocked so all vaps
* will reset their state.
*
* XXX ic_bsschan is marked undefined, must have vap's in
* INIT state or we blow up forcing stations off
*/
/*
* Save any desired channel for restore below. Note this
* needs to be done for all vaps but for now we only do
* the one where the ioctl is issued.
*/
if (vap->iv_des_chan != IEEE80211_CHAN_ANYC) {
desfreq = vap->iv_des_chan->ic_freq;
desflags = vap->iv_des_chan->ic_flags;
}
/* regdomain parameters */
memcpy(&ic->ic_regdomain, &reg->rd, sizeof(reg->rd));
/* channel table */
memcpy(ic->ic_channels, reg->chaninfo.ic_chans,
reg->chaninfo.ic_nchans * sizeof(struct ieee80211_channel));
ic->ic_nchans = reg->chaninfo.ic_nchans;
memset(&ic->ic_channels[ic->ic_nchans], 0,
(IEEE80211_CHAN_MAX - ic->ic_nchans) *
sizeof(struct ieee80211_channel));
- ieee80211_media_init(ic);
+ ieee80211_chan_init(ic);
/*
* Invalidate channel-related state.
*/
if (ic->ic_countryie != NULL) {
IEEE80211_FREE(ic->ic_countryie, M_80211_NODE_IE);
ic->ic_countryie = NULL;
}
ieee80211_scan_flush(vap);
ieee80211_dfs_reset(ic);
if (vap->iv_des_chan != IEEE80211_CHAN_ANYC) {
c = ieee80211_find_channel(ic, desfreq, desflags);
/* NB: may be NULL if not present in new channel list */
vap->iv_des_chan = (c != NULL) ? c : IEEE80211_CHAN_ANYC;
}
IEEE80211_UNLOCK(ic);
return 0;
}
Index: head/sys/net80211/ieee80211_scan_sta.c
===================================================================
--- head/sys/net80211/ieee80211_scan_sta.c (revision 287196)
+++ head/sys/net80211/ieee80211_scan_sta.c (revision 287197)
@@ -1,1963 +1,1943 @@
/*-
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* IEEE 802.11 station scanning support.
*/
#include "opt_wlan.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/ethernet.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_input.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#ifdef IEEE80211_SUPPORT_MESH
#include <net80211/ieee80211_mesh.h>
#endif
#include <net80211/ieee80211_ratectl.h>
#include <net/bpf.h>
/*
* Parameters for managing cache entries:
*
* o a station with STA_FAILS_MAX failures is not considered
* when picking a candidate
* o a station that hasn't had an update in STA_PURGE_SCANS
* (background) scans is discarded
* o after STA_FAILS_AGE seconds we clear the failure count
*/
#define STA_FAILS_MAX 2 /* assoc failures before ignored */
#define STA_FAILS_AGE (2*60) /* time before clearing fails (secs) */
#define STA_PURGE_SCANS 2 /* age for purging entries (scans) */
/* XXX tunable */
#define STA_RSSI_MIN 8 /* min acceptable rssi */
#define STA_RSSI_MAX 40 /* max rssi for comparison */
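/*
 * NB: STA_RSSI_MIN is the floor applied by match_bss() (MATCH_RSSI);
 * STA_RSSI_MAX is only a clamp used by sta_compare() so a very strong
 * signal cannot outweigh the other selection criteria.
 */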
struct sta_entry {
struct ieee80211_scan_entry base;
TAILQ_ENTRY(sta_entry) se_list;
LIST_ENTRY(sta_entry) se_hash;
uint8_t se_fails; /* failure to associate count */
uint8_t se_seen; /* seen during current scan */
uint8_t se_notseen; /* not seen in previous scans */
uint8_t se_flags;
#define STA_DEMOTE11B 0x01 /* match w/ demoted 11b chan */
uint32_t se_avgrssi; /* LPF rssi state */
unsigned long se_lastupdate; /* time of last update */
unsigned long se_lastfail; /* time of last failure */
unsigned long se_lastassoc; /* time of last association */
u_int se_scangen; /* iterator scan gen# */
u_int se_countrygen; /* gen# of last cc notify */
};
#define STA_HASHSIZE 32
/* simple hash is enough for variation of macaddr */
#define STA_HASH(addr) \
(((const uint8_t *)(addr))[IEEE80211_ADDR_LEN - 1] % STA_HASHSIZE)
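/*
 * i.e. hash on the low byte of the MAC address only; e.g.
 * 00:11:22:33:44:ff hashes to 0xff % 32 == 31.  The high bytes are
 * the vendor OUI and vary little, so the last byte gives the most
 * spread.
 */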
#define MAX_IEEE_CHAN 256 /* max acceptable IEEE chan # */
CTASSERT(MAX_IEEE_CHAN >= 256);
struct sta_table {
ieee80211_scan_table_lock_t st_lock; /* on scan table */
TAILQ_HEAD(, sta_entry) st_entry; /* all entries */
LIST_HEAD(, sta_entry) st_hash[STA_HASHSIZE];
ieee80211_scan_iter_lock_t st_scanlock; /* on st_scaniter */
u_int st_scaniter; /* gen# for iterator */
u_int st_scangen; /* scan generation # */
int st_newscan;
/* ap-related state */
int st_maxrssi[MAX_IEEE_CHAN];
};
static void sta_flush_table(struct sta_table *);
/*
* match_bss returns a bitmask describing if an entry is suitable
* for use. If non-zero the entry was deemed not suitable and its
* contents explain why. The following flags are or'd into this
* mask and can be used to figure out why the entry was rejected.
*/
#define MATCH_CHANNEL 0x00001 /* channel mismatch */
#define MATCH_CAPINFO 0x00002 /* capabilities mismatch, e.g. no ess */
#define MATCH_PRIVACY 0x00004 /* privacy mismatch */
#define MATCH_RATE 0x00008 /* rate set mismatch */
#define MATCH_SSID 0x00010 /* ssid mismatch */
#define MATCH_BSSID 0x00020 /* bssid mismatch */
#define MATCH_FAILS 0x00040 /* too many failed auth attempts */
#define MATCH_NOTSEEN 0x00080 /* not seen in recent scans */
#define MATCH_RSSI 0x00100 /* rssi deemed too low to use */
#define MATCH_CC 0x00200 /* country code mismatch */
#define MATCH_TDMA_NOIE 0x00400 /* no TDMA ie */
#define MATCH_TDMA_NOTMASTER 0x00800 /* not TDMA master */
#define MATCH_TDMA_NOSLOT 0x01000 /* all TDMA slots occupied */
#define MATCH_TDMA_LOCAL 0x02000 /* local address */
#define MATCH_TDMA_VERSION 0x04000 /* protocol version mismatch */
#define MATCH_MESH_NOID 0x10000 /* no MESHID ie */
#define MATCH_MESHID 0x20000 /* meshid mismatch */
static int match_bss(struct ieee80211vap *,
const struct ieee80211_scan_state *, struct sta_entry *, int);
static void adhoc_age(struct ieee80211_scan_state *);
static __inline int
isocmp(const uint8_t cc1[], const uint8_t cc2[])
{
return (cc1[0] == cc2[0] && cc1[1] == cc2[1]);
}
/* number of references from net80211 layer */
static int nrefs = 0;
/*
* Module glue.
*/
IEEE80211_SCANNER_MODULE(sta, 1);
/*
* Attach prior to any scanning work.
*/
static int
sta_attach(struct ieee80211_scan_state *ss)
{
struct sta_table *st;
st = (struct sta_table *) IEEE80211_MALLOC(sizeof(struct sta_table),
M_80211_SCAN,
IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
if (st == NULL)
return 0;
IEEE80211_SCAN_TABLE_LOCK_INIT(st, "scantable");
IEEE80211_SCAN_ITER_LOCK_INIT(st, "scangen");
TAILQ_INIT(&st->st_entry);
ss->ss_priv = st;
nrefs++; /* NB: we assume caller locking */
return 1;
}
/*
* Cleanup any private state.
*/
static int
sta_detach(struct ieee80211_scan_state *ss)
{
struct sta_table *st = ss->ss_priv;
if (st != NULL) {
sta_flush_table(st);
IEEE80211_SCAN_TABLE_LOCK_DESTROY(st);
IEEE80211_SCAN_ITER_LOCK_DESTROY(st);
IEEE80211_FREE(st, M_80211_SCAN);
KASSERT(nrefs > 0, ("imbalanced attach/detach"));
nrefs--; /* NB: we assume caller locking */
}
return 1;
}
/*
* Flush all per-scan state.
*/
static int
sta_flush(struct ieee80211_scan_state *ss)
{
struct sta_table *st = ss->ss_priv;
IEEE80211_SCAN_TABLE_LOCK(st);
sta_flush_table(st);
IEEE80211_SCAN_TABLE_UNLOCK(st);
ss->ss_last = 0;
return 0;
}
/*
* Flush all entries in the scan cache.
*/
static void
sta_flush_table(struct sta_table *st)
{
struct sta_entry *se, *next;
TAILQ_FOREACH_SAFE(se, &st->st_entry, se_list, next) {
TAILQ_REMOVE(&st->st_entry, se, se_list);
LIST_REMOVE(se, se_hash);
ieee80211_ies_cleanup(&se->base.se_ies);
IEEE80211_FREE(se, M_80211_SCAN);
}
memset(st->st_maxrssi, 0, sizeof(st->st_maxrssi));
}
/*
* Process a beacon or probe response frame; create an
* entry in the scan cache or update any previous entry.
*/
static int
sta_add(struct ieee80211_scan_state *ss,
struct ieee80211_channel *curchan,
const struct ieee80211_scanparams *sp,
const struct ieee80211_frame *wh,
int subtype, int rssi, int noise)
{
#define ISPROBE(_st) ((_st) == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
#define PICK1ST(_ss) \
((ss->ss_flags & (IEEE80211_SCAN_PICK1ST | IEEE80211_SCAN_GOTPICK)) == \
IEEE80211_SCAN_PICK1ST)
struct sta_table *st = ss->ss_priv;
const uint8_t *macaddr = wh->i_addr2;
struct ieee80211vap *vap = ss->ss_vap;
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_channel *c;
struct sta_entry *se;
struct ieee80211_scan_entry *ise;
int hash;
hash = STA_HASH(macaddr);
IEEE80211_SCAN_TABLE_LOCK(st);
LIST_FOREACH(se, &st->st_hash[hash], se_hash)
if (IEEE80211_ADDR_EQ(se->base.se_macaddr, macaddr))
goto found;
se = (struct sta_entry *) IEEE80211_MALLOC(sizeof(struct sta_entry),
M_80211_SCAN, IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
if (se == NULL) {
IEEE80211_SCAN_TABLE_UNLOCK(st);
return 0;
}
se->se_scangen = st->st_scaniter-1;
se->se_avgrssi = IEEE80211_RSSI_DUMMY_MARKER;
IEEE80211_ADDR_COPY(se->base.se_macaddr, macaddr);
TAILQ_INSERT_TAIL(&st->st_entry, se, se_list);
LIST_INSERT_HEAD(&st->st_hash[hash], se, se_hash);
found:
ise = &se->base;
/* XXX ap beaconing multiple ssid w/ same bssid */
if (sp->ssid[1] != 0 &&
(ISPROBE(subtype) || ise->se_ssid[1] == 0))
memcpy(ise->se_ssid, sp->ssid, 2+sp->ssid[1]);
KASSERT(sp->rates[1] <= IEEE80211_RATE_MAXSIZE,
("rate set too large: %u", sp->rates[1]));
memcpy(ise->se_rates, sp->rates, 2+sp->rates[1]);
if (sp->xrates != NULL) {
/* XXX validate xrates[1] */
KASSERT(sp->xrates[1] <= IEEE80211_RATE_MAXSIZE,
("xrate set too large: %u", sp->xrates[1]));
memcpy(ise->se_xrates, sp->xrates, 2+sp->xrates[1]);
} else
ise->se_xrates[1] = 0;
IEEE80211_ADDR_COPY(ise->se_bssid, wh->i_addr3);
if ((sp->status & IEEE80211_BPARSE_OFFCHAN) == 0) {
/*
* Record rssi data using extended precision LPF filter.
*
* NB: use only on-channel data to insure we get a good
* estimate of the signal we'll see when associated.
*/
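/*
 * (The RSSI_LPF macros live in the per-OS glue header; on FreeBSD
 * they implement a fixed-point exponential moving average, with
 * IEEE80211_RSSI_DUMMY_MARKER letting the first sample seed the
 * average and IEEE80211_RSSI_GET scaling the accumulator back to a
 * plain rssi value for se_rssi.)
 */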
IEEE80211_RSSI_LPF(se->se_avgrssi, rssi);
ise->se_rssi = IEEE80211_RSSI_GET(se->se_avgrssi);
ise->se_noise = noise;
}
memcpy(ise->se_tstamp.data, sp->tstamp, sizeof(ise->se_tstamp));
ise->se_intval = sp->bintval;
ise->se_capinfo = sp->capinfo;
#ifdef IEEE80211_SUPPORT_MESH
if (sp->meshid != NULL && sp->meshid[1] != 0)
memcpy(ise->se_meshid, sp->meshid, 2+sp->meshid[1]);
#endif
/*
* Beware of overriding se_chan for frames seen
* off-channel; this can cause us to attempt an
* association on the wrong channel.
*/
if (sp->status & IEEE80211_BPARSE_OFFCHAN) {
/*
* Off-channel, locate the home/bss channel for the sta
* using the value broadcast in the DSPARMS ie. We know
* sp->chan has this value because it's used to calculate
* IEEE80211_BPARSE_OFFCHAN.
*/
c = ieee80211_find_channel_byieee(ic, sp->chan,
curchan->ic_flags);
if (c != NULL) {
ise->se_chan = c;
} else if (ise->se_chan == NULL) {
/* should not happen, pick something */
ise->se_chan = curchan;
}
} else
ise->se_chan = curchan;
if (IEEE80211_IS_CHAN_HT(ise->se_chan) && sp->htcap == NULL) {
/* Demote legacy networks to a non-HT channel. */
c = ieee80211_find_channel(ic, ise->se_chan->ic_freq,
ise->se_chan->ic_flags & ~IEEE80211_CHAN_HT);
KASSERT(c != NULL,
("no legacy channel %u", ise->se_chan->ic_ieee));
ise->se_chan = c;
}
ise->se_fhdwell = sp->fhdwell;
ise->se_fhindex = sp->fhindex;
ise->se_erp = sp->erp;
ise->se_timoff = sp->timoff;
if (sp->tim != NULL) {
const struct ieee80211_tim_ie *tim =
(const struct ieee80211_tim_ie *) sp->tim;
ise->se_dtimperiod = tim->tim_period;
}
if (sp->country != NULL) {
const struct ieee80211_country_ie *cie =
(const struct ieee80211_country_ie *) sp->country;
/*
* If 11d is enabled and we're attempting to join a bss
* that advertises its country code then compare our
* current settings to what we fetched from the country ie.
* If our country code is unspecified or different then
* dispatch an event to user space that identifies the
* country code so our regdomain config can be changed.
*/
/* XXX only for STA mode? */
if ((IEEE80211_IS_CHAN_11D(ise->se_chan) ||
(vap->iv_flags_ext & IEEE80211_FEXT_DOTD)) &&
(ic->ic_regdomain.country == CTRY_DEFAULT ||
!isocmp(cie->cc, ic->ic_regdomain.isocc))) {
/* only issue one notify event per scan */
if (se->se_countrygen != st->st_scangen) {
ieee80211_notify_country(vap, ise->se_bssid,
cie->cc);
se->se_countrygen = st->st_scangen;
}
}
ise->se_cc[0] = cie->cc[0];
ise->se_cc[1] = cie->cc[1];
}
/* NB: no need to setup ie ptrs; they are not (currently) used */
(void) ieee80211_ies_init(&ise->se_ies, sp->ies, sp->ies_len);
/* clear failure count after STA_FAILS_AGE passes */
if (se->se_fails && (ticks - se->se_lastfail) > STA_FAILS_AGE*hz) {
se->se_fails = 0;
IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_SCAN, macaddr,
"%s: fails %u", __func__, se->se_fails);
}
se->se_lastupdate = ticks; /* update time */
se->se_seen = 1;
se->se_notseen = 0;
KASSERT(sizeof(sp->bchan) == 1, ("bchan size"));
if (rssi > st->st_maxrssi[sp->bchan])
st->st_maxrssi[sp->bchan] = rssi;
IEEE80211_SCAN_TABLE_UNLOCK(st);
/*
* If looking for a quick choice and nothing's
* been found check here.
*/
if (PICK1ST(ss) && match_bss(vap, ss, se, IEEE80211_MSG_SCAN) == 0)
ss->ss_flags |= IEEE80211_SCAN_GOTPICK;
return 1;
#undef PICK1ST
#undef ISPROBE
}
/*
* Check if a channel is excluded by user request.
*/
static int
isexcluded(struct ieee80211vap *vap, const struct ieee80211_channel *c)
{
return (isclr(vap->iv_ic->ic_chan_active, c->ic_ieee) ||
(vap->iv_des_chan != IEEE80211_CHAN_ANYC &&
c->ic_freq != vap->iv_des_chan->ic_freq));
}
static struct ieee80211_channel *
find11gchannel(struct ieee80211com *ic, int i, int freq)
{
struct ieee80211_channel *c;
int j;
/*
* The normal ordering in the channel list is b channel
* immediately followed by g so optimize the search for
* this. We'll still do a full search just in case.
*/
for (j = i+1; j < ic->ic_nchans; j++) {
c = &ic->ic_channels[j];
if (c->ic_freq == freq && IEEE80211_IS_CHAN_G(c))
return c;
}
for (j = 0; j < i; j++) {
c = &ic->ic_channels[j];
if (c->ic_freq == freq && IEEE80211_IS_CHAN_G(c))
return c;
}
return NULL;
}
static const u_int chanflags[IEEE80211_MODE_MAX] = {
[IEEE80211_MODE_AUTO] = IEEE80211_CHAN_B,
[IEEE80211_MODE_11A] = IEEE80211_CHAN_A,
[IEEE80211_MODE_11B] = IEEE80211_CHAN_B,
[IEEE80211_MODE_11G] = IEEE80211_CHAN_G,
[IEEE80211_MODE_FH] = IEEE80211_CHAN_FHSS,
/* check base channel */
[IEEE80211_MODE_TURBO_A] = IEEE80211_CHAN_A,
[IEEE80211_MODE_TURBO_G] = IEEE80211_CHAN_G,
[IEEE80211_MODE_STURBO_A] = IEEE80211_CHAN_ST,
[IEEE80211_MODE_HALF] = IEEE80211_CHAN_HALF,
[IEEE80211_MODE_QUARTER] = IEEE80211_CHAN_QUARTER,
/* check legacy */
[IEEE80211_MODE_11NA] = IEEE80211_CHAN_A,
[IEEE80211_MODE_11NG] = IEEE80211_CHAN_G,
};
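/*
 * NB: the 11NA/11NG entries deliberately map to the underlying legacy
 * band flags: add_channels() looks up non-HT channels and
 * sweepchannels() skips HT channels outright, so HT networks are
 * still discovered while probing at legacy rates.
 */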
static void
add_channels(struct ieee80211vap *vap,
struct ieee80211_scan_state *ss,
enum ieee80211_phymode mode, const uint16_t freq[], int nfreq)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_channel *c, *cg;
u_int modeflags;
int i;
KASSERT(mode < nitems(chanflags), ("Unexpected mode %u", mode));
modeflags = chanflags[mode];
for (i = 0; i < nfreq; i++) {
if (ss->ss_last >= IEEE80211_SCAN_MAX)
break;
c = ieee80211_find_channel(ic, freq[i], modeflags);
if (c == NULL || isexcluded(vap, c))
continue;
if (mode == IEEE80211_MODE_AUTO) {
/*
* XXX special-case 11b/g channels so we select
* the g channel if both are present.
*/
if (IEEE80211_IS_CHAN_B(c) &&
(cg = find11gchannel(ic, i, c->ic_freq)) != NULL)
c = cg;
}
ss->ss_chans[ss->ss_last++] = c;
}
}
struct scanlist {
uint16_t mode;
uint16_t count;
const uint16_t *list;
};
static int
checktable(const struct scanlist *scan, const struct ieee80211_channel *c)
{
int i;
for (; scan->list != NULL; scan++) {
for (i = 0; i < scan->count; i++)
if (scan->list[i] == c->ic_freq)
return 1;
}
return 0;
}
static int
onscanlist(const struct ieee80211_scan_state *ss,
const struct ieee80211_channel *c)
{
int i;
for (i = 0; i < ss->ss_last; i++)
if (ss->ss_chans[i] == c)
return 1;
return 0;
}
static void
sweepchannels(struct ieee80211_scan_state *ss, struct ieee80211vap *vap,
const struct scanlist table[])
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_channel *c;
int i;
for (i = 0; i < ic->ic_nchans; i++) {
if (ss->ss_last >= IEEE80211_SCAN_MAX)
break;
c = &ic->ic_channels[i];
/*
* Ignore dynamic turbo channels; we scan them
* in normal mode (i.e. not boosted). Likewise
* for HT channels, they get scanned using
* legacy rates.
*/
if (IEEE80211_IS_CHAN_DTURBO(c) || IEEE80211_IS_CHAN_HT(c))
continue;
/*
* If a desired mode was specified, scan only
* channels that satisfy that constraint.
*/
if (vap->iv_des_mode != IEEE80211_MODE_AUTO &&
vap->iv_des_mode != ieee80211_chan2mode(c))
continue;
/*
* Skip channels excluded by user request.
*/
if (isexcluded(vap, c))
continue;
/*
* Add the channel unless it is listed in the
* fixed scan order tables. This insures we
* don't sweep back in channels we filtered out
* above.
*/
if (checktable(table, c))
continue;
/* Add channel to scanning list. */
ss->ss_chans[ss->ss_last++] = c;
}
/*
* Explicitly add any desired channel if:
* - not already on the scan list
* - allowed by any desired mode constraint
* - there is space in the scan list
* This allows the channel to be used when the filtering
* mechanisms would otherwise elide it (e.g. HT, turbo).
*/
c = vap->iv_des_chan;
if (c != IEEE80211_CHAN_ANYC &&
!onscanlist(ss, c) &&
(vap->iv_des_mode == IEEE80211_MODE_AUTO ||
vap->iv_des_mode == ieee80211_chan2mode(c)) &&
ss->ss_last < IEEE80211_SCAN_MAX)
ss->ss_chans[ss->ss_last++] = c;
}
static void
makescanlist(struct ieee80211_scan_state *ss, struct ieee80211vap *vap,
const struct scanlist table[])
{
const struct scanlist *scan;
enum ieee80211_phymode mode;
ss->ss_last = 0;
/*
* Use the table of ordered channels to construct the list
* of channels for scanning. Any channels in the ordered
* list not in the master list will be discarded.
*/
for (scan = table; scan->list != NULL; scan++) {
mode = scan->mode;
if (vap->iv_des_mode != IEEE80211_MODE_AUTO) {
/*
* If a desired mode was specified, scan only
* channels that satisfy that constraint.
*/
if (vap->iv_des_mode != mode) {
/*
* The scan table marks 2.4Ghz channels as b
* so if the desired mode is 11g, then use
* the 11b channel list but upgrade the mode.
*/
if (vap->iv_des_mode == IEEE80211_MODE_11G) {
if (mode == IEEE80211_MODE_11G) /* Skip the G check */
continue;
else if (mode == IEEE80211_MODE_11B)
mode = IEEE80211_MODE_11G; /* upgrade */
}
}
} else {
/*
* This lets add_channels upgrade an 11b channel
* to 11g if available.
*/
if (mode == IEEE80211_MODE_11B)
mode = IEEE80211_MODE_AUTO;
}
#ifdef IEEE80211_F_XR
/* XR does not operate on turbo channels */
if ((vap->iv_flags & IEEE80211_F_XR) &&
(mode == IEEE80211_MODE_TURBO_A ||
mode == IEEE80211_MODE_TURBO_G ||
mode == IEEE80211_MODE_STURBO_A))
continue;
#endif
/*
* Add the list of the channels; any that are not
* in the master channel list will be discarded.
*/
add_channels(vap, ss, mode, scan->list, scan->count);
}
/*
* Add the channels from the ic that are not present
* in the table.
*/
sweepchannels(ss, vap, table);
}
static const uint16_t rcl1[] = /* 8 FCC channels: 52, 56, 60, 64, 36, 40, 44, 48 */
{ 5260, 5280, 5300, 5320, 5180, 5200, 5220, 5240 };
static const uint16_t rcl2[] = /* 4 MKK channels: 34, 38, 42, 46 */
{ 5170, 5190, 5210, 5230 };
static const uint16_t rcl3[] = /* 2.4Ghz ch: 1,6,11,7,13 */
{ 2412, 2437, 2462, 2442, 2472 };
static const uint16_t rcl4[] = /* 5 FCC channels: 149, 153, 157, 161, 165 */
{ 5745, 5765, 5785, 5805, 5825 };
static const uint16_t rcl7[] = /* 11 ETSI channels: 100,104,108,112,116,120,124,128,132,136,140 */
{ 5500, 5520, 5540, 5560, 5580, 5600, 5620, 5640, 5660, 5680, 5700 };
static const uint16_t rcl8[] = /* 2.4Ghz ch: 2,3,4,5,8,9,10,12 */
{ 2417, 2422, 2427, 2432, 2447, 2452, 2457, 2467 };
static const uint16_t rcl9[] = /* 2.4Ghz ch: 14 */
{ 2484 };
static const uint16_t rcl10[] = /* Added Korean channels 2312-2372 */
{ 2312, 2317, 2322, 2327, 2332, 2337, 2342, 2347, 2352, 2357, 2362, 2367, 2372 };
static const uint16_t rcl11[] = /* Added Japan channels in 4.9/5.0 spectrum */
{ 5040, 5060, 5080, 4920, 4940, 4960, 4980 };
#ifdef ATH_TURBO_SCAN
static const uint16_t rcl5[] = /* 3 static turbo channels */
{ 5210, 5250, 5290 };
static const uint16_t rcl6[] = /* 2 static turbo channels */
{ 5760, 5800 };
static const uint16_t rcl6x[] = /* 4 FCC3 turbo channels */
{ 5540, 5580, 5620, 5660 };
static const uint16_t rcl12[] = /* 2.4Ghz Turbo channel 6 */
{ 2437 };
static const uint16_t rcl13[] = /* dynamic Turbo channels */
{ 5200, 5240, 5280, 5765, 5805 };
#endif /* ATH_TURBO_SCAN */
#define X(a) .count = sizeof(a)/sizeof(a[0]), .list = a
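/*
 * e.g. X(rcl3) expands to ".count = 5, .list = rcl3", so each table
 * row carries its frequency list and element count together.
 */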
static const struct scanlist staScanTable[] = {
{ IEEE80211_MODE_11B, X(rcl3) },
{ IEEE80211_MODE_11A, X(rcl1) },
{ IEEE80211_MODE_11A, X(rcl2) },
{ IEEE80211_MODE_11B, X(rcl8) },
{ IEEE80211_MODE_11B, X(rcl9) },
{ IEEE80211_MODE_11A, X(rcl4) },
#ifdef ATH_TURBO_SCAN
{ IEEE80211_MODE_STURBO_A, X(rcl5) },
{ IEEE80211_MODE_STURBO_A, X(rcl6) },
{ IEEE80211_MODE_TURBO_A, X(rcl6x) },
{ IEEE80211_MODE_TURBO_A, X(rcl13) },
#endif /* ATH_TURBO_SCAN */
{ IEEE80211_MODE_11A, X(rcl7) },
{ IEEE80211_MODE_11B, X(rcl10) },
{ IEEE80211_MODE_11A, X(rcl11) },
#ifdef ATH_TURBO_SCAN
{ IEEE80211_MODE_TURBO_G, X(rcl12) },
#endif /* ATH_TURBO_SCAN */
{ .list = NULL }
};
/*
* Start a station-mode scan by populating the channel list.
*/
static int
sta_start(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
{
struct sta_table *st = ss->ss_priv;
makescanlist(ss, vap, staScanTable);
if (ss->ss_mindwell == 0)
ss->ss_mindwell = msecs_to_ticks(20); /* 20ms */
if (ss->ss_maxdwell == 0)
ss->ss_maxdwell = msecs_to_ticks(200); /* 200ms */
st->st_scangen++;
st->st_newscan = 1;
return 0;
}
/*
* Restart a scan, typically a bg scan but can
* also be a fg scan that came up empty.
*/
static int
sta_restart(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
{
struct sta_table *st = ss->ss_priv;
st->st_newscan = 1;
return 0;
}
/*
* Cancel an ongoing scan.
*/
static int
sta_cancel(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
{
return 0;
}
/* unaligned little endian access */
#define LE_READ_2(p) \
((uint16_t) \
((((const uint8_t *)(p))[0] ) | \
(((const uint8_t *)(p))[1] << 8)))
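/*
 * e.g. the byte sequence 0x34 0x12 reads as 0x1234; used below for
 * the 16-bit HT capability field, which may be unaligned inside the
 * saved IE buffer.
 */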
/*
* Demote any supplied 11g channel to 11b. There should
* always be an 11b channel but we check anyway...
*/
static struct ieee80211_channel *
demote11b(struct ieee80211vap *vap, struct ieee80211_channel *chan)
{
struct ieee80211_channel *c;
if (IEEE80211_IS_CHAN_ANYG(chan) &&
vap->iv_des_mode == IEEE80211_MODE_AUTO) {
c = ieee80211_find_channel(vap->iv_ic, chan->ic_freq,
(chan->ic_flags &~ (IEEE80211_CHAN_PUREG | IEEE80211_CHAN_G)) |
IEEE80211_CHAN_B);
if (c != NULL)
chan = c;
}
return chan;
}
static int
maxrate(const struct ieee80211_scan_entry *se)
{
const struct ieee80211_ie_htcap *htcap =
(const struct ieee80211_ie_htcap *) se->se_ies.htcap_ie;
int rmax, r, i, txstream;
uint16_t caps;
uint8_t txparams;
rmax = 0;
if (htcap != NULL) {
/*
* HT station; inspect supported MCS and then adjust
* rate by channel width.
*/
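/*
 * hc_mcsset[12] is the first TX MCS parameter byte of the HT cap
 * MCS set (assuming the usual 802.11n layout): bit 0 = TX MCS set
 * defined, bit 1 = TX/RX sets differ, bits 2-3 = TX spatial
 * streams - 1.  e.g. 0x07 means 2 streams, so i becomes 15 (MCS15).
 */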
txparams = htcap->hc_mcsset[12];
if (txparams & 0x3) {
/*
* TX MCS parameters defined and not equal to RX,
* extract the number of spatial streams and
* map it to the highest MCS rate.
*/
txstream = ((txparams & 0xc) >> 2) + 1;
i = txstream * 8 - 1;
} else
for (i = 31; i >= 0 && isclr(htcap->hc_mcsset, i); i--);
if (i >= 0) {
caps = LE_READ_2(&htcap->hc_cap);
if ((caps & IEEE80211_HTCAP_CHWIDTH40) &&
(caps & IEEE80211_HTCAP_SHORTGI40))
rmax = ieee80211_htrates[i].ht40_rate_400ns;
else if (caps & IEEE80211_HTCAP_CHWIDTH40)
rmax = ieee80211_htrates[i].ht40_rate_800ns;
else if (caps & IEEE80211_HTCAP_SHORTGI20)
rmax = ieee80211_htrates[i].ht20_rate_400ns;
else
rmax = ieee80211_htrates[i].ht20_rate_800ns;
}
}
for (i = 0; i < se->se_rates[1]; i++) {
r = se->se_rates[2+i] & IEEE80211_RATE_VAL;
if (r > rmax)
rmax = r;
}
for (i = 0; i < se->se_xrates[1]; i++) {
r = se->se_xrates[2+i] & IEEE80211_RATE_VAL;
if (r > rmax)
rmax = r;
}
return rmax;
}
/*
* Compare the capabilities of two entries and decide which is
* more desirable (return >0 if a is considered better). Note
* that we assume compatibility/usability has already been checked
* so we don't need to (e.g. validate whether privacy is supported).
* Used to select the best scan candidate for association in a BSS.
*/
static int
sta_compare(const struct sta_entry *a, const struct sta_entry *b)
{
#define PREFER(_a,_b,_what) do { \
if (((_a) ^ (_b)) & (_what)) \
return ((_a) & (_what)) ? 1 : -1; \
} while (0)
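/*
 * PREFER() only decides when exactly one of the two entries has the
 * attribute (_a ^ _b isolates a difference); when both or neither
 * have it the comparison falls through to the next criterion.
 */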
int maxa, maxb;
int8_t rssia, rssib;
int weight;
/* privacy support */
PREFER(a->base.se_capinfo, b->base.se_capinfo,
IEEE80211_CAPINFO_PRIVACY);
/* compare count of previous failures */
weight = b->se_fails - a->se_fails;
if (abs(weight) > 1)
return weight;
/*
* Compare rssi. If the two are considered equivalent
* then fallback to other criteria. We threshold the
* comparisons to avoid selecting an ap purely by rssi
* when both values may be good but one ap is otherwise
* more desirable (e.g. an 11b-only ap with stronger
* signal than an 11g ap).
*/
rssia = MIN(a->base.se_rssi, STA_RSSI_MAX);
rssib = MIN(b->base.se_rssi, STA_RSSI_MAX);
if (abs(rssib - rssia) < 5) {
/* best/max rate preferred if signal level close enough XXX */
maxa = maxrate(&a->base);
maxb = maxrate(&b->base);
if (maxa != maxb)
return maxa - maxb;
/* XXX use freq for channel preference */
/* for now just prefer 5Ghz band to all other bands */
PREFER(IEEE80211_IS_CHAN_5GHZ(a->base.se_chan),
IEEE80211_IS_CHAN_5GHZ(b->base.se_chan), 1);
}
/* all things being equal, use signal level */
return a->base.se_rssi - b->base.se_rssi;
#undef PREFER
}
/*
* Check rate set suitability and return the best supported rate.
* XXX inspect MCS for HT
*/
static int
check_rate(struct ieee80211vap *vap, const struct ieee80211_channel *chan,
const struct ieee80211_scan_entry *se)
{
#define RV(v) ((v) & IEEE80211_RATE_VAL)
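/*
 * Return convention: on success the best mutually supported rate is
 * returned with the BASIC bit clear; failure (missing basic rate, or
 * a fixed ucast rate the bss doesn't offer) is signalled by returning
 * a rate with IEEE80211_RATE_BASIC set, which match_bss() tests.
 */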
const struct ieee80211_rateset *srs;
int i, j, nrs, r, okrate, badrate, fixedrate, ucastrate;
const uint8_t *rs;
okrate = badrate = 0;
srs = ieee80211_get_suprates(vap->iv_ic, chan);
nrs = se->se_rates[1];
rs = se->se_rates+2;
/* XXX MCS */
ucastrate = vap->iv_txparms[ieee80211_chan2mode(chan)].ucastrate;
fixedrate = IEEE80211_FIXED_RATE_NONE;
again:
for (i = 0; i < nrs; i++) {
r = RV(rs[i]);
badrate = r;
/*
* Check any fixed rate is included.
*/
if (r == ucastrate)
fixedrate = r;
/*
* Check against our supported rates.
*/
for (j = 0; j < srs->rs_nrates; j++)
if (r == RV(srs->rs_rates[j])) {
if (r > okrate) /* NB: track max */
okrate = r;
break;
}
if (j == srs->rs_nrates && (rs[i] & IEEE80211_RATE_BASIC)) {
/*
* Don't try joining a BSS, if we don't support
* one of its basic rates.
*/
okrate = 0;
goto back;
}
}
if (rs == se->se_rates+2) {
/* scan xrates too; sort of an algol68-style for loop */
nrs = se->se_xrates[1];
rs = se->se_xrates+2;
goto again;
}
back:
if (okrate == 0 || ucastrate != fixedrate)
return badrate | IEEE80211_RATE_BASIC;
else
return RV(okrate);
#undef RV
}
static __inline int
match_id(const uint8_t *ie, const uint8_t *val, int len)
{
return (ie[1] == len && memcmp(ie+2, val, len) == 0);
}
static int
match_ssid(const uint8_t *ie,
int nssid, const struct ieee80211_scan_ssid ssids[])
{
int i;
for (i = 0; i < nssid; i++) {
if (match_id(ie, ssids[i].ssid, ssids[i].len))
return 1;
}
return 0;
}
#ifdef IEEE80211_SUPPORT_TDMA
static int
tdma_isfull(const struct ieee80211_tdma_param *tdma)
{
int slot, slotcnt;
slotcnt = tdma->tdma_slotcnt;
for (slot = slotcnt-1; slot >= 0; slot--)
if (isclr(tdma->tdma_inuse, slot))
return 0;
return 1;
}
#endif /* IEEE80211_SUPPORT_TDMA */
/*
* Test a scan candidate for suitability/compatibility.
*/
static int
match_bss(struct ieee80211vap *vap,
const struct ieee80211_scan_state *ss, struct sta_entry *se0,
int debug)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_scan_entry *se = &se0->base;
uint8_t rate;
int fail;
fail = 0;
if (isclr(ic->ic_chan_active, ieee80211_chan2ieee(ic, se->se_chan)))
fail |= MATCH_CHANNEL;
/*
* NB: normally the desired mode is used to construct
* the channel list, but it's possible for the scan
* cache to include entries for stations outside this
* list so we check the desired mode here to weed them
* out.
*/
if (vap->iv_des_mode != IEEE80211_MODE_AUTO &&
(se->se_chan->ic_flags & IEEE80211_CHAN_ALLTURBO) !=
chanflags[vap->iv_des_mode])
fail |= MATCH_CHANNEL;
if (vap->iv_opmode == IEEE80211_M_IBSS) {
if ((se->se_capinfo & IEEE80211_CAPINFO_IBSS) == 0)
fail |= MATCH_CAPINFO;
#ifdef IEEE80211_SUPPORT_TDMA
} else if (vap->iv_opmode == IEEE80211_M_AHDEMO) {
/*
* Adhoc demo network setup shouldn't really be scanning
* but just in case skip stations operating in IBSS or
* BSS mode.
*/
if (se->se_capinfo & (IEEE80211_CAPINFO_IBSS|IEEE80211_CAPINFO_ESS))
fail |= MATCH_CAPINFO;
/*
* TDMA operation cannot coexist with a normal 802.11 network;
* skip if IBSS or ESS capabilities are marked and require
* the beacon have a TDMA ie present.
*/
if (vap->iv_caps & IEEE80211_C_TDMA) {
const struct ieee80211_tdma_param *tdma =
(const struct ieee80211_tdma_param *)se->se_ies.tdma_ie;
const struct ieee80211_tdma_state *ts = vap->iv_tdma;
if (tdma == NULL)
fail |= MATCH_TDMA_NOIE;
else if (tdma->tdma_version != ts->tdma_version)
fail |= MATCH_TDMA_VERSION;
else if (tdma->tdma_slot != 0)
fail |= MATCH_TDMA_NOTMASTER;
else if (tdma_isfull(tdma))
fail |= MATCH_TDMA_NOSLOT;
#if 0
else if (ieee80211_local_address(se->se_macaddr))
fail |= MATCH_TDMA_LOCAL;
#endif
}
#endif /* IEEE80211_SUPPORT_TDMA */
#ifdef IEEE80211_SUPPORT_MESH
} else if (vap->iv_opmode == IEEE80211_M_MBSS) {
const struct ieee80211_mesh_state *ms = vap->iv_mesh;
/*
* Mesh nodes have IBSS & ESS bits in capinfo turned off
* and two special ie's that must be present.
*/
if (se->se_capinfo & (IEEE80211_CAPINFO_IBSS|IEEE80211_CAPINFO_ESS))
fail |= MATCH_CAPINFO;
else if (se->se_meshid[0] != IEEE80211_ELEMID_MESHID)
fail |= MATCH_MESH_NOID;
else if (ms->ms_idlen != 0 &&
match_id(se->se_meshid, ms->ms_id, ms->ms_idlen))
fail |= MATCH_MESHID;
#endif
} else {
if ((se->se_capinfo & IEEE80211_CAPINFO_ESS) == 0)
fail |= MATCH_CAPINFO;
/*
* If 11d is enabled and we're attempting to join a bss
* that advertises its country code then compare our
* current settings to what we fetched from the country ie.
* If our country code is unspecified or different then do
* not attempt to join the bss. We should have already
* dispatched an event to user space that identifies the
* new country code so our regdomain config should match.
*/
if ((IEEE80211_IS_CHAN_11D(se->se_chan) ||
(vap->iv_flags_ext & IEEE80211_FEXT_DOTD)) &&
se->se_cc[0] != 0 &&
(ic->ic_regdomain.country == CTRY_DEFAULT ||
!isocmp(se->se_cc, ic->ic_regdomain.isocc)))
fail |= MATCH_CC;
}
if (vap->iv_flags & IEEE80211_F_PRIVACY) {
if ((se->se_capinfo & IEEE80211_CAPINFO_PRIVACY) == 0)
fail |= MATCH_PRIVACY;
} else {
/* XXX does this mean privacy is supported or required? */
if (se->se_capinfo & IEEE80211_CAPINFO_PRIVACY)
fail |= MATCH_PRIVACY;
}
se0->se_flags &= ~STA_DEMOTE11B;
rate = check_rate(vap, se->se_chan, se);
if (rate & IEEE80211_RATE_BASIC) {
fail |= MATCH_RATE;
/*
* An 11b-only ap will give a rate mismatch if there is an
* OFDM fixed tx rate for 11g. Try downgrading the channel
* in the scan list to 11b and retry the rate check.
*/
if (IEEE80211_IS_CHAN_ANYG(se->se_chan)) {
rate = check_rate(vap, demote11b(vap, se->se_chan), se);
if ((rate & IEEE80211_RATE_BASIC) == 0) {
fail &= ~MATCH_RATE;
se0->se_flags |= STA_DEMOTE11B;
}
}
} else if (rate < 2*24) {
/*
* This is an 11b-only ap. Check the desired mode in
* case that needs to be honored (mode 11g filters out
* 11b-only ap's). Otherwise force any 11g channel used
* in scanning to be demoted.
*
* NB: we cheat a bit here by looking at the max rate;
* we could/should check the rates.
*/
if (!(vap->iv_des_mode == IEEE80211_MODE_AUTO ||
vap->iv_des_mode == IEEE80211_MODE_11B))
fail |= MATCH_RATE;
else
se0->se_flags |= STA_DEMOTE11B;
}
if (ss->ss_nssid != 0 &&
!match_ssid(se->se_ssid, ss->ss_nssid, ss->ss_ssid))
fail |= MATCH_SSID;
if ((vap->iv_flags & IEEE80211_F_DESBSSID) &&
!IEEE80211_ADDR_EQ(vap->iv_des_bssid, se->se_bssid))
fail |= MATCH_BSSID;
if (se0->se_fails >= STA_FAILS_MAX)
fail |= MATCH_FAILS;
if (se0->se_notseen >= STA_PURGE_SCANS)
fail |= MATCH_NOTSEEN;
if (se->se_rssi < STA_RSSI_MIN)
fail |= MATCH_RSSI;
#ifdef IEEE80211_DEBUG
if (ieee80211_msg(vap, debug)) {
printf(" %c %s",
fail & MATCH_FAILS ? '=' :
fail & MATCH_NOTSEEN ? '^' :
fail & MATCH_CC ? '$' :
#ifdef IEEE80211_SUPPORT_TDMA
fail & MATCH_TDMA_NOIE ? '&' :
fail & MATCH_TDMA_VERSION ? 'v' :
fail & MATCH_TDMA_NOTMASTER ? 's' :
fail & MATCH_TDMA_NOSLOT ? 'f' :
fail & MATCH_TDMA_LOCAL ? 'l' :
#endif
fail & MATCH_MESH_NOID ? 'm' :
fail ? '-' : '+', ether_sprintf(se->se_macaddr));
printf(" %s%c", ether_sprintf(se->se_bssid),
fail & MATCH_BSSID ? '!' : ' ');
printf(" %3d%c", ieee80211_chan2ieee(ic, se->se_chan),
fail & MATCH_CHANNEL ? '!' : ' ');
printf(" %+4d%c", se->se_rssi, fail & MATCH_RSSI ? '!' : ' ');
printf(" %2dM%c", (rate & IEEE80211_RATE_VAL) / 2,
fail & MATCH_RATE ? '!' : ' ');
printf(" %4s%c",
(se->se_capinfo & IEEE80211_CAPINFO_ESS) ? "ess" :
(se->se_capinfo & IEEE80211_CAPINFO_IBSS) ? "ibss" : "",
fail & MATCH_CAPINFO ? '!' : ' ');
printf(" %3s%c ",
(se->se_capinfo & IEEE80211_CAPINFO_PRIVACY) ?
"wep" : "no",
fail & MATCH_PRIVACY ? '!' : ' ');
ieee80211_print_essid(se->se_ssid+2, se->se_ssid[1]);
printf("%s\n", fail & (MATCH_SSID | MATCH_MESHID) ? "!" : "");
}
#endif
return fail;
}
static void
sta_update_notseen(struct sta_table *st)
{
struct sta_entry *se;
IEEE80211_SCAN_TABLE_LOCK(st);
TAILQ_FOREACH(se, &st->st_entry, se_list) {
/*
* If seen then reset and don't bump the count;
* otherwise bump the ``not seen'' count. Note
* that this insures that stations for which we
* see frames while not scanning but not during
* this scan will not be penalized.
*/
if (se->se_seen)
se->se_seen = 0;
else
se->se_notseen++;
}
IEEE80211_SCAN_TABLE_UNLOCK(st);
}
static void
sta_dec_fails(struct sta_table *st)
{
struct sta_entry *se;
IEEE80211_SCAN_TABLE_LOCK(st);
TAILQ_FOREACH(se, &st->st_entry, se_list)
if (se->se_fails)
se->se_fails--;
IEEE80211_SCAN_TABLE_UNLOCK(st);
}
static struct sta_entry *
select_bss(struct ieee80211_scan_state *ss, struct ieee80211vap *vap, int debug)
{
struct sta_table *st = ss->ss_priv;
struct sta_entry *se, *selbs = NULL;
IEEE80211_DPRINTF(vap, debug, " %s\n",
"macaddr bssid chan rssi rate flag wep essid");
IEEE80211_SCAN_TABLE_LOCK(st);
TAILQ_FOREACH(se, &st->st_entry, se_list) {
ieee80211_ies_expand(&se->base.se_ies);
if (match_bss(vap, ss, se, debug) == 0) {
if (selbs == NULL)
selbs = se;
else if (sta_compare(se, selbs) > 0)
selbs = se;
}
}
IEEE80211_SCAN_TABLE_UNLOCK(st);
return selbs;
}
/*
* Pick an ap or ibss network to join or find a channel
* to use to start an ibss network.
*/
static int
sta_pick_bss(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
{
struct sta_table *st = ss->ss_priv;
struct sta_entry *selbs;
struct ieee80211_channel *chan;
KASSERT(vap->iv_opmode == IEEE80211_M_STA,
("wrong mode %u", vap->iv_opmode));
if (st->st_newscan) {
sta_update_notseen(st);
st->st_newscan = 0;
}
if (ss->ss_flags & IEEE80211_SCAN_NOPICK) {
/*
* Manual/background scan, don't select+join the
* bss, just return. The scanning framework will
* handle notification that this has completed.
*/
ss->ss_flags &= ~IEEE80211_SCAN_NOPICK;
return 1;
}
/*
* Automatic sequencing; look for a candidate and
* if found join the network.
*/
/* NB: unlocked read should be ok */
if (TAILQ_FIRST(&st->st_entry) == NULL) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
"%s: no scan candidate\n", __func__);
if (ss->ss_flags & IEEE80211_SCAN_NOJOIN)
return 0;
notfound:
/*
* If nothing suitable was found decrement
* the failure counts so entries will be
* reconsidered the next time around. We
* really want to do this only for sta's
* where we've previously had some success.
*/
sta_dec_fails(st);
st->st_newscan = 1;
return 0; /* restart scan */
}
selbs = select_bss(ss, vap, IEEE80211_MSG_SCAN);
if (ss->ss_flags & IEEE80211_SCAN_NOJOIN)
return (selbs != NULL);
if (selbs == NULL)
goto notfound;
chan = selbs->base.se_chan;
if (selbs->se_flags & STA_DEMOTE11B)
chan = demote11b(vap, chan);
if (!ieee80211_sta_join(vap, chan, &selbs->base))
goto notfound;
return 1; /* terminate scan */
}
/*
* Lookup an entry in the scan cache. We assume we're
* called from the bottom half or such that we don't need
* to block the bottom half so that it's safe to return
* a reference to an entry w/o holding the lock on the table.
*/
static struct sta_entry *
sta_lookup(struct sta_table *st, const uint8_t macaddr[IEEE80211_ADDR_LEN])
{
struct sta_entry *se;
int hash = STA_HASH(macaddr);
IEEE80211_SCAN_TABLE_LOCK(st);
LIST_FOREACH(se, &st->st_hash[hash], se_hash)
if (IEEE80211_ADDR_EQ(se->base.se_macaddr, macaddr))
break;
IEEE80211_SCAN_TABLE_UNLOCK(st);
return se; /* NB: unlocked */
}
static void
sta_roam_check(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_node *ni = vap->iv_bss;
struct sta_table *st = ss->ss_priv;
enum ieee80211_phymode mode;
struct sta_entry *se, *selbs;
uint8_t roamRate, curRate, ucastRate;
int8_t roamRssi, curRssi;
se = sta_lookup(st, ni->ni_macaddr);
if (se == NULL) {
/* XXX something is wrong */
return;
}
mode = ieee80211_chan2mode(ic->ic_bsschan);
roamRate = vap->iv_roamparms[mode].rate;
roamRssi = vap->iv_roamparms[mode].rssi;
ucastRate = vap->iv_txparms[mode].ucastrate;
/* NB: the most up to date rssi is in the node, not the scan cache */
curRssi = ic->ic_node_getrssi(ni);
if (ucastRate == IEEE80211_FIXED_RATE_NONE) {
curRate = ni->ni_txrate;
roamRate &= IEEE80211_RATE_VAL;
IEEE80211_DPRINTF(vap, IEEE80211_MSG_ROAM,
"%s: currssi %d currate %u roamrssi %d roamrate %u\n",
__func__, curRssi, curRate, roamRssi, roamRate);
} else {
curRate = roamRate; /* NB: insure compare below fails */
IEEE80211_DPRINTF(vap, IEEE80211_MSG_ROAM,
"%s: currssi %d roamrssi %d\n", __func__, curRssi, roamRssi);
}
/*
* Check if a new ap should be used and switch.
* XXX deauth current ap
*/
if (curRate < roamRate || curRssi < roamRssi) {
if (time_after(ticks, ic->ic_lastscan + vap->iv_scanvalid)) {
/*
* Scan cache contents are too old; force a scan now
* if possible so we have current state to make a
* decision with. We don't kick off a bg scan if
* we're using dynamic turbo and boosted or if the
* channel is busy.
* XXX force immediate switch on scan complete
*/
if (!IEEE80211_IS_CHAN_DTURBO(ic->ic_curchan) &&
time_after(ticks, ic->ic_lastdata + vap->iv_bgscanidle))
ieee80211_bg_scan(vap, 0);
return;
}
se->base.se_rssi = curRssi;
selbs = select_bss(ss, vap, IEEE80211_MSG_ROAM);
if (selbs != NULL && selbs != se) {
struct ieee80211_channel *chan;
IEEE80211_DPRINTF(vap,
IEEE80211_MSG_ROAM | IEEE80211_MSG_DEBUG,
"%s: ROAM: curRate %u, roamRate %u, "
"curRssi %d, roamRssi %d\n", __func__,
curRate, roamRate, curRssi, roamRssi);
chan = selbs->base.se_chan;
if (selbs->se_flags & STA_DEMOTE11B)
chan = demote11b(vap, chan);
(void) ieee80211_sta_join(vap, chan, &selbs->base);
}
}
}
/*
* Age entries in the scan cache.
* XXX also do roaming since it's convenient
*/
static void
sta_age(struct ieee80211_scan_state *ss)
{
struct ieee80211vap *vap = ss->ss_vap;
adhoc_age(ss);
/*
* If rate control is enabled check periodically to see if
* we should roam from our current connection to one that
* might be better. This only applies when we're operating
* in sta mode and automatic roaming is set.
* XXX defer if busy
* XXX repeater station
* XXX do when !bgscan?
*/
KASSERT(vap->iv_opmode == IEEE80211_M_STA,
("wrong mode %u", vap->iv_opmode));
if (vap->iv_roaming == IEEE80211_ROAMING_AUTO &&
(vap->iv_flags & IEEE80211_F_BGSCAN) &&
vap->iv_state >= IEEE80211_S_RUN)
/* XXX vap is implicit */
sta_roam_check(ss, vap);
}
/*
* Iterate over the entries in the scan cache, invoking
* the callback function on each one.
*/
static void
sta_iterate(struct ieee80211_scan_state *ss,
ieee80211_scan_iter_func *f, void *arg)
{
struct sta_table *st = ss->ss_priv;
struct sta_entry *se;
u_int gen;
IEEE80211_SCAN_ITER_LOCK(st);
gen = st->st_scaniter++;
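/*
 * Each pass gets a fresh generation number.  The table lock is
 * dropped around the callback, so the loop restarts from the head
 * after every call; entries already visited carry the current gen
 * and are skipped on the next pass.
 */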
restart:
IEEE80211_SCAN_TABLE_LOCK(st);
TAILQ_FOREACH(se, &st->st_entry, se_list) {
if (se->se_scangen != gen) {
se->se_scangen = gen;
/* update public state */
se->base.se_age = ticks - se->se_lastupdate;
IEEE80211_SCAN_TABLE_UNLOCK(st);
(*f)(arg, &se->base);
goto restart;
}
}
IEEE80211_SCAN_TABLE_UNLOCK(st);
IEEE80211_SCAN_ITER_UNLOCK(st);
}
static void
sta_assoc_fail(struct ieee80211_scan_state *ss,
const uint8_t macaddr[IEEE80211_ADDR_LEN], int reason)
{
struct sta_table *st = ss->ss_priv;
struct sta_entry *se;
se = sta_lookup(st, macaddr);
if (se != NULL) {
se->se_fails++;
se->se_lastfail = ticks;
IEEE80211_NOTE_MAC(ss->ss_vap, IEEE80211_MSG_SCAN,
macaddr, "%s: reason %u fails %u",
__func__, reason, se->se_fails);
}
}
static void
sta_assoc_success(struct ieee80211_scan_state *ss,
const uint8_t macaddr[IEEE80211_ADDR_LEN])
{
struct sta_table *st = ss->ss_priv;
struct sta_entry *se;
se = sta_lookup(st, macaddr);
if (se != NULL) {
#if 0
se->se_fails = 0;
IEEE80211_NOTE_MAC(ss->ss_vap, IEEE80211_MSG_SCAN,
macaddr, "%s: fails %u",
__func__, se->se_fails);
#endif
se->se_lastassoc = ticks;
}
}
static const struct ieee80211_scanner sta_default = {
.scan_name = "default",
.scan_attach = sta_attach,
.scan_detach = sta_detach,
.scan_start = sta_start,
.scan_restart = sta_restart,
.scan_cancel = sta_cancel,
.scan_end = sta_pick_bss,
.scan_flush = sta_flush,
.scan_add = sta_add,
.scan_age = sta_age,
.scan_iterate = sta_iterate,
.scan_assoc_fail = sta_assoc_fail,
.scan_assoc_success = sta_assoc_success,
};
IEEE80211_SCANNER_ALG(sta, IEEE80211_M_STA, sta_default);
/*
* Adhoc mode-specific support.
*/
static const uint16_t adhocWorld[] = /* 36, 40, 44, 48 */
{ 5180, 5200, 5220, 5240 };
static const uint16_t adhocFcc3[] = /* 36, 40, 44, 48, 145, 149, 153, 157, 161, 165 */
{ 5180, 5200, 5220, 5240, 5725, 5745, 5765, 5785, 5805, 5825 };
static const uint16_t adhocMkk[] = /* 34, 38, 42, 46 */
{ 5170, 5190, 5210, 5230 };
static const uint16_t adhoc11b[] = /* 10, 11 */
{ 2457, 2462 };
static const struct scanlist adhocScanTable[] = {
{ IEEE80211_MODE_11B, X(adhoc11b) },
{ IEEE80211_MODE_11A, X(adhocWorld) },
{ IEEE80211_MODE_11A, X(adhocFcc3) },
{ IEEE80211_MODE_11B, X(adhocMkk) },
{ .list = NULL }
};
#undef X
/*
* Start an adhoc-mode scan by populating the channel list.
*/
static int
adhoc_start(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
{
struct sta_table *st = ss->ss_priv;
makescanlist(ss, vap, adhocScanTable);
if (ss->ss_mindwell == 0)
ss->ss_mindwell = msecs_to_ticks(200); /* 200ms */
if (ss->ss_maxdwell == 0)
ss->ss_maxdwell = msecs_to_ticks(200); /* 200ms */
st->st_scangen++;
st->st_newscan = 1;
return 0;
}
/*
* Select a channel to start an adhoc network on.
* The channel list was populated with appropriate
* channels so select one that looks least occupied.
*/
static struct ieee80211_channel *
adhoc_pick_channel(struct ieee80211_scan_state *ss, int flags)
{
struct sta_table *st = ss->ss_priv;
struct sta_entry *se;
struct ieee80211_channel *c, *bestchan;
int i, bestrssi, maxrssi;
bestchan = NULL;
bestrssi = -1;
IEEE80211_SCAN_TABLE_LOCK(st);
for (i = 0; i < ss->ss_last; i++) {
c = ss->ss_chans[i];
/* never consider a channel with radar */
if (IEEE80211_IS_CHAN_RADAR(c))
continue;
/* skip channels disallowed by regulatory settings */
if (IEEE80211_IS_CHAN_NOADHOC(c))
continue;
/* check channel attributes for band compatibility */
if (flags != 0 && (c->ic_flags & flags) != flags)
continue;
maxrssi = 0;
TAILQ_FOREACH(se, &st->st_entry, se_list) {
if (se->base.se_chan != c)
continue;
if (se->base.se_rssi > maxrssi)
maxrssi = se->base.se_rssi;
}
if (bestchan == NULL || maxrssi < bestrssi)
bestchan = c;
}
IEEE80211_SCAN_TABLE_UNLOCK(st);
return bestchan;
}
/*
* Pick an ibss network to join or find a channel
* to use to start an ibss network.
*/
static int
adhoc_pick_bss(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
{
struct sta_table *st = ss->ss_priv;
struct sta_entry *selbs;
struct ieee80211_channel *chan;
struct ieee80211com *ic = vap->iv_ic;
KASSERT(vap->iv_opmode == IEEE80211_M_IBSS ||
vap->iv_opmode == IEEE80211_M_AHDEMO ||
vap->iv_opmode == IEEE80211_M_MBSS,
("wrong opmode %u", vap->iv_opmode));
if (st->st_newscan) {
sta_update_notseen(st);
st->st_newscan = 0;
}
if (ss->ss_flags & IEEE80211_SCAN_NOPICK) {
/*
* Manual/background scan, don't select+join the
* bss, just return. The scanning framework will
* handle notification that this has completed.
*/
ss->ss_flags &= ~IEEE80211_SCAN_NOPICK;
return 1;
}
/*
* Automatic sequencing; look for a candidate and
* if found join the network.
*/
/* NB: unlocked read should be ok */
if (TAILQ_FIRST(&st->st_entry) == NULL) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
"%s: no scan candidate\n", __func__);
if (ss->ss_flags & IEEE80211_SCAN_NOJOIN)
return 0;
notfound:
/* NB: never auto-start a tdma network for slot !0 */
#ifdef IEEE80211_SUPPORT_TDMA
if (vap->iv_des_nssid &&
((vap->iv_caps & IEEE80211_C_TDMA) == 0 ||
ieee80211_tdma_getslot(vap) == 0)) {
#else
if (vap->iv_des_nssid) {
#endif
/*
* No existing adhoc network to join and we have
* an ssid; start one up. If no channel was
* specified, try to select a channel.
*/
if (vap->iv_des_chan == IEEE80211_CHAN_ANYC ||
IEEE80211_IS_CHAN_RADAR(vap->iv_des_chan)) {
chan = adhoc_pick_channel(ss, 0);
} else
chan = vap->iv_des_chan;
if (chan != NULL) {
struct ieee80211com *ic = vap->iv_ic;
/*
* Create a HT capable IBSS; the per-node
* probe request/response will result in
* "correct" rate control capabilities being
* negotiated.
*/
chan = ieee80211_ht_adjust_channel(ic,
chan, vap->iv_flags_ht);
ieee80211_create_ibss(vap, chan);
return 1;
}
}
/*
* If nothing suitable was found decrement
* the failure counts so entries will be
* reconsidered the next time around. We
* really want to do this only for sta's
* where we've previously had some success.
*/
sta_dec_fails(st);
st->st_newscan = 1;
return 0; /* restart scan */
}
selbs = select_bss(ss, vap, IEEE80211_MSG_SCAN);
if (ss->ss_flags & IEEE80211_SCAN_NOJOIN)
return (selbs != NULL);
if (selbs == NULL)
goto notfound;
chan = selbs->base.se_chan;
if (selbs->se_flags & STA_DEMOTE11B)
chan = demote11b(vap, chan);
/*
* If HT is available, make it a possibility here.
* The intent is to enable HT20/HT40 when joining a non-HT
* IBSS node; we can then advertise HT IEs and speak HT
* to any subsequent nodes that support it.
*/
chan = ieee80211_ht_adjust_channel(ic,
chan, vap->iv_flags_ht);
if (!ieee80211_sta_join(vap, chan, &selbs->base))
goto notfound;
return 1; /* terminate scan */
}
/*
* Age entries in the scan cache.
*/
static void
adhoc_age(struct ieee80211_scan_state *ss)
{
struct sta_table *st = ss->ss_priv;
struct sta_entry *se, *next;
IEEE80211_SCAN_TABLE_LOCK(st);
TAILQ_FOREACH_SAFE(se, &st->st_entry, se_list, next) {
if (se->se_notseen > STA_PURGE_SCANS) {
TAILQ_REMOVE(&st->st_entry, se, se_list);
LIST_REMOVE(se, se_hash);
ieee80211_ies_cleanup(&se->base.se_ies);
IEEE80211_FREE(se, M_80211_SCAN);
}
}
IEEE80211_SCAN_TABLE_UNLOCK(st);
}
static const struct ieee80211_scanner adhoc_default = {
.scan_name = "default",
.scan_attach = sta_attach,
.scan_detach = sta_detach,
.scan_start = adhoc_start,
.scan_restart = sta_restart,
.scan_cancel = sta_cancel,
.scan_end = adhoc_pick_bss,
.scan_flush = sta_flush,
.scan_pickchan = adhoc_pick_channel,
.scan_add = sta_add,
.scan_age = adhoc_age,
.scan_iterate = sta_iterate,
.scan_assoc_fail = sta_assoc_fail,
.scan_assoc_success = sta_assoc_success,
};
IEEE80211_SCANNER_ALG(ibss, IEEE80211_M_IBSS, adhoc_default);
IEEE80211_SCANNER_ALG(ahdemo, IEEE80211_M_AHDEMO, adhoc_default);
-static void
-ap_force_promisc(struct ieee80211com *ic)
-{
- struct ifnet *ifp = ic->ic_ifp;
-
- IEEE80211_LOCK(ic);
- /* set interface into promiscuous mode */
- ifp->if_flags |= IFF_PROMISC;
- ieee80211_runtask(ic, &ic->ic_promisc_task);
- IEEE80211_UNLOCK(ic);
-}
-
-static void
-ap_reset_promisc(struct ieee80211com *ic)
-{
- IEEE80211_LOCK(ic);
- ieee80211_syncifflag_locked(ic, IFF_PROMISC);
- IEEE80211_UNLOCK(ic);
-}
-
static int
ap_start(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
{
struct sta_table *st = ss->ss_priv;
makescanlist(ss, vap, staScanTable);
if (ss->ss_mindwell == 0)
ss->ss_mindwell = msecs_to_ticks(200); /* 200ms */
if (ss->ss_maxdwell == 0)
ss->ss_maxdwell = msecs_to_ticks(200); /* 200ms */
st->st_scangen++;
st->st_newscan = 1;
- ap_force_promisc(vap->iv_ic);
+ ieee80211_promisc(vap, true);
return 0;
}
/*
* Cancel an ongoing scan.
*/
static int
ap_cancel(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
{
- ap_reset_promisc(vap->iv_ic);
+ ieee80211_promisc(vap, false);
return 0;
}
/*
* Pick a quiet channel to use for ap operation.
*/
static struct ieee80211_channel *
ap_pick_channel(struct ieee80211_scan_state *ss, int flags)
{
struct sta_table *st = ss->ss_priv;
struct ieee80211_channel *bestchan = NULL;
int i;
/* XXX select channel more intelligently, e.g. channel spread, power */
/* NB: use scan list order to preserve channel preference */
for (i = 0; i < ss->ss_last; i++) {
struct ieee80211_channel *chan = ss->ss_chans[i];
/*
* If the channel is unoccupied the max rssi
* should be zero; just take it. Otherwise
* track the channel with the lowest rssi and
* use that when all channels appear occupied.
*/
if (IEEE80211_IS_CHAN_RADAR(chan))
continue;
if (IEEE80211_IS_CHAN_NOHOSTAP(chan))
continue;
/* check channel attributes for band compatibility */
if (flags != 0 && (chan->ic_flags & flags) != flags)
continue;
KASSERT(sizeof(chan->ic_ieee) == 1, ("ic_chan size"));
/* XXX channel has interference */
if (st->st_maxrssi[chan->ic_ieee] == 0) {
/* XXX use other considerations */
return chan;
}
if (bestchan == NULL ||
st->st_maxrssi[chan->ic_ieee] < st->st_maxrssi[bestchan->ic_ieee])
bestchan = chan;
}
return bestchan;
}
/*
* Pick a quiet channel to use for ap operation.
*/
static int
ap_end(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
{
struct ieee80211com *ic = vap->iv_ic;
struct ieee80211_channel *bestchan;
KASSERT(vap->iv_opmode == IEEE80211_M_HOSTAP,
("wrong opmode %u", vap->iv_opmode));
bestchan = ap_pick_channel(ss, 0);
if (bestchan == NULL) {
/* no suitable channel, should not happen */
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
"%s: no suitable channel! (should not happen)\n", __func__);
/* XXX print something? */
return 0; /* restart scan */
}
/*
* If this is a dynamic turbo channel, start with the unboosted one.
*/
if (IEEE80211_IS_CHAN_TURBO(bestchan)) {
bestchan = ieee80211_find_channel(ic, bestchan->ic_freq,
bestchan->ic_flags & ~IEEE80211_CHAN_TURBO);
if (bestchan == NULL) {
/* should never happen ?? */
return 0;
}
}
- ap_reset_promisc(ic);
+ ieee80211_promisc(vap, false);
if (ss->ss_flags & (IEEE80211_SCAN_NOPICK | IEEE80211_SCAN_NOJOIN)) {
/*
* Manual/background scan, don't select+join the
* bss, just return. The scanning framework will
* handle notification that this has completed.
*/
ss->ss_flags &= ~IEEE80211_SCAN_NOPICK;
return 1;
}
ieee80211_create_ibss(vap,
ieee80211_ht_adjust_channel(ic, bestchan, vap->iv_flags_ht));
return 1;
}
static const struct ieee80211_scanner ap_default = {
.scan_name = "default",
.scan_attach = sta_attach,
.scan_detach = sta_detach,
.scan_start = ap_start,
.scan_restart = sta_restart,
.scan_cancel = ap_cancel,
.scan_end = ap_end,
.scan_flush = sta_flush,
.scan_pickchan = ap_pick_channel,
.scan_add = sta_add,
.scan_age = adhoc_age,
.scan_iterate = sta_iterate,
.scan_assoc_success = sta_assoc_success,
.scan_assoc_fail = sta_assoc_fail,
};
IEEE80211_SCANNER_ALG(ap, IEEE80211_M_HOSTAP, ap_default);
#ifdef IEEE80211_SUPPORT_MESH
/*
* Pick an mbss network to join or find a channel
* to use to start an mbss network.
*/
static int
mesh_pick_bss(struct ieee80211_scan_state *ss, struct ieee80211vap *vap)
{
struct sta_table *st = ss->ss_priv;
struct ieee80211_mesh_state *ms = vap->iv_mesh;
struct sta_entry *selbs;
struct ieee80211_channel *chan;
KASSERT(vap->iv_opmode == IEEE80211_M_MBSS,
("wrong opmode %u", vap->iv_opmode));
if (st->st_newscan) {
sta_update_notseen(st);
st->st_newscan = 0;
}
if (ss->ss_flags & IEEE80211_SCAN_NOPICK) {
/*
* Manual/background scan, don't select+join the
* bss, just return. The scanning framework will
* handle notification that this has completed.
*/
ss->ss_flags &= ~IEEE80211_SCAN_NOPICK;
return 1;
}
/*
* Automatic sequencing; look for a candidate and
* if found join the network.
*/
/* NB: unlocked read should be ok */
if (TAILQ_FIRST(&st->st_entry) == NULL) {
IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
"%s: no scan candidate\n", __func__);
if (ss->ss_flags & IEEE80211_SCAN_NOJOIN)
return 0;
notfound:
if (ms->ms_idlen != 0) {
/*
* No existing mbss network to join and we have
* a meshid; start one up. If no channel was
* specified, try to select a channel.
*/
if (vap->iv_des_chan == IEEE80211_CHAN_ANYC ||
IEEE80211_IS_CHAN_RADAR(vap->iv_des_chan)) {
struct ieee80211com *ic = vap->iv_ic;
chan = adhoc_pick_channel(ss, 0);
if (chan != NULL)
chan = ieee80211_ht_adjust_channel(ic,
chan, vap->iv_flags_ht);
} else
chan = vap->iv_des_chan;
if (chan != NULL) {
ieee80211_create_ibss(vap, chan);
return 1;
}
}
/*
* If nothing suitable was found decrement
* the failure counts so entries will be
* reconsidered the next time around. We
* really want to do this only for sta's
* where we've previously had some success.
*/
sta_dec_fails(st);
st->st_newscan = 1;
return 0; /* restart scan */
}
selbs = select_bss(ss, vap, IEEE80211_MSG_SCAN);
if (ss->ss_flags & IEEE80211_SCAN_NOJOIN)
return (selbs != NULL);
if (selbs == NULL)
goto notfound;
chan = selbs->base.se_chan;
if (selbs->se_flags & STA_DEMOTE11B)
chan = demote11b(vap, chan);
if (!ieee80211_sta_join(vap, chan, &selbs->base))
goto notfound;
return 1; /* terminate scan */
}
static const struct ieee80211_scanner mesh_default = {
.scan_name = "default",
.scan_attach = sta_attach,
.scan_detach = sta_detach,
.scan_start = adhoc_start,
.scan_restart = sta_restart,
.scan_cancel = sta_cancel,
.scan_end = mesh_pick_bss,
.scan_flush = sta_flush,
.scan_pickchan = adhoc_pick_channel,
.scan_add = sta_add,
.scan_age = adhoc_age,
.scan_iterate = sta_iterate,
.scan_assoc_fail = sta_assoc_fail,
.scan_assoc_success = sta_assoc_success,
};
IEEE80211_SCANNER_ALG(mesh, IEEE80211_M_MBSS, mesh_default);
#endif /* IEEE80211_SUPPORT_MESH */
Index: head/sys/net80211/ieee80211_var.h
===================================================================
--- head/sys/net80211/ieee80211_var.h (revision 287196)
+++ head/sys/net80211/ieee80211_var.h (revision 287197)
@@ -1,990 +1,999 @@
/*-
* Copyright (c) 2001 Atsushi Onoe
* Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _NET80211_IEEE80211_VAR_H_
#define _NET80211_IEEE80211_VAR_H_
/*
* Definitions for IEEE 802.11 drivers.
*/
/* NB: portability glue must go first */
#if defined(__NetBSD__)
#include <net80211/ieee80211_netbsd.h>
#elif defined(__FreeBSD__)
#include <net80211/ieee80211_freebsd.h>
#elif defined(__linux__)
#include <net80211/ieee80211_linux.h>
#else
#error "No support for your operating system!"
#endif
#include <net80211/_ieee80211.h>
#include <net80211/ieee80211.h>
#include <net80211/ieee80211_ageq.h>
#include <net80211/ieee80211_crypto.h>
#include <net80211/ieee80211_dfs.h>
#include <net80211/ieee80211_ioctl.h> /* for ieee80211_stats */
#include <net80211/ieee80211_phy.h>
#include <net80211/ieee80211_power.h>
#include <net80211/ieee80211_node.h>
#include <net80211/ieee80211_proto.h>
#include <net80211/ieee80211_radiotap.h>
#include <net80211/ieee80211_scan.h>
#define IEEE80211_TXPOWER_MAX 100 /* .5 dBm (XXX units?) */
#define IEEE80211_TXPOWER_MIN 0 /* kill radio */
#define IEEE80211_DTIM_DEFAULT 1 /* default DTIM period */
#define IEEE80211_BINTVAL_DEFAULT 100 /* default beacon interval (TU's) */
#define IEEE80211_BMISS_MAX 2 /* maximum consecutive bmiss allowed */
#define IEEE80211_HWBMISS_DEFAULT 7 /* h/w bmiss threshold (beacons) */
#define IEEE80211_BGSCAN_INTVAL_MIN 15 /* min bg scan intvl (secs) */
#define IEEE80211_BGSCAN_INTVAL_DEFAULT (5*60) /* default bg scan intvl */
#define IEEE80211_BGSCAN_IDLE_MIN 100 /* min idle time (ms) */
#define IEEE80211_BGSCAN_IDLE_DEFAULT 250 /* default idle time (ms) */
#define IEEE80211_SCAN_VALID_MIN 10 /* min scan valid time (secs) */
#define IEEE80211_SCAN_VALID_DEFAULT 60 /* default scan valid time */
#define IEEE80211_PS_SLEEP 0x1 /* STA is in power saving mode */
#define IEEE80211_PS_MAX_QUEUE 50 /* maximum saved packets */
#define IEEE80211_FIXED_RATE_NONE 0xff
#define IEEE80211_TXMAX_DEFAULT 6 /* default ucast max retries */
#define IEEE80211_RTS_DEFAULT IEEE80211_RTS_MAX
#define IEEE80211_FRAG_DEFAULT IEEE80211_FRAG_MAX
#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
#define IEEE80211_TU_TO_MS(x) (((x) * 1024) / 1000)
#define IEEE80211_TU_TO_TICKS(x)(((x) * 1024 * hz) / (1000 * 1000))
/*
* 802.11 control state is split into a common portion that maps
* 1-1 to a physical device and one or more "Virtual AP's" (VAP)
* that are bound to an ieee80211com instance and share a single
* underlying device. Each VAP has a corresponding OS device
* entity through which traffic flows and that applications use
* for issuing ioctls, etc.
*/
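To make the split concrete: every vap hangs off its parent ieee80211com via the ic_vaps list, and each vap owns the ifnet that userland actually talks to. A small illustrative helper (hypothetical, not part of this header) that walks that list:

/*
 * Illustration only: return the first vap on this device that has
 * reached RUN state; its iv_ifp is the OS-visible network interface.
 */
static __inline struct ieee80211vap *
example_first_running_vap(struct ieee80211com *ic)
{
	struct ieee80211vap *vap;

	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
		if (vap->iv_state == IEEE80211_S_RUN)
			return (vap);
	}
	return (NULL);
}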
/*
* Data common to one or more virtual AP's. State shared by
* the underlying device and the net80211 layer is exposed here;
* e.g. device-specific callbacks.
*/
struct ieee80211vap;
typedef void (*ieee80211vap_attach)(struct ieee80211vap *);
struct ieee80211_appie {
uint16_t ie_len; /* size of ie_data */
uint8_t ie_data[]; /* user-specified IE's */
};
struct ieee80211_tdma_param;
struct ieee80211_rate_table;
struct ieee80211_tx_ampdu;
struct ieee80211_rx_ampdu;
struct ieee80211_superg;
struct ieee80211_frame;
struct ieee80211com {
- struct ifnet *ic_ifp; /* associated device */
void *ic_softc; /* driver softc */
const char *ic_name; /* usually device name */
ieee80211_com_lock_t ic_comlock; /* state update lock */
ieee80211_tx_lock_t ic_txlock; /* ic/vap TX lock */
+ LIST_ENTRY(ieee80211com) ic_next; /* on global list */
TAILQ_HEAD(, ieee80211vap) ic_vaps; /* list of vap instances */
int ic_headroom; /* driver tx headroom needs */
enum ieee80211_phytype ic_phytype; /* XXX wrong for multi-mode */
enum ieee80211_opmode ic_opmode; /* operation mode */
- struct ifmedia ic_media; /* interface media config */
struct callout ic_inact; /* inactivity processing */
struct taskqueue *ic_tq; /* deferred state thread */
struct task ic_parent_task; /* deferred parent processing */
struct task ic_promisc_task;/* deferred promisc update */
struct task ic_mcast_task; /* deferred mcast update */
struct task ic_chan_task; /* deferred channel change */
struct task ic_bmiss_task; /* deferred beacon miss hndlr */
struct task ic_chw_task; /* deferred HT CHW update */
counter_u64_t ic_ierrors; /* input errors */
counter_u64_t ic_oerrors; /* output errors */
uint32_t ic_flags; /* state flags */
uint32_t ic_flags_ext; /* extended state flags */
uint32_t ic_flags_ht; /* HT state flags */
uint32_t ic_flags_ven; /* vendor state flags */
uint32_t ic_caps; /* capabilities */
uint32_t ic_htcaps; /* HT capabilities */
uint32_t ic_htextcaps; /* HT extended capabilities */
uint32_t ic_cryptocaps; /* crypto capabilities */
uint8_t ic_modecaps[2]; /* set of mode capabilities */
uint8_t ic_promisc; /* vap's needing promisc mode */
uint8_t ic_allmulti; /* vap's needing all multicast*/
uint8_t ic_nrunning; /* vap's marked running */
uint8_t ic_curmode; /* current mode */
+ uint8_t ic_macaddr[IEEE80211_ADDR_LEN];
uint16_t ic_bintval; /* beacon interval */
uint16_t ic_lintval; /* listen interval */
uint16_t ic_holdover; /* PM hold over duration */
uint16_t ic_txpowlimit; /* global tx power limit */
struct ieee80211_rateset ic_sup_rates[IEEE80211_MODE_MAX];
/*
* Channel state:
*
* ic_channels is the set of available channels for the device;
* it is setup by the driver
* ic_nchans is the number of valid entries in ic_channels
* ic_chan_avail is a bit vector of these channels used to check
* whether a channel is available w/o searching the channel table.
* ic_chan_active is a (potentially) constrained subset of
* ic_chan_avail that reflects any mode setting or user-specified
* limit on the set of channels to use/scan
* ic_curchan is the current channel the device is set to; it may
* be different from ic_bsschan when we are off-channel scanning
* or otherwise doing background work
* ic_bsschan is the channel selected for operation; it may
* be undefined (IEEE80211_CHAN_ANYC)
* ic_prevchan is a cached ``previous channel'' used to optimize
* lookups when switching back+forth between two channels
* (e.g. for dynamic turbo)
*/
int ic_nchans; /* # entries in ic_channels */
struct ieee80211_channel ic_channels[IEEE80211_CHAN_MAX];
uint8_t ic_chan_avail[IEEE80211_CHAN_BYTES];
uint8_t ic_chan_active[IEEE80211_CHAN_BYTES];
uint8_t ic_chan_scan[IEEE80211_CHAN_BYTES];
struct ieee80211_channel *ic_curchan; /* current channel */
const struct ieee80211_rate_table *ic_rt; /* table for ic_curchan */
struct ieee80211_channel *ic_bsschan; /* bss channel */
struct ieee80211_channel *ic_prevchan; /* previous channel */
struct ieee80211_regdomain ic_regdomain;/* regulatory data */
struct ieee80211_appie *ic_countryie; /* calculated country ie */
struct ieee80211_channel *ic_countryie_chan;
/* 802.11h/DFS state */
struct ieee80211_channel *ic_csa_newchan;/* channel for doing CSA */
short ic_csa_mode; /* mode for doing CSA */
short ic_csa_count; /* count for doing CSA */
struct ieee80211_dfs_state ic_dfs; /* DFS state */
struct ieee80211_scan_state *ic_scan; /* scan state */
struct ieee80211_scan_methods *ic_scan_methods; /* scan methods */
int ic_lastdata; /* time of last data frame */
int ic_lastscan; /* time last scan completed */
/* NB: this is the union of all vap stations/neighbors */
int ic_max_keyix; /* max h/w key index */
struct ieee80211_node_table ic_sta; /* stations/neighbors */
struct ieee80211_ageq ic_stageq; /* frame staging queue */
uint32_t ic_hash_key; /* random key for mac hash */
/* XXX multi-bss: split out common/vap parts */
struct ieee80211_wme_state ic_wme; /* WME/WMM state */
/* XXX multi-bss: can per-vap be done/make sense? */
enum ieee80211_protmode ic_protmode; /* 802.11g protection mode */
uint16_t ic_nonerpsta; /* # non-ERP stations */
uint16_t ic_longslotsta; /* # long slot time stations */
uint16_t ic_sta_assoc; /* stations associated */
uint16_t ic_ht_sta_assoc;/* HT stations associated */
uint16_t ic_ht40_sta_assoc;/* HT40 stations associated */
uint8_t ic_curhtprotmode;/* HTINFO bss state */
enum ieee80211_protmode ic_htprotmode; /* HT protection mode */
int ic_lastnonerp; /* last time non-ERP sta noted*/
int ic_lastnonht; /* last time non-HT sta noted */
uint8_t ic_rxstream; /* # RX streams */
uint8_t ic_txstream; /* # TX streams */
/* optional state for Atheros SuperG protocol extensions */
struct ieee80211_superg *ic_superg;
/* radiotap handling */
struct ieee80211_radiotap_header *ic_th;/* tx radiotap headers */
void *ic_txchan; /* channel state in ic_th */
struct ieee80211_radiotap_header *ic_rh;/* rx radiotap headers */
void *ic_rxchan; /* channel state in ic_rh */
int ic_montaps; /* active monitor mode taps */
/* virtual ap create/delete */
struct ieee80211vap* (*ic_vap_create)(struct ieee80211com *,
const char [IFNAMSIZ], int,
enum ieee80211_opmode, int,
const uint8_t [IEEE80211_ADDR_LEN],
const uint8_t [IEEE80211_ADDR_LEN]);
void (*ic_vap_delete)(struct ieee80211vap *);
+ /* device specific ioctls */
+ int (*ic_ioctl)(struct ieee80211com *,
+ u_long, void *);
+ /* start/stop device */
+ void (*ic_parent)(struct ieee80211com *);
/* operating mode attachment */
ieee80211vap_attach ic_vattach[IEEE80211_OPMODE_MAX];
/* return hardware/radio capabilities */
void (*ic_getradiocaps)(struct ieee80211com *,
int, int *, struct ieee80211_channel []);
/* check and/or prepare regdomain state change */
int (*ic_setregdomain)(struct ieee80211com *,
struct ieee80211_regdomain *,
int, struct ieee80211_channel []);
int (*ic_set_quiet)(struct ieee80211_node *,
u_int8_t *quiet_elm);
+ /* regular transmit */
+ int (*ic_transmit)(struct ieee80211com *,
+ struct mbuf *);
/* send/recv 802.11 management frame */
int (*ic_send_mgmt)(struct ieee80211_node *,
int, int);
/* send raw 802.11 frame */
int (*ic_raw_xmit)(struct ieee80211_node *,
struct mbuf *,
const struct ieee80211_bpf_params *);
/* update device state for 802.11 slot time change */
void (*ic_updateslot)(struct ieee80211com *);
/* handle multicast state changes */
void (*ic_update_mcast)(struct ieee80211com *);
/* handle promiscuous mode changes */
void (*ic_update_promisc)(struct ieee80211com *);
/* new station association callback/notification */
void (*ic_newassoc)(struct ieee80211_node *, int);
/* TDMA update notification */
void (*ic_tdma_update)(struct ieee80211_node *,
const struct ieee80211_tdma_param *, int);
/* node state management */
struct ieee80211_node* (*ic_node_alloc)(struct ieee80211vap *,
const uint8_t [IEEE80211_ADDR_LEN]);
void (*ic_node_free)(struct ieee80211_node *);
void (*ic_node_cleanup)(struct ieee80211_node *);
void (*ic_node_age)(struct ieee80211_node *);
void (*ic_node_drain)(struct ieee80211_node *);
int8_t (*ic_node_getrssi)(const struct ieee80211_node*);
void (*ic_node_getsignal)(const struct ieee80211_node*,
int8_t *, int8_t *);
void (*ic_node_getmimoinfo)(
const struct ieee80211_node*,
struct ieee80211_mimo_info *);
/* scanning support */
void (*ic_scan_start)(struct ieee80211com *);
void (*ic_scan_end)(struct ieee80211com *);
void (*ic_set_channel)(struct ieee80211com *);
void (*ic_scan_curchan)(struct ieee80211_scan_state *,
unsigned long);
void (*ic_scan_mindwell)(struct ieee80211_scan_state *);
/*
* 802.11n ADDBA support. A simple/generic implementation
* of A-MPDU tx aggregation is provided; the driver may
* override these methods to provide its own support.
* A-MPDU rx re-ordering happens automatically if the
* driver passes out-of-order frames to ieee80211_input
* from an associated HT station.
*/
int (*ic_recv_action)(struct ieee80211_node *,
const struct ieee80211_frame *,
const uint8_t *frm, const uint8_t *efrm);
int (*ic_send_action)(struct ieee80211_node *,
int category, int action, void *);
/* check if A-MPDU should be enabled for this station+ac */
int (*ic_ampdu_enable)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *);
/* start/stop doing A-MPDU tx aggregation for a station */
int (*ic_addba_request)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *,
int dialogtoken, int baparamset,
int batimeout);
int (*ic_addba_response)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *,
int status, int baparamset, int batimeout);
void (*ic_addba_stop)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *);
void (*ic_addba_response_timeout)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *);
/* BAR response received */
void (*ic_bar_response)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *, int status);
/* start/stop doing A-MPDU rx processing for a station */
int (*ic_ampdu_rx_start)(struct ieee80211_node *,
struct ieee80211_rx_ampdu *, int baparamset,
int batimeout, int baseqctl);
void (*ic_ampdu_rx_stop)(struct ieee80211_node *,
struct ieee80211_rx_ampdu *);
/* The channel width has changed (20<->20/40) */
void (*ic_update_chw)(struct ieee80211com *);
uint64_t ic_spare[7];
};
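With this revision the com structure no longer carries its own ifnet or ifmedia, so a driver supplies its MAC address and the new ic_parent/ic_transmit/ic_ioctl hooks directly before calling ieee80211_ifattach(). A hedged sketch of that wiring; the mydrv_* names and softc layout are hypothetical and the callbacks are assumed to be defined elsewhere in the driver:

/* Hedged sketch of driver attach after the ifnet removal. */
struct mydrv_softc {
	device_t		sc_dev;
	struct ieee80211com	sc_ic;
	uint8_t			sc_lladdr[IEEE80211_ADDR_LEN];
};

static void	mydrv_parent(struct ieee80211com *);	/* start/stop device */
static int	mydrv_transmit(struct ieee80211com *, struct mbuf *);
static int	mydrv_ioctl(struct ieee80211com *, u_long, void *);

static void
mydrv_net80211_attach(struct mydrv_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	ic->ic_softc = sc;
	ic->ic_name = device_get_nameunit(sc->sc_dev);
	IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_lladdr);
	ic->ic_parent = mydrv_parent;		/* bring h/w up/down */
	ic->ic_transmit = mydrv_transmit;	/* regular data transmit */
	ic->ic_ioctl = mydrv_ioctl;		/* device specific ioctls */
	/* ic_caps, ic_channels/ic_nchans, ic_vap_create, etc. also go here */
	ieee80211_ifattach(ic);
}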
struct ieee80211_aclator;
struct ieee80211_tdma_state;
struct ieee80211_mesh_state;
struct ieee80211_hwmp_state;
struct ieee80211vap {
struct ifmedia iv_media; /* interface media config */
struct ifnet *iv_ifp; /* associated device */
struct bpf_if *iv_rawbpf; /* packet filter structure */
struct sysctl_ctx_list *iv_sysctl; /* dynamic sysctl context */
struct sysctl_oid *iv_oid; /* net.wlan.X sysctl oid */
TAILQ_ENTRY(ieee80211vap) iv_next; /* list of vap instances */
struct ieee80211com *iv_ic; /* back ptr to common state */
+ const uint8_t *iv_myaddr; /* MAC address: ifp or ic */
uint32_t iv_debug; /* debug msg flags */
struct ieee80211_stats iv_stats; /* statistics */
- uint8_t iv_myaddr[IEEE80211_ADDR_LEN];
uint32_t iv_flags; /* state flags */
uint32_t iv_flags_ext; /* extended state flags */
uint32_t iv_flags_ht; /* HT state flags */
uint32_t iv_flags_ven; /* vendor state flags */
+ uint32_t iv_ifflags; /* ifnet flags */
uint32_t iv_caps; /* capabilities */
uint32_t iv_htcaps; /* HT capabilities */
uint32_t iv_htextcaps; /* HT extended capabilities */
enum ieee80211_opmode iv_opmode; /* operation mode */
enum ieee80211_state iv_state; /* state machine state */
enum ieee80211_state iv_nstate; /* pending state */
int iv_nstate_arg; /* pending state arg */
struct task iv_nstate_task; /* deferred state processing */
struct task iv_swbmiss_task;/* deferred iv_bmiss call */
struct callout iv_mgtsend; /* mgmt frame response timer */
/* inactivity timer settings */
int iv_inact_init; /* setting for new station */
int iv_inact_auth; /* auth but not assoc setting */
int iv_inact_run; /* authorized setting */
int iv_inact_probe; /* inactive probe time */
int iv_des_nssid; /* # desired ssids */
struct ieee80211_scan_ssid iv_des_ssid[1];/* desired ssid table */
uint8_t iv_des_bssid[IEEE80211_ADDR_LEN];
struct ieee80211_channel *iv_des_chan; /* desired channel */
uint16_t iv_des_mode; /* desired mode */
int iv_nicknamelen; /* XXX junk */
uint8_t iv_nickname[IEEE80211_NWID_LEN];
u_int iv_bgscanidle; /* bg scan idle threshold */
u_int iv_bgscanintvl; /* bg scan min interval */
u_int iv_scanvalid; /* scan cache valid threshold */
u_int iv_scanreq_duration;
u_int iv_scanreq_mindwell;
u_int iv_scanreq_maxdwell;
uint16_t iv_scanreq_flags;/* held scan request params */
uint8_t iv_scanreq_nssid;
struct ieee80211_scan_ssid iv_scanreq_ssid[IEEE80211_SCAN_MAX_SSID];
/* sta-mode roaming state */
enum ieee80211_roamingmode iv_roaming; /* roaming mode */
struct ieee80211_roamparam iv_roamparms[IEEE80211_MODE_MAX];
uint8_t iv_bmissthreshold;
uint8_t iv_bmiss_count; /* current beacon miss count */
int iv_bmiss_max; /* max bmiss before scan */
uint16_t iv_swbmiss_count;/* beacons in last period */
uint16_t iv_swbmiss_period;/* s/w bmiss period */
struct callout iv_swbmiss; /* s/w beacon miss timer */
int iv_ampdu_rxmax; /* A-MPDU rx limit (bytes) */
int iv_ampdu_density;/* A-MPDU density */
int iv_ampdu_limit; /* A-MPDU tx limit (bytes) */
int iv_amsdu_limit; /* A-MSDU tx limit (bytes) */
u_int iv_ampdu_mintraffic[WME_NUM_AC];
uint32_t *iv_aid_bitmap; /* association id map */
uint16_t iv_max_aid;
uint16_t iv_sta_assoc; /* stations associated */
uint16_t iv_ps_sta; /* stations in power save */
uint16_t iv_ps_pending; /* ps sta's w/ pending frames */
uint16_t iv_txseq; /* mcast xmit seq# space */
uint16_t iv_tim_len; /* ic_tim_bitmap size (bytes) */
uint8_t *iv_tim_bitmap; /* power-save stations w/ data*/
uint8_t iv_dtim_period; /* DTIM period */
uint8_t iv_dtim_count; /* DTIM count from last bcn */
/* set/unset aid pwrsav state */
uint8_t iv_quiet; /* Quiet Element */
uint8_t iv_quiet_count; /* constant count for Quiet Element */
uint8_t iv_quiet_count_value; /* variable count for Quiet Element */
uint8_t iv_quiet_period; /* period for Quiet Element */
uint16_t iv_quiet_duration; /* duration for Quiet Element */
uint16_t iv_quiet_offset; /* offset for Quiet Element */
int iv_csa_count; /* count for doing CSA */
struct ieee80211_node *iv_bss; /* information for this node */
struct ieee80211_txparam iv_txparms[IEEE80211_MODE_MAX];
uint16_t iv_rtsthreshold;
uint16_t iv_fragthreshold;
int iv_inact_timer; /* inactivity timer wait */
/* application-specified IE's to attach to mgt frames */
struct ieee80211_appie *iv_appie_beacon;
struct ieee80211_appie *iv_appie_probereq;
struct ieee80211_appie *iv_appie_proberesp;
struct ieee80211_appie *iv_appie_assocreq;
struct ieee80211_appie *iv_appie_assocresp;
struct ieee80211_appie *iv_appie_wpa;
uint8_t *iv_wpa_ie;
uint8_t *iv_rsn_ie;
uint16_t iv_max_keyix; /* max h/w key index */
ieee80211_keyix iv_def_txkey; /* default/group tx key index */
struct ieee80211_key iv_nw_keys[IEEE80211_WEP_NKID];
int (*iv_key_alloc)(struct ieee80211vap *,
struct ieee80211_key *,
ieee80211_keyix *, ieee80211_keyix *);
int (*iv_key_delete)(struct ieee80211vap *,
const struct ieee80211_key *);
int (*iv_key_set)(struct ieee80211vap *,
const struct ieee80211_key *,
const uint8_t mac[IEEE80211_ADDR_LEN]);
void (*iv_key_update_begin)(struct ieee80211vap *);
void (*iv_key_update_end)(struct ieee80211vap *);
const struct ieee80211_authenticator *iv_auth; /* authenticator glue */
void *iv_ec; /* private auth state */
const struct ieee80211_aclator *iv_acl; /* acl glue */
void *iv_as; /* private aclator state */
const struct ieee80211_ratectl *iv_rate;
void *iv_rs; /* private ratectl state */
struct ieee80211_tdma_state *iv_tdma; /* tdma state */
struct ieee80211_mesh_state *iv_mesh; /* MBSS state */
struct ieee80211_hwmp_state *iv_hwmp; /* HWMP state */
/* operate-mode detach hook */
void (*iv_opdetach)(struct ieee80211vap *);
/* receive processing */
int (*iv_input)(struct ieee80211_node *,
struct mbuf *,
const struct ieee80211_rx_stats *,
int, int);
void (*iv_recv_mgmt)(struct ieee80211_node *,
struct mbuf *, int,
const struct ieee80211_rx_stats *,
int, int);
void (*iv_recv_ctl)(struct ieee80211_node *,
struct mbuf *, int);
void (*iv_deliver_data)(struct ieee80211vap *,
struct ieee80211_node *, struct mbuf *);
#if 0
/* send processing */
int (*iv_send_mgmt)(struct ieee80211_node *,
int, int);
#endif
/* beacon miss processing */
void (*iv_bmiss)(struct ieee80211vap *);
/* reset device state after 802.11 parameter/state change */
int (*iv_reset)(struct ieee80211vap *, u_long);
/* [schedule] beacon frame update */
void (*iv_update_beacon)(struct ieee80211vap *, int);
/* power save handling */
void (*iv_update_ps)(struct ieee80211vap *, int);
int (*iv_set_tim)(struct ieee80211_node *, int);
void (*iv_node_ps)(struct ieee80211_node *, int);
void (*iv_sta_ps)(struct ieee80211vap *, int);
void (*iv_recv_pspoll)(struct ieee80211_node *,
struct mbuf *);
/* state machine processing */
int (*iv_newstate)(struct ieee80211vap *,
enum ieee80211_state, int);
/* 802.3 output method for raw frame xmit */
int (*iv_output)(struct ifnet *, struct mbuf *,
const struct sockaddr *, struct route *);
uint64_t iv_spare[6];
};
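On the vap side the MAC address argument moves from ieee80211_vap_setup() to ieee80211_vap_attach() in this revision (see the prototypes further below). A hedged sketch of an ic_vap_create implementation under the new signatures; mydrv_vap_create and struct mydrv_vap are hypothetical:

/* Hedged sketch only; the mydrv_* names are placeholders. */
struct mydrv_vap {
	struct ieee80211vap	mv_vap;		/* must come first */
};

static struct ieee80211vap *
mydrv_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ],
    int unit, enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac[IEEE80211_ADDR_LEN])
{
	struct mydrv_vap *mvp;
	struct ieee80211vap *vap;

	mvp = malloc(sizeof(*mvp), M_80211_VAP, M_WAITOK | M_ZERO);
	vap = &mvp->mv_vap;
	ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
	/* override vap methods (iv_newstate, iv_key_set, ...) here */

	/* the MAC address is now passed at attach time */
	ieee80211_vap_attach(vap, ieee80211_media_change,
	    ieee80211_media_status, mac);
	return (vap);
}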
MALLOC_DECLARE(M_80211_VAP);
#define IEEE80211_ADDR_EQ(a1,a2) (memcmp(a1,a2,IEEE80211_ADDR_LEN) == 0)
#define IEEE80211_ADDR_COPY(dst,src) memcpy(dst,src,IEEE80211_ADDR_LEN)
/* ic_flags/iv_flags */
#define IEEE80211_F_TURBOP 0x00000001 /* CONF: ATH Turbo enabled*/
#define IEEE80211_F_COMP 0x00000002 /* CONF: ATH comp enabled */
#define IEEE80211_F_FF 0x00000004 /* CONF: ATH FF enabled */
#define IEEE80211_F_BURST 0x00000008 /* CONF: bursting enabled */
/* NB: this is intentionally setup to be IEEE80211_CAPINFO_PRIVACY */
#define IEEE80211_F_PRIVACY 0x00000010 /* CONF: privacy enabled */
#define IEEE80211_F_PUREG 0x00000020 /* CONF: 11g w/o 11b sta's */
#define IEEE80211_F_SCAN 0x00000080 /* STATUS: scanning */
#define IEEE80211_F_ASCAN 0x00000100 /* STATUS: active scan */
#define IEEE80211_F_SIBSS 0x00000200 /* STATUS: start IBSS */
/* NB: this is intentionally setup to be IEEE80211_CAPINFO_SHORT_SLOTTIME */
#define IEEE80211_F_SHSLOT 0x00000400 /* STATUS: use short slot time*/
#define IEEE80211_F_PMGTON 0x00000800 /* CONF: Power mgmt enable */
#define IEEE80211_F_DESBSSID 0x00001000 /* CONF: des_bssid is set */
#define IEEE80211_F_WME 0x00002000 /* CONF: enable WME use */
#define IEEE80211_F_BGSCAN 0x00004000 /* CONF: bg scan enabled (???)*/
#define IEEE80211_F_SWRETRY 0x00008000 /* CONF: sw tx retry enabled */
#define IEEE80211_F_TXPOW_FIXED 0x00010000 /* TX Power: fixed rate */
#define IEEE80211_F_IBSSON 0x00020000 /* CONF: IBSS creation enable */
#define IEEE80211_F_SHPREAMBLE 0x00040000 /* STATUS: use short preamble */
#define IEEE80211_F_DATAPAD 0x00080000 /* CONF: do alignment pad */
#define IEEE80211_F_USEPROT 0x00100000 /* STATUS: protection enabled */
#define IEEE80211_F_USEBARKER 0x00200000 /* STATUS: use barker preamble*/
#define IEEE80211_F_CSAPENDING 0x00400000 /* STATUS: chan switch pending*/
#define IEEE80211_F_WPA1 0x00800000 /* CONF: WPA enabled */
#define IEEE80211_F_WPA2 0x01000000 /* CONF: WPA2 enabled */
#define IEEE80211_F_WPA 0x01800000 /* CONF: WPA/WPA2 enabled */
#define IEEE80211_F_DROPUNENC 0x02000000 /* CONF: drop unencrypted */
#define IEEE80211_F_COUNTERM 0x04000000 /* CONF: TKIP countermeasures */
#define IEEE80211_F_HIDESSID 0x08000000 /* CONF: hide SSID in beacon */
#define IEEE80211_F_NOBRIDGE 0x10000000 /* CONF: dis. internal bridge */
#define IEEE80211_F_PCF 0x20000000 /* CONF: PCF enabled */
#define IEEE80211_F_DOTH 0x40000000 /* CONF: 11h enabled */
#define IEEE80211_F_DWDS 0x80000000 /* CONF: Dynamic WDS enabled */
#define IEEE80211_F_BITS \
"\20\1TURBOP\2COMP\3FF\4BURST\5PRIVACY\6PUREG\10SCAN\11ASCAN\12SIBSS" \
"\13SHSLOT\14PMGTON\15DESBSSID\16WME\17BGSCAN\20SWRETRY\21TXPOW_FIXED" \
"\22IBSSON\23SHPREAMBLE\24DATAPAD\25USEPROT\26USERBARKER\27CSAPENDING" \
"\30WPA1\31WPA2\32DROPUNENC\33COUNTERM\34HIDESSID\35NOBRIDG\36PCF" \
"\37DOTH\40DWDS"
/* Atheros protocol-specific flags */
#define IEEE80211_F_ATHEROS \
(IEEE80211_F_FF | IEEE80211_F_COMP | IEEE80211_F_TURBOP)
/* Check if an Atheros capability was negotiated for use */
#define IEEE80211_ATH_CAP(vap, ni, bit) \
((vap)->iv_flags & (ni)->ni_ath_flags & (bit))
/* ic_flags_ext/iv_flags_ext */
#define IEEE80211_FEXT_INACT 0x00000002 /* CONF: sta inact handling */
#define IEEE80211_FEXT_SCANWAIT 0x00000004 /* STATUS: awaiting scan */
/* 0x00000006 reserved */
#define IEEE80211_FEXT_BGSCAN 0x00000008 /* STATUS: complete bgscan */
#define IEEE80211_FEXT_WPS 0x00000010 /* CONF: WPS enabled */
#define IEEE80211_FEXT_TSN 0x00000020 /* CONF: TSN enabled */
#define IEEE80211_FEXT_SCANREQ 0x00000040 /* STATUS: scan req params */
#define IEEE80211_FEXT_RESUME 0x00000080 /* STATUS: start on resume */
#define IEEE80211_FEXT_4ADDR 0x00000100 /* CONF: apply 4-addr encap */
#define IEEE80211_FEXT_NONERP_PR 0x00000200 /* STATUS: non-ERP sta present*/
#define IEEE80211_FEXT_SWBMISS 0x00000400 /* CONF: do bmiss in s/w */
#define IEEE80211_FEXT_DFS 0x00000800 /* CONF: DFS enabled */
#define IEEE80211_FEXT_DOTD 0x00001000 /* CONF: 11d enabled */
#define IEEE80211_FEXT_STATEWAIT 0x00002000 /* STATUS: awaiting state chg */
#define IEEE80211_FEXT_REINIT 0x00004000 /* STATUS: INIT state first */
#define IEEE80211_FEXT_BPF 0x00008000 /* STATUS: BPF tap present */
/* NB: immutable: should be set only when creating a vap */
#define IEEE80211_FEXT_WDSLEGACY 0x00010000 /* CONF: legacy WDS operation */
#define IEEE80211_FEXT_PROBECHAN 0x00020000 /* CONF: probe passive channel*/
#define IEEE80211_FEXT_UNIQMAC 0x00040000 /* CONF: user or computed mac */
#define IEEE80211_FEXT_BITS \
"\20\2INACT\3SCANWAIT\4BGSCAN\5WPS\6TSN\7SCANREQ\10RESUME" \
"\0114ADDR\12NONEPR_PR\13SWBMISS\14DFS\15DOTD\16STATEWAIT\17REINIT" \
"\20BPF\21WDSLEGACY\22PROBECHAN\23UNIQMAC"
/* ic_flags_ht/iv_flags_ht */
#define IEEE80211_FHT_NONHT_PR 0x00000001 /* STATUS: non-HT sta present */
#define IEEE80211_FHT_GF 0x00040000 /* CONF: Greenfield enabled */
#define IEEE80211_FHT_HT 0x00080000 /* CONF: HT supported */
#define IEEE80211_FHT_AMPDU_TX 0x00100000 /* CONF: A-MPDU tx supported */
#define IEEE80211_FHT_AMPDU_RX 0x00200000 /* CONF: A-MPDU rx supported */
#define IEEE80211_FHT_AMSDU_TX 0x00400000 /* CONF: A-MSDU tx supported */
#define IEEE80211_FHT_AMSDU_RX 0x00800000 /* CONF: A-MSDU rx supported */
#define IEEE80211_FHT_USEHT40 0x01000000 /* CONF: 20/40 use enabled */
#define IEEE80211_FHT_PUREN 0x02000000 /* CONF: 11n w/o legacy sta's */
#define IEEE80211_FHT_SHORTGI20 0x04000000 /* CONF: short GI in HT20 */
#define IEEE80211_FHT_SHORTGI40 0x08000000 /* CONF: short GI in HT40 */
#define IEEE80211_FHT_HTCOMPAT 0x10000000 /* CONF: HT vendor OUI's */
#define IEEE80211_FHT_RIFS 0x20000000 /* CONF: RIFS enabled */
#define IEEE80211_FHT_STBC_TX 0x40000000 /* CONF: STBC tx enabled */
#define IEEE80211_FHT_STBC_RX 0x80000000 /* CONF: STBC rx enabled */
#define IEEE80211_FHT_BITS \
"\20\1NONHT_PR" \
"\23GF\24HT\25AMPDU_TX\26AMPDU_TX" \
"\27AMSDU_TX\30AMSDU_RX\31USEHT40\32PUREN\33SHORTGI20\34SHORTGI40" \
"\35HTCOMPAT\36RIFS\37STBC_TX\40STBC_RX"
#define IEEE80211_FVEN_BITS "\20"
/* ic_caps/iv_caps: device driver capabilities */
/* 0x2e available */
#define IEEE80211_C_STA 0x00000001 /* CAPABILITY: STA available */
#define IEEE80211_C_8023ENCAP 0x00000002 /* CAPABILITY: 802.3 encap */
#define IEEE80211_C_FF 0x00000040 /* CAPABILITY: ATH FF avail */
#define IEEE80211_C_TURBOP 0x00000080 /* CAPABILITY: ATH Turbo avail*/
#define IEEE80211_C_IBSS 0x00000100 /* CAPABILITY: IBSS available */
#define IEEE80211_C_PMGT 0x00000200 /* CAPABILITY: Power mgmt */
#define IEEE80211_C_HOSTAP 0x00000400 /* CAPABILITY: HOSTAP avail */
#define IEEE80211_C_AHDEMO 0x00000800 /* CAPABILITY: Old Adhoc Demo */
#define IEEE80211_C_SWRETRY 0x00001000 /* CAPABILITY: sw tx retry */
#define IEEE80211_C_TXPMGT 0x00002000 /* CAPABILITY: tx power mgmt */
#define IEEE80211_C_SHSLOT 0x00004000 /* CAPABILITY: short slottime */
#define IEEE80211_C_SHPREAMBLE 0x00008000 /* CAPABILITY: short preamble */
#define IEEE80211_C_MONITOR 0x00010000 /* CAPABILITY: monitor mode */
#define IEEE80211_C_DFS 0x00020000 /* CAPABILITY: DFS/radar avail*/
#define IEEE80211_C_MBSS 0x00040000 /* CAPABILITY: MBSS available */
#define IEEE80211_C_SWSLEEP 0x00080000 /* CAPABILITY: do sleep here */
/* 0x7c0000 available */
#define IEEE80211_C_WPA1 0x00800000 /* CAPABILITY: WPA1 avail */
#define IEEE80211_C_WPA2 0x01000000 /* CAPABILITY: WPA2 avail */
#define IEEE80211_C_WPA 0x01800000 /* CAPABILITY: WPA1+WPA2 avail*/
#define IEEE80211_C_BURST 0x02000000 /* CAPABILITY: frame bursting */
#define IEEE80211_C_WME 0x04000000 /* CAPABILITY: WME avail */
#define IEEE80211_C_WDS 0x08000000 /* CAPABILITY: 4-addr support */
/* 0x10000000 reserved */
#define IEEE80211_C_BGSCAN 0x20000000 /* CAPABILITY: bg scanning */
#define IEEE80211_C_TXFRAG 0x40000000 /* CAPABILITY: tx fragments */
#define IEEE80211_C_TDMA 0x80000000 /* CAPABILITY: TDMA avail */
/* XXX protection/barker? */
#define IEEE80211_C_OPMODE \
(IEEE80211_C_STA | IEEE80211_C_IBSS | IEEE80211_C_HOSTAP | \
IEEE80211_C_AHDEMO | IEEE80211_C_MONITOR | IEEE80211_C_WDS | \
IEEE80211_C_TDMA | IEEE80211_C_MBSS)
#define IEEE80211_C_BITS \
"\20\1STA\002803ENCAP\7FF\10TURBOP\11IBSS\12PMGT" \
"\13HOSTAP\14AHDEMO\15SWRETRY\16TXPMGT\17SHSLOT\20SHPREAMBLE" \
"\21MONITOR\22DFS\23MBSS\30WPA1\31WPA2\32BURST\33WME\34WDS\36BGSCAN" \
"\37TXFRAG\40TDMA"
/*
* ic_htcaps/iv_htcaps: HT-specific device/driver capabilities
*
* NB: the low 16-bits are the 802.11 definitions, the upper
* 16-bits are used to define s/w/driver capabilities.
*/
#define IEEE80211_HTC_AMPDU 0x00010000 /* CAPABILITY: A-MPDU tx */
#define IEEE80211_HTC_AMSDU 0x00020000 /* CAPABILITY: A-MSDU tx */
/* NB: HT40 is implied by IEEE80211_HTCAP_CHWIDTH40 */
#define IEEE80211_HTC_HT 0x00040000 /* CAPABILITY: HT operation */
#define IEEE80211_HTC_SMPS 0x00080000 /* CAPABILITY: MIMO power save*/
#define IEEE80211_HTC_RIFS 0x00100000 /* CAPABILITY: RIFS support */
#define IEEE80211_HTC_RXUNEQUAL 0x00200000 /* CAPABILITY: RX unequal MCS */
#define IEEE80211_HTC_RXMCS32 0x00400000 /* CAPABILITY: MCS32 support */
#define IEEE80211_HTC_TXUNEQUAL 0x00800000 /* CAPABILITY: TX unequal MCS */
#define IEEE80211_HTC_TXMCS32 0x01000000 /* CAPABILITY: MCS32 support */
#define IEEE80211_C_HTCAP_BITS \
"\20\1LDPC\2CHWIDTH40\5GREENFIELD\6SHORTGI20\7SHORTGI40\10TXSTBC" \
"\21AMPDU\22AMSDU\23HT\24SMPS\25RIFS"
int ic_printf(struct ieee80211com *, const char *, ...) __printflike(2, 3);
-void ieee80211_ifattach(struct ieee80211com *,
- const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+void ieee80211_ifattach(struct ieee80211com *);
void ieee80211_ifdetach(struct ieee80211com *);
int ieee80211_vap_setup(struct ieee80211com *, struct ieee80211vap *,
const char name[IFNAMSIZ], int unit,
enum ieee80211_opmode opmode, int flags,
- const uint8_t bssid[IEEE80211_ADDR_LEN],
- const uint8_t macaddr[IEEE80211_ADDR_LEN]);
+ const uint8_t bssid[IEEE80211_ADDR_LEN]);
int ieee80211_vap_attach(struct ieee80211vap *,
- ifm_change_cb_t, ifm_stat_cb_t);
+ ifm_change_cb_t, ifm_stat_cb_t,
+ const uint8_t macaddr[IEEE80211_ADDR_LEN]);
void ieee80211_vap_detach(struct ieee80211vap *);
const struct ieee80211_rateset *ieee80211_get_suprates(struct ieee80211com *ic,
const struct ieee80211_channel *);
void ieee80211_announce(struct ieee80211com *);
void ieee80211_announce_channels(struct ieee80211com *);
void ieee80211_drain(struct ieee80211com *);
-void ieee80211_media_init(struct ieee80211com *);
+void ieee80211_chan_init(struct ieee80211com *);
struct ieee80211com *ieee80211_find_vap(const uint8_t mac[IEEE80211_ADDR_LEN]);
+struct ieee80211com *ieee80211_find_com(const char *name);
int ieee80211_media_change(struct ifnet *);
void ieee80211_media_status(struct ifnet *, struct ifmediareq *);
int ieee80211_ioctl(struct ifnet *, u_long, caddr_t);
int ieee80211_rate2media(struct ieee80211com *, int,
enum ieee80211_phymode);
int ieee80211_media2rate(int);
int ieee80211_mhz2ieee(u_int, u_int);
int ieee80211_chan2ieee(struct ieee80211com *,
const struct ieee80211_channel *);
u_int ieee80211_ieee2mhz(u_int, u_int);
struct ieee80211_channel *ieee80211_find_channel(struct ieee80211com *,
int freq, int flags);
struct ieee80211_channel *ieee80211_find_channel_byieee(struct ieee80211com *,
int ieee, int flags);
struct ieee80211_channel *ieee80211_lookup_channel_rxstatus(struct ieee80211vap *,
const struct ieee80211_rx_stats *);
int ieee80211_setmode(struct ieee80211com *, enum ieee80211_phymode);
enum ieee80211_phymode ieee80211_chan2mode(const struct ieee80211_channel *);
uint32_t ieee80211_mac_hash(const struct ieee80211com *,
const uint8_t addr[IEEE80211_ADDR_LEN]);
char ieee80211_channel_type_char(const struct ieee80211_channel *c);
void ieee80211_radiotap_attach(struct ieee80211com *,
struct ieee80211_radiotap_header *th, int tlen,
uint32_t tx_radiotap,
struct ieee80211_radiotap_header *rh, int rlen,
uint32_t rx_radiotap);
void ieee80211_radiotap_attachv(struct ieee80211com *,
struct ieee80211_radiotap_header *th,
int tlen, int n_tx_v, uint32_t tx_radiotap,
struct ieee80211_radiotap_header *rh,
int rlen, int n_rx_v, uint32_t rx_radiotap);
void ieee80211_radiotap_detach(struct ieee80211com *);
void ieee80211_radiotap_vattach(struct ieee80211vap *);
void ieee80211_radiotap_vdetach(struct ieee80211vap *);
void ieee80211_radiotap_chan_change(struct ieee80211com *);
void ieee80211_radiotap_tx(struct ieee80211vap *, struct mbuf *);
void ieee80211_radiotap_rx(struct ieee80211vap *, struct mbuf *);
void ieee80211_radiotap_rx_all(struct ieee80211com *, struct mbuf *);
static __inline int
ieee80211_radiotap_active(const struct ieee80211com *ic)
{
return (ic->ic_flags_ext & IEEE80211_FEXT_BPF) != 0;
}
static __inline int
ieee80211_radiotap_active_vap(const struct ieee80211vap *vap)
{
return (vap->iv_flags_ext & IEEE80211_FEXT_BPF) ||
vap->iv_ic->ic_montaps != 0;
}
/*
* Enqueue a task on the state thread.
*/
static __inline void
ieee80211_runtask(struct ieee80211com *ic, struct task *task)
{
taskqueue_enqueue(ic->ic_tq, task);
}
/*
* Wait for a queued task to complete.
*/
static __inline void
ieee80211_draintask(struct ieee80211com *ic, struct task *task)
{
taskqueue_drain(ic->ic_tq, task);
}
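These two helpers are how drivers defer work onto the net80211 state taskqueue: initialize a task once, hand it to ieee80211_runtask() from hot paths, and drain it with ieee80211_draintask() before teardown. A brief hypothetical example (the task and softc field names are placeholders):

/* Hypothetical handler run from the ic_tq thread. */
static void
example_reset_task(void *arg, int npending)
{
	struct ieee80211com *ic = arg;

	/* heavy or sleepable work happens here, not in the caller */
	(void)ic;
	(void)npending;
}

/*
 * Typical usage, assuming sc_reset_task lives in the driver softc:
 *	TASK_INIT(&sc->sc_reset_task, 0, example_reset_task, ic);	at attach
 *	ieee80211_runtask(ic, &sc->sc_reset_task);			from hot paths
 *	ieee80211_draintask(ic, &sc->sc_reset_task);			before detach
 */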
/*
* Key update synchronization methods. XXX should not be visible.
*/
static __inline void
ieee80211_key_update_begin(struct ieee80211vap *vap)
{
vap->iv_key_update_begin(vap);
}
static __inline void
ieee80211_key_update_end(struct ieee80211vap *vap)
{
vap->iv_key_update_end(vap);
}
/*
* XXX these need to be here for IEEE80211_F_DATAPAD
*/
/*
* Return the space occupied by the 802.11 header and any
* padding required by the driver. This works for a
* management or data frame.
*/
static __inline int
ieee80211_hdrspace(struct ieee80211com *ic, const void *data)
{
int size = ieee80211_hdrsize(data);
if (ic->ic_flags & IEEE80211_F_DATAPAD)
size = roundup(size, sizeof(uint32_t));
return size;
}
/*
* Like ieee80211_hdrspace, but handles any type of frame.
*/
static __inline int
ieee80211_anyhdrspace(struct ieee80211com *ic, const void *data)
{
int size = ieee80211_anyhdrsize(data);
if (ic->ic_flags & IEEE80211_F_DATAPAD)
size = roundup(size, sizeof(uint32_t));
return size;
}
/*
* Notify a vap that beacon state has been updated.
*/
static __inline void
ieee80211_beacon_notify(struct ieee80211vap *vap, int what)
{
if (vap->iv_state == IEEE80211_S_RUN)
vap->iv_update_beacon(vap, what);
}
/*
* Calculate HT channel promotion flags for a channel.
* XXX belongs in ieee80211_ht.h but needs IEEE80211_FHT_*
*/
static __inline int
ieee80211_htchanflags(const struct ieee80211_channel *c)
{
return IEEE80211_IS_CHAN_HT40(c) ?
IEEE80211_FHT_HT | IEEE80211_FHT_USEHT40 :
IEEE80211_IS_CHAN_HT(c) ? IEEE80211_FHT_HT : 0;
}
/*
* Fetch the current TX power (cap) for the given node.
*
* This includes the node and ic/vap TX power limit as needed,
* but it doesn't take into account any per-rate limit.
*/
static __inline uint16_t
ieee80211_get_node_txpower(struct ieee80211_node *ni)
{
struct ieee80211com *ic = ni->ni_ic;
uint16_t txpower;
txpower = ni->ni_txpower;
txpower = MIN(txpower, ic->ic_txpowlimit);
if (ic->ic_curchan != NULL) {
txpower = MIN(txpower, 2 * ic->ic_curchan->ic_maxregpower);
txpower = MIN(txpower, ic->ic_curchan->ic_maxpower);
}
return (txpower);
}
/*
* Debugging facilities compiled in when IEEE80211_DEBUG is defined.
*
* The intent is that any problem in the net80211 layer can be
* diagnosed by inspecting the statistics (dumped by the wlanstats
* program) and/or the msgs generated by net80211. Messages are
* broken into functional classes and can be controlled with the
* wlandebug program. Certain of these msg groups are for facilities
* that are no longer part of net80211 (e.g. IEEE80211_MSG_DOT1XSM).
*/
#define IEEE80211_MSG_11N 0x80000000 /* 11n mode debug */
#define IEEE80211_MSG_DEBUG 0x40000000 /* IFF_DEBUG equivalent */
#define IEEE80211_MSG_DUMPPKTS 0x20000000 /* IFF_LINK2 equivalent */
#define IEEE80211_MSG_CRYPTO 0x10000000 /* crypto work */
#define IEEE80211_MSG_INPUT 0x08000000 /* input handling */
#define IEEE80211_MSG_XRATE 0x04000000 /* rate set handling */
#define IEEE80211_MSG_ELEMID 0x02000000 /* element id parsing */
#define IEEE80211_MSG_NODE 0x01000000 /* node handling */
#define IEEE80211_MSG_ASSOC 0x00800000 /* association handling */
#define IEEE80211_MSG_AUTH 0x00400000 /* authentication handling */
#define IEEE80211_MSG_SCAN 0x00200000 /* scanning */
#define IEEE80211_MSG_OUTPUT 0x00100000 /* output handling */
#define IEEE80211_MSG_STATE 0x00080000 /* state machine */
#define IEEE80211_MSG_POWER 0x00040000 /* power save handling */
#define IEEE80211_MSG_HWMP 0x00020000 /* hybrid mesh protocol */
#define IEEE80211_MSG_DOT1XSM 0x00010000 /* 802.1x state machine */
#define IEEE80211_MSG_RADIUS 0x00008000 /* 802.1x radius client */
#define IEEE80211_MSG_RADDUMP 0x00004000 /* dump 802.1x radius packets */
#define IEEE80211_MSG_MESH 0x00002000 /* mesh networking */
#define IEEE80211_MSG_WPA 0x00001000 /* WPA/RSN protocol */
#define IEEE80211_MSG_ACL 0x00000800 /* ACL handling */
#define IEEE80211_MSG_WME 0x00000400 /* WME protocol */
#define IEEE80211_MSG_SUPERG 0x00000200 /* Atheros SuperG protocol */
#define IEEE80211_MSG_DOTH 0x00000100 /* 802.11h support */
#define IEEE80211_MSG_INACT 0x00000080 /* inactivity handling */
#define IEEE80211_MSG_ROAM 0x00000040 /* sta-mode roaming */
#define IEEE80211_MSG_RATECTL 0x00000020 /* tx rate control */
#define IEEE80211_MSG_ACTION 0x00000010 /* action frame handling */
#define IEEE80211_MSG_WDS 0x00000008 /* WDS handling */
#define IEEE80211_MSG_IOCTL 0x00000004 /* ioctl handling */
#define IEEE80211_MSG_TDMA 0x00000002 /* TDMA handling */
#define IEEE80211_MSG_ANY 0xffffffff /* anything */
#define IEEE80211_MSG_BITS \
"\20\2TDMA\3IOCTL\4WDS\5ACTION\6RATECTL\7ROAM\10INACT\11DOTH\12SUPERG" \
"\13WME\14ACL\15WPA\16RADKEYS\17RADDUMP\20RADIUS\21DOT1XSM\22HWMP" \
"\23POWER\24STATE\25OUTPUT\26SCAN\27AUTH\30ASSOC\31NODE\32ELEMID" \
"\33XRATE\34INPUT\35CRYPTO\36DUPMPKTS\37DEBUG\04011N"
#ifdef IEEE80211_DEBUG
#define ieee80211_msg(_vap, _m) ((_vap)->iv_debug & (_m))
#define IEEE80211_DPRINTF(_vap, _m, _fmt, ...) do { \
if (ieee80211_msg(_vap, _m)) \
ieee80211_note(_vap, _fmt, __VA_ARGS__); \
} while (0)
#define IEEE80211_NOTE(_vap, _m, _ni, _fmt, ...) do { \
if (ieee80211_msg(_vap, _m)) \
ieee80211_note_mac(_vap, (_ni)->ni_macaddr, _fmt, __VA_ARGS__);\
} while (0)
#define IEEE80211_NOTE_MAC(_vap, _m, _mac, _fmt, ...) do { \
if (ieee80211_msg(_vap, _m)) \
ieee80211_note_mac(_vap, _mac, _fmt, __VA_ARGS__); \
} while (0)
#define IEEE80211_NOTE_FRAME(_vap, _m, _wh, _fmt, ...) do { \
if (ieee80211_msg(_vap, _m)) \
ieee80211_note_frame(_vap, _wh, _fmt, __VA_ARGS__); \
} while (0)
void ieee80211_note(const struct ieee80211vap *, const char *, ...);
void ieee80211_note_mac(const struct ieee80211vap *,
const uint8_t mac[IEEE80211_ADDR_LEN], const char *, ...);
void ieee80211_note_frame(const struct ieee80211vap *,
const struct ieee80211_frame *, const char *, ...);
#define ieee80211_msg_debug(_vap) \
((_vap)->iv_debug & IEEE80211_MSG_DEBUG)
#define ieee80211_msg_dumppkts(_vap) \
((_vap)->iv_debug & IEEE80211_MSG_DUMPPKTS)
#define ieee80211_msg_input(_vap) \
((_vap)->iv_debug & IEEE80211_MSG_INPUT)
#define ieee80211_msg_radius(_vap) \
((_vap)->iv_debug & IEEE80211_MSG_RADIUS)
#define ieee80211_msg_dumpradius(_vap) \
((_vap)->iv_debug & IEEE80211_MSG_RADDUMP)
#define ieee80211_msg_dumpradkeys(_vap) \
((_vap)->iv_debug & IEEE80211_MSG_RADKEYS)
#define ieee80211_msg_scan(_vap) \
((_vap)->iv_debug & IEEE80211_MSG_SCAN)
#define ieee80211_msg_assoc(_vap) \
((_vap)->iv_debug & IEEE80211_MSG_ASSOC)
/*
* Emit a debug message about discarding a frame or information
* element. One format is for extracting the mac address from
* the frame header; the other is for when a header is not
* available or otherwise appropriate.
*/
#define IEEE80211_DISCARD(_vap, _m, _wh, _type, _fmt, ...) do { \
if ((_vap)->iv_debug & (_m)) \
ieee80211_discard_frame(_vap, _wh, _type, _fmt, __VA_ARGS__);\
} while (0)
#define IEEE80211_DISCARD_IE(_vap, _m, _wh, _type, _fmt, ...) do { \
if ((_vap)->iv_debug & (_m)) \
ieee80211_discard_ie(_vap, _wh, _type, _fmt, __VA_ARGS__);\
} while (0)
#define IEEE80211_DISCARD_MAC(_vap, _m, _mac, _type, _fmt, ...) do { \
if ((_vap)->iv_debug & (_m)) \
ieee80211_discard_mac(_vap, _mac, _type, _fmt, __VA_ARGS__);\
} while (0)
void ieee80211_discard_frame(const struct ieee80211vap *,
const struct ieee80211_frame *, const char *type, const char *fmt, ...);
void ieee80211_discard_ie(const struct ieee80211vap *,
const struct ieee80211_frame *, const char *type, const char *fmt, ...);
void ieee80211_discard_mac(const struct ieee80211vap *,
const uint8_t mac[IEEE80211_ADDR_LEN], const char *type,
const char *fmt, ...);
#else
#define IEEE80211_DPRINTF(_vap, _m, _fmt, ...)
#define IEEE80211_NOTE(_vap, _m, _ni, _fmt, ...)
#define IEEE80211_NOTE_FRAME(_vap, _m, _wh, _fmt, ...)
#define IEEE80211_NOTE_MAC(_vap, _m, _mac, _fmt, ...)
#define ieee80211_msg_dumppkts(_vap) 0
#define ieee80211_msg(_vap, _m) 0
#define IEEE80211_DISCARD(_vap, _m, _wh, _type, _fmt, ...)
#define IEEE80211_DISCARD_IE(_vap, _m, _wh, _type, _fmt, ...)
#define IEEE80211_DISCARD_MAC(_vap, _m, _mac, _type, _fmt, ...)
#endif
#endif /* _NET80211_IEEE80211_VAR_H_ */
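As a usage note for the debug facilities above: messages are gated per vap by iv_debug, so code wraps verbose paths in the message-class macros and the class is toggled at runtime with the wlandebug program mentioned in the comment (e.g. something like wlandebug -i wlan0 +scan for the SCAN class). A short hypothetical example:

/* Hypothetical example of the per-class debug macros defined above. */
static void
example_log_scan_done(struct ieee80211vap *vap, int nfound)
{
	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN,
	    "%s: scan complete, %d candidate(s) found\n", __func__, nfound);
}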
Index: head/tools/tools/iwn/iwnstats/main.c
===================================================================
--- head/tools/tools/iwn/iwnstats/main.c (revision 287196)
+++ head/tools/tools/iwn/iwnstats/main.c (revision 287197)
@@ -1,341 +1,328 @@
/*-
* Copyright (c) 2014 Adrian Chadd <adrian@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
* $FreeBSD$
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>
#include <string.h>
#include <err.h>
#include <net/if.h>
#include <sys/endian.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include "net80211/ieee80211_ioctl.h"
#include "net80211/ieee80211_radiotap.h"
#include "if_iwn_ioctl.h"
#include "if_iwnreg.h"
#include "iwnstats.h"
#include "iwn_ioctl.h"
-#define IWN_DEFAULT_IF "iwn0"
+#define IWN_DEFAULT_IF "wlan0"
static struct iwnstats *
iwnstats_new(const char *ifname)
{
struct iwnstats *is;
is = calloc(1, sizeof(struct iwnstats));
if (is == NULL)
return (NULL);
is->s = socket(AF_INET, SOCK_DGRAM, 0);
if (is->s < 0)
err(1, "socket");
iwn_setifname(is, ifname);
return (is);
}
static void
iwn_stats_phy_print(struct iwnstats *is, struct iwn_rx_phy_stats *rxphy,
const char *prefix)
{
printf("%s: %s: ina=%d, fina=%d, bad_plcp=%d, bad_crc32=%d, overrun=%d, eoverrun=%d\n",
__func__,
prefix,
le32toh(rxphy->ina),
le32toh(rxphy->fina),
le32toh(rxphy->bad_plcp),
le32toh(rxphy->bad_crc32),
le32toh(rxphy->overrun),
le32toh(rxphy->eoverrun));
printf("%s: %s: fa=%d, bad_fina_sync=%d, sfd_timeout=%d, fina_timeout=%d, no_rts_ack=%d\n",
__func__,
prefix,
le32toh(rxphy->fa),
le32toh(rxphy->bad_fina_sync),
le32toh(rxphy->sfd_timeout),
le32toh(rxphy->fina_timeout),
le32toh(rxphy->no_rts_ack));
printf("%s: %s: rxe_limit=%d, ack=%d, cts=%d, ba_resp=%d, dsp_kill=%d, bad_mh=%d, rssi_sum=%d\n",
__func__,
prefix,
le32toh(rxphy->rxe_limit),
le32toh(rxphy->ack),
le32toh(rxphy->cts),
le32toh(rxphy->ba_resp),
le32toh(rxphy->dsp_kill),
le32toh(rxphy->bad_mh),
le32toh(rxphy->rssi_sum));
}
static void
iwn_stats_rx_general_print(struct iwnstats *is, struct iwn_rx_general_stats *g)
{
printf("%s: bad_cts=%d, bad_ack=%d, not_bss=%d, filtered=%d, bad_chan=%d, beacons=%d\n",
__func__,
le32toh(g->bad_cts),
le32toh(g->bad_ack),
le32toh(g->not_bss),
le32toh(g->filtered),
le32toh(g->bad_chan),
le32toh(g->beacons));
/* XXX it'd be nice to have adc/ina saturated as a % of time */
printf("%s: missed_beacons=%d, adc_saturated=%d, ina_searched=%d\n",
__func__,
le32toh(g->missed_beacons),
le32toh(g->adc_saturated),
le32toh(g->ina_searched));
printf("%s: noise=[%d, %d, %d] flags=0x%08x, load=%d, fa=%d\n",
__func__,
le32toh(g->noise[0]),
le32toh(g->noise[1]),
le32toh(g->noise[2]),
le32toh(g->flags),
le32toh(g->load),
le32toh(g->fa));
printf("%s: rssi=[%d, %d, %d] energy=[%d %d %d]\n",
__func__,
le32toh(g->rssi[0]),
le32toh(g->rssi[1]),
le32toh(g->rssi[2]),
le32toh(g->energy[0]),
le32toh(g->energy[1]),
le32toh(g->energy[2]));
}
static void
iwn_stats_tx_print(struct iwnstats *is, struct iwn_tx_stats *tx)
{
printf("%s: preamble=%d, rx_detected=%d, bt_defer=%d, bt_kill=%d, short_len=%d\n",
__func__,
le32toh(tx->preamble),
le32toh(tx->rx_detected),
le32toh(tx->bt_defer),
le32toh(tx->bt_kill),
le32toh(tx->short_len));
printf("%s: cts_timeout=%d, ack_timeout=%d, exp_ack=%d, ack=%d, msdu=%d\n",
__func__,
le32toh(tx->cts_timeout),
le32toh(tx->ack_timeout),
le32toh(tx->exp_ack),
le32toh(tx->ack),
le32toh(tx->msdu));
printf("%s: burst_err1=%d, burst_err2=%d, cts_collision=%d, ack_collision=%d\n",
__func__,
le32toh(tx->burst_err1),
le32toh(tx->burst_err2),
le32toh(tx->cts_collision),
le32toh(tx->ack_collision));
printf("%s: ba_timeout=%d, ba_resched=%d, query_ampdu=%d, query=%d, query_ampdu_frag=%d\n",
__func__,
le32toh(tx->ba_timeout),
le32toh(tx->ba_resched),
le32toh(tx->query_ampdu),
le32toh(tx->query),
le32toh(tx->query_ampdu_frag));
printf("%s: query_mismatch=%d, not_ready=%d, underrun=%d, bt_ht_kill=%d, rx_ba_resp=%d\n",
__func__,
le32toh(tx->query_mismatch),
le32toh(tx->not_ready),
le32toh(tx->underrun),
le32toh(tx->bt_ht_kill),
le32toh(tx->rx_ba_resp));
}
static void
iwn_stats_ht_phy_print(struct iwnstats *is, struct iwn_rx_ht_phy_stats *ht)
{
printf("%s: bad_plcp=%d, overrun=%d, eoverrun=%d, good_crc32=%d, bad_crc32=%d\n",
__func__,
le32toh(ht->bad_plcp),
le32toh(ht->overrun),
le32toh(ht->eoverrun),
le32toh(ht->good_crc32),
le32toh(ht->bad_crc32));
printf("%s: bad_mh=%d, good_ampdu_crc32=%d, ampdu=%d, fragment=%d\n",
__func__,
le32toh(ht->bad_mh),
le32toh(ht->good_ampdu_crc32),
le32toh(ht->ampdu),
le32toh(ht->fragment));
}
static void
iwn_stats_general_print(struct iwnstats *is, struct iwn_stats *stats)
{
/* General */
printf("%s: temp=%d, temp_m=%d, burst_check=%d, burst=%d, sleep=%d, slot_out=%d, slot_idle=%d\n",
__func__,
le32toh(stats->general.temp),
le32toh(stats->general.temp_m),
le32toh(stats->general.burst_check),
le32toh(stats->general.burst),
le32toh(stats->general.sleep),
le32toh(stats->general.slot_out),
le32toh(stats->general.slot_idle));
printf("%s: slot_out=%d, ttl_tstamp=0x%08x, tx_ant_a=%d, tx_ant_b=%d, exec=%d, probe=%d\n",
__func__,
le32toh(stats->general.slot_out),
le32toh(stats->general.ttl_tstamp),
le32toh(stats->general.tx_ant_a),
le32toh(stats->general.tx_ant_b),
le32toh(stats->general.exec),
le32toh(stats->general.probe));
printf("%s: rx_enabled=%d\n",
__func__,
le32toh(stats->general.rx_enabled));
}
static void
iwn_print(struct iwnstats *is)
{
struct iwn_stats *s;
struct timeval tv;
s = &is->st;
gettimeofday(&tv, NULL);
printf("time=%ld.%.6ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
iwn_stats_general_print(is, s);
/* RX */
iwn_stats_phy_print(is, &s->rx.ofdm, "ofdm");
iwn_stats_phy_print(is, &s->rx.cck, "cck");
iwn_stats_ht_phy_print(is, &s->rx.ht);
iwn_stats_rx_general_print(is, &s->rx.general);
/* TX */
iwn_stats_tx_print(is, &s->tx);
printf("--\n");
}
static void
usage(void)
{
printf("Usage: iwnstats [-h] [-i ifname]\n");
printf(" -h: Help\n");
printf(" -i <ifname>: Use ifname (default %s)\n",
IWN_DEFAULT_IF);
}
int
main(int argc, char *argv[])
{
struct iwnstats *is;
int ch;
char *ifname;
bool first;
char *sysctlname;
size_t len;
int ret;
ifname = strdup(IWN_DEFAULT_IF);
/* Parse command line arguments */
while ((ch = getopt(argc, argv,
"hi:")) != -1) {
switch (ch) {
case 'i':
if (ifname)
free(ifname);
ifname = strdup(optarg);
- if (strncmp(ifname, "wlan", 4) == 0) {
- free(ifname);
- len = 0;
- asprintf(&sysctlname, "net.wlan.%s.%%parent", ifname + 4);
- ret = sysctlbyname(sysctlname, NULL, &len, NULL, 0);
- if (ret != 0)
- err(1, "sysctl failed");
- ifname = calloc(len, 1);
- ret = sysctlbyname(sysctlname, ifname, &len, NULL, 0);
- if (ret != 0)
- err(1, "sysctl failed");
- free(sysctlname);
- }
break;
default:
case '?':
case 'h':
usage();
exit(1);
}
}
is = iwnstats_new(ifname);
if (is == NULL) {
fprintf(stderr, "%s: couldn't allocate new stats structure\n",
argv[0]);
exit(127);
}
/* begin fetching data */
first = true;
while (1) {
if (iwn_collect(is) != 0) {
fprintf(stderr, "%s: fetch failed\n", argv[0]);
if (first)
return 1;
goto next;
}
iwn_print(is);
next:
usleep(100 * 1000);
first = false;
}
exit(0);
}
